/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#include "cputlb.h"

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t *code_gen_prologue;
static uint8_t *code_gen_buffer;
static size_t code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static size_t code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *, cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

#define P_L2_LEVELS \
    (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
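
/* Worked example of the arithmetic above (illustrative values only; the
   real numbers depend on the configured host and target): with
   L1_MAP_ADDR_SPACE_BITS == 64, TARGET_PAGE_BITS == 12 and L2_BITS == 10,
   52 index bits remain.  52 % 10 == 2, which is below the threshold of 4,
   so V_L1_BITS == 2 + 10 == 12: the level-1 table holds 4096 entries,
   and the 40 bits below it (V_L1_SHIFT) are consumed by four 10-bit
   levels. */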
163
Stefan Weilc6d50672012-03-16 20:23:49 +0100164uintptr_t qemu_real_host_page_size;
165uintptr_t qemu_host_page_size;
166uintptr_t qemu_host_page_mask;
bellard54936002003-05-13 00:25:15 +0000167
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800168/* This is a multi-level map on the virtual address space.
169 The bottom level has pointers to PageDesc. */
170static void *l1_map[V_L1_SIZE];
bellard54936002003-05-13 00:25:15 +0000171
pbrooke2eef172008-06-08 01:09:01 +0000172#if !defined(CONFIG_USER_ONLY)
Avi Kivity4346ae32012-02-10 17:00:01 +0200173typedef struct PhysPageEntry PhysPageEntry;
174
Avi Kivity5312bd82012-02-12 18:32:55 +0200175static MemoryRegionSection *phys_sections;
176static unsigned phys_sections_nb, phys_sections_nb_alloc;
177static uint16_t phys_section_unassigned;
Avi Kivityaa102232012-03-08 17:06:55 +0200178static uint16_t phys_section_notdirty;
179static uint16_t phys_section_rom;
180static uint16_t phys_section_watch;
Avi Kivity5312bd82012-02-12 18:32:55 +0200181
Avi Kivity4346ae32012-02-10 17:00:01 +0200182struct PhysPageEntry {
Avi Kivity07f07b32012-02-13 20:45:32 +0200183 uint16_t is_leaf : 1;
184 /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
185 uint16_t ptr : 15;
Avi Kivity4346ae32012-02-10 17:00:01 +0200186};
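
/* Note on the encoding above: 'ptr' is an index rather than a host
   pointer -- into phys_sections when is_leaf is set, into phys_map_nodes
   otherwise -- so each entry costs only two bytes and at most 2^15 - 1
   nodes or sections can be addressed; the all-ones 15-bit value is
   reserved as PHYS_MAP_NODE_NIL below. */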

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to MemoryRegionSections.  */
static PhysPageEntry phys_map = { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };

static void io_mem_init(void);
static void memory_map_init(void);

static MemoryRegion io_mem_watch;
#endif

/* statistics */
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static inline void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static inline void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
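
/* A worked example of the rounding in map_exec() (assuming a 4096-byte
   host page, purely illustrative): addr = 0x1234, size = 0x100 gives
   start = 0x1000 and end = 0x2000, so the single page containing the
   range is remapped read/write/execute.  Neighbouring data sharing that
   page becomes executable as a side effect. */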

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}
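
/* Illustrative walk through page_find_alloc() (using the example split
   of V_L1_SHIFT == 40 and L2_BITS == 10 from above): the level-1 slot is
   l1_map[(index >> 40) & (V_L1_SIZE - 1)]; three interior levels are then
   selected by index bits 39..30, 29..20 and 19..10; the PageDesc is
   finally entry (index & 0x3ff) of a 1024-element leaf array.  Interior
   tables and leaves are only allocated when alloc != 0, so the map stays
   sparse. */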

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}


static void phys_page_set_level(PhysPageEntry *lp, target_phys_addr_t *index,
                                target_phys_addr_t *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    target_phys_addr_t step = (target_phys_addr_t)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(target_phys_addr_t index, target_phys_addr_t nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
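
/* Sketch of the recursion above: at each level the walk covers a step of
   2^(level * L2_BITS) pages.  When the remaining [index, index + nb)
   range is aligned to the step and at least one step long, the whole
   subtree collapses into a single leaf entry; otherwise the walk
   descends a level.  Roughly, only the unaligned edges of the range can
   force full paths of fresh nodes, which is why reserving
   3 * P_L2_LEVELS nodes up front is comfortably enough. */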

MemoryRegionSection *phys_page_find(target_phys_addr_t index)
{
    PhysPageEntry lp = phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

#define mmap_lock() do { } while (0)
#define mmap_unlock() do { } while (0)
#endif

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode; this will change once a dedicated libc is used.  */
/* ??? 64-bit hosts ought to have no problem mmaping data outside the
   region in which the guest needs to run.  Revisit this.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

/* ??? Should configure for this, not list operating systems here.  */
#if (defined(__linux__) \
    || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__))
# define USE_MMAP
#endif

/* Minimum size of the code gen buffer.  This number is randomly chosen,
   but not so small that we can't have a fair number of TB's live.  */
#define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)

/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__arm__)
# define MAX_CODE_GEN_BUFFER_SIZE  (16u * 1024 * 1024)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)

static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer.  */
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* ??? Needs adjustments.  */
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
           static buffer, we could size this on RESERVED_VA, on the text
           segment size of the executable, or continue to use the default.  */
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    code_gen_buffer_size = tb_size;
    return tb_size;
}
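
/* Sizing example (with made-up inputs): a system-mode build without
   USE_STATIC_CODE_GEN_BUFFER and ram_size == 256MB asks for 64MB by
   default; on an ARM host the MAX clamp then cuts that to the 16MB
   reachable by direct branches, while an explicit request below 1MB
   would instead be raised to MIN_CODE_GEN_BUFFER_SIZE. */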

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

static inline void *alloc_code_gen_buffer(void)
{
    map_exec(static_code_gen_buffer, code_gen_buffer_size);
    return static_code_gen_buffer;
}
#elif defined(USE_MMAP)
static inline void *alloc_code_gen_buffer(void)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address.  */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (code_gen_buffer_size > 800u * 1024 * 1024) {
        code_gen_buffer_size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# endif

    buf = mmap((void *)start, code_gen_buffer_size,
               PROT_WRITE | PROT_READ | PROT_EXEC, flags, -1, 0);
    return buf == MAP_FAILED ? NULL : buf;
}
#else
static inline void *alloc_code_gen_buffer(void)
{
    void *buf = g_malloc(code_gen_buffer_size);
    if (buf) {
        map_exec(buf, code_gen_buffer_size);
    }
    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, USE_MMAP */
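
/* The three alloc_code_gen_buffer() variants above trade portability for
   placement control: a static BSS array remapped executable via
   map_exec() (USE_STATIC_CODE_GEN_BUFFER), an anonymous RWX mmap() with
   per-host placement hints (USE_MMAP), and a plain allocation plus
   map_exec() as the fallback.  A NULL result is treated as fatal by
   code_gen_alloc() below. */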

static inline void code_gen_alloc(size_t tb_size)
{
    code_gen_buffer_size = size_code_gen_buffer(tb_size);
    code_gen_buffer = alloc_code_gen_buffer();
    if (code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }

    /* Steal room for the prologue at the end of the buffer.  This ensures
       (via the MAX_CODE_GEN_BUFFER_SIZE limits above) that direct branches
       from TB's to the prologue are going to be in range.  It also means
       that we don't need to mark (additional) portions of the data segment
       as executable.  */
    code_gen_prologue = code_gen_buffer + code_gen_buffer_size - 1024;
    code_gen_buffer_size -= 1024;

    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
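
/* Resulting buffer layout (a sketch, not from the original source):

   code_gen_buffer                                              end
   |--- translated code ---|--- op slack ---|- prologue (1KB) -|
                           ^
                           code_gen_buffer_max_size

   The TCG_MAX_OP_SIZE * OPC_BUF_SIZE slack is intended to guarantee
   that a TB whose translation starts just below
   code_gen_buffer_max_size can still be emitted in full without
   running into the prologue. */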

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    tcg_register_jit(code_gen_buffer, code_gen_buffer_size);
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUArchState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUArchState),
        VMSTATE_UINT32(interrupt_request, CPUArchState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUArchState *qemu_get_cpu(int cpu)
{
    CPUArchState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUArchState *env)
{
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}
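
/* Note that only the most recently generated TB can actually be freed,
   by rewinding code_gen_ptr; tb_free() on any older block is a silent
   no-op, and its space is reclaimed only at the next tb_flush(). */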

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUArchState *env1)
{
    CPUArchState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));
    }

    memset(tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof(void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for (;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
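
/* Summary of the pointer tagging used by these list walkers (not in the
   original source): the low two bits of each TranslationBlock pointer
   stored in the per-page and jump lists encode which slot the link lives
   in -- 0 or 1 for page_next[]/jmp_next[], and 2 to mark the head of the
   circular jump list.  Masking with ~3 recovers the real pointer, which
   is safe because TBs are aligned more strictly than 4 bytes. */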

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for (;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUArchState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the per-CPU jump caches */
    h = tb_jmp_cache_hash_func(tb->pc);
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for (;;) {
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
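
/* Worked example for set_bits() (illustrative): start = 5, len = 7 marks
   bits 5..11 and spans two bytes.  The first byte is or'ed with
   (0xff << 5) & 0xff == 0xe0 (bits 5-7); there are no full middle bytes;
   the last byte is or'ed with ~(0xff << 4) & 0xff == 0x0f (bits 8-11 of
   the bitmap). */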

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

TranslationBlock *tb_gen_code(CPUArchState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((uintptr_t)code_gen_ptr + code_gen_size +
                             CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
                              int is_cpu_write_access)
{
    while (start < end) {
        tb_invalidate_phys_page_range(start, end, is_cpu_write_access);
        start &= TARGET_PAGE_MASK;
        start += TARGET_PAGE_SIZE;
    }
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUArchState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip +
                 (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
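
/* Note on the fast path above: once a page has seen
   SMC_BITMAP_USE_THRESHOLD code writes, build_page_bitmap() records
   which bytes of the page are covered by TBs.  With the bitmap in place,
   a write of up to 8 bytes only falls through to the expensive
   tb_invalidate_phys_page_range() when one of the touched bits is
   actually set; without it, every write invalidates. */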
1238
bellard9fa3e852004-01-04 18:06:42 +00001239#if !defined(CONFIG_SOFTMMU)
Paul Brook41c1b1c2010-03-12 16:54:58 +00001240static void tb_invalidate_phys_page(tb_page_addr_t addr,
Blue Swirl20503962012-04-09 14:20:20 +00001241 uintptr_t pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00001242{
aliguori6b917542008-11-18 19:46:41 +00001243 TranslationBlock *tb;
bellard9fa3e852004-01-04 18:06:42 +00001244 PageDesc *p;
aliguori6b917542008-11-18 19:46:41 +00001245 int n;
bellardd720b932004-04-25 17:57:43 +00001246#ifdef TARGET_HAS_PRECISE_SMC
aliguori6b917542008-11-18 19:46:41 +00001247 TranslationBlock *current_tb = NULL;
Andreas Färber9349b4f2012-03-14 01:38:32 +01001248 CPUArchState *env = cpu_single_env;
aliguori6b917542008-11-18 19:46:41 +00001249 int current_tb_modified = 0;
1250 target_ulong current_pc = 0;
1251 target_ulong current_cs_base = 0;
1252 int current_flags = 0;
bellardd720b932004-04-25 17:57:43 +00001253#endif
bellard9fa3e852004-01-04 18:06:42 +00001254
1255 addr &= TARGET_PAGE_MASK;
1256 p = page_find(addr >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001257 if (!p)
bellardfd6ce8f2003-05-14 19:00:11 +00001258 return;
1259 tb = p->first_tb;
bellardd720b932004-04-25 17:57:43 +00001260#ifdef TARGET_HAS_PRECISE_SMC
1261 if (tb && pc != 0) {
1262 current_tb = tb_find_pc(pc);
1263 }
1264#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001265 while (tb != NULL) {
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001266 n = (uintptr_t)tb & 3;
1267 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
bellardd720b932004-04-25 17:57:43 +00001268#ifdef TARGET_HAS_PRECISE_SMC
1269 if (current_tb == tb &&
pbrook2e70f6e2008-06-29 01:03:05 +00001270 (current_tb->cflags & CF_COUNT_MASK) != 1) {
bellardd720b932004-04-25 17:57:43 +00001271 /* If we are modifying the current TB, we must stop
1272 its execution. We could be more precise by checking
1273 that the modification is after the current PC, but it
1274 would require a specialized function to partially
1275 restore the CPU state */
ths3b46e622007-09-17 08:09:54 +00001276
bellardd720b932004-04-25 17:57:43 +00001277 current_tb_modified = 1;
Stefan Weil618ba8e2011-04-18 06:39:53 +00001278 cpu_restore_state(current_tb, env, pc);
aliguori6b917542008-11-18 19:46:41 +00001279 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1280 &current_flags);
bellardd720b932004-04-25 17:57:43 +00001281 }
1282#endif /* TARGET_HAS_PRECISE_SMC */
bellard9fa3e852004-01-04 18:06:42 +00001283 tb_phys_invalidate(tb, addr);
1284 tb = tb->page_next[n];
bellardfd6ce8f2003-05-14 19:00:11 +00001285 }
1286 p->first_tb = NULL;
bellardd720b932004-04-25 17:57:43 +00001287#ifdef TARGET_HAS_PRECISE_SMC
1288 if (current_tb_modified) {
1289 /* we generate a block containing just the instruction
1290 modifying the memory; this ensures that the block cannot
1291 modify itself */
bellardea1c1802004-06-14 18:56:36 +00001292 env->current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001293 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
bellardd720b932004-04-25 17:57:43 +00001294 cpu_resume_from_signal(env, puc);
1295 }
1296#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001297}
bellard9fa3e852004-01-04 18:06:42 +00001298#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001299
1300/* add the tb in the target page and protect it if necessary */
ths5fafdf22007-09-16 21:08:06 +00001301static inline void tb_alloc_page(TranslationBlock *tb,
Paul Brook41c1b1c2010-03-12 16:54:58 +00001302 unsigned int n, tb_page_addr_t page_addr)
bellardfd6ce8f2003-05-14 19:00:11 +00001303{
1304 PageDesc *p;
Juan Quintela4429ab42011-06-02 01:53:44 +00001305#ifndef CONFIG_USER_ONLY
1306 bool page_already_protected;
1307#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001308
bellard9fa3e852004-01-04 18:06:42 +00001309 tb->page_addr[n] = page_addr;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001310 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
bellard9fa3e852004-01-04 18:06:42 +00001311 tb->page_next[n] = p->first_tb;
Juan Quintela4429ab42011-06-02 01:53:44 +00001312#ifndef CONFIG_USER_ONLY
1313 page_already_protected = p->first_tb != NULL;
1314#endif
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001315 p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
bellard9fa3e852004-01-04 18:06:42 +00001316 invalidate_page_bitmap(p);
1317
bellard107db442004-06-22 18:48:46 +00001318#if defined(TARGET_HAS_SMC) || 1
bellardd720b932004-04-25 17:57:43 +00001319
bellard9fa3e852004-01-04 18:06:42 +00001320#if defined(CONFIG_USER_ONLY)
bellardfd6ce8f2003-05-14 19:00:11 +00001321 if (p->flags & PAGE_WRITE) {
pbrook53a59602006-03-25 19:31:22 +00001322 target_ulong addr;
1323 PageDesc *p2;
bellard9fa3e852004-01-04 18:06:42 +00001324 int prot;
1325
bellardfd6ce8f2003-05-14 19:00:11 +00001326 /* force the host page as non-writable (writes will have a
1327 page fault + mprotect overhead) */
pbrook53a59602006-03-25 19:31:22 +00001328 page_addr &= qemu_host_page_mask;
bellardfd6ce8f2003-05-14 19:00:11 +00001329 prot = 0;
pbrook53a59602006-03-25 19:31:22 +00001330 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1331 addr += TARGET_PAGE_SIZE) {
1332
1333 p2 = page_find (addr >> TARGET_PAGE_BITS);
1334 if (!p2)
1335 continue;
1336 prot |= p2->flags;
1337 p2->flags &= ~PAGE_WRITE;
pbrook53a59602006-03-25 19:31:22 +00001338 }
ths5fafdf22007-09-16 21:08:06 +00001339 mprotect(g2h(page_addr), qemu_host_page_size,
bellardfd6ce8f2003-05-14 19:00:11 +00001340 (prot & PAGE_BITS) & ~PAGE_WRITE);
1341#ifdef DEBUG_TB_INVALIDATE
blueswir1ab3d1722007-11-04 07:31:40 +00001342 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
pbrook53a59602006-03-25 19:31:22 +00001343 page_addr);
bellardfd6ce8f2003-05-14 19:00:11 +00001344#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001345 }
bellard9fa3e852004-01-04 18:06:42 +00001346#else
1347 /* if some code is already present, then the pages are already
1348 protected, so we only need to handle the case of the first TB
1349 allocated in a physical page */
Juan Quintela4429ab42011-06-02 01:53:44 +00001350 if (!page_already_protected) {
bellard6a00d602005-11-21 23:25:50 +00001351 tlb_protect_code(page_addr);
bellard9fa3e852004-01-04 18:06:42 +00001352 }
1353#endif
bellardd720b932004-04-25 17:57:43 +00001354
1355#endif /* TARGET_HAS_SMC */
bellardfd6ce8f2003-05-14 19:00:11 +00001356}
1357
bellard9fa3e852004-01-04 18:06:42 +00001358/* add a new TB and link it to the physical page tables. phys_page2 is
1359 (-1) to indicate that only one page contains the TB. */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001360void tb_link_page(TranslationBlock *tb,
1361 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
bellardd4e81642003-05-25 16:46:15 +00001362{
bellard9fa3e852004-01-04 18:06:42 +00001363 unsigned int h;
1364 TranslationBlock **ptb;
1365
pbrookc8a706f2008-06-02 16:16:42 +00001366 /* Grab the mmap lock to stop another thread invalidating this TB
1367 before we are done. */
1368 mmap_lock();
bellard9fa3e852004-01-04 18:06:42 +00001369 /* add in the physical hash table */
1370 h = tb_phys_hash_func(phys_pc);
1371 ptb = &tb_phys_hash[h];
1372 tb->phys_hash_next = *ptb;
1373 *ptb = tb;
bellardfd6ce8f2003-05-14 19:00:11 +00001374
1375 /* add in the page list */
bellard9fa3e852004-01-04 18:06:42 +00001376 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1377 if (phys_page2 != -1)
1378 tb_alloc_page(tb, 1, phys_page2);
1379 else
1380 tb->page_addr[1] = -1;
bellard9fa3e852004-01-04 18:06:42 +00001381
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001382 tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
bellardd4e81642003-05-25 16:46:15 +00001383 tb->jmp_next[0] = NULL;
1384 tb->jmp_next[1] = NULL;
1385
1386 /* init original jump addresses */
1387 if (tb->tb_next_offset[0] != 0xffff)
1388 tb_reset_jump(tb, 0);
1389 if (tb->tb_next_offset[1] != 0xffff)
1390 tb_reset_jump(tb, 1);
bellard8a40a182005-11-20 10:35:40 +00001391
1392#ifdef DEBUG_TB_CHECK
1393 tb_page_check();
1394#endif
pbrookc8a706f2008-06-02 16:16:42 +00001395 mmap_unlock();
bellardfd6ce8f2003-05-14 19:00:11 +00001396}
1397
bellarda513fe12003-05-27 23:29:48 +00001398/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1399 tb[1].tc_ptr. Return NULL if not found */
Stefan Weil6375e092012-04-06 22:26:15 +02001400TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
bellarda513fe12003-05-27 23:29:48 +00001401{
1402 int m_min, m_max, m;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001403 uintptr_t v;
bellarda513fe12003-05-27 23:29:48 +00001404 TranslationBlock *tb;
1405
1406 if (nb_tbs <= 0)
1407 return NULL;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001408 if (tc_ptr < (uintptr_t)code_gen_buffer ||
1409 tc_ptr >= (uintptr_t)code_gen_ptr) {
bellarda513fe12003-05-27 23:29:48 +00001410 return NULL;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001411 }
bellarda513fe12003-05-27 23:29:48 +00001412 /* binary search (cf Knuth) */
1413 m_min = 0;
1414 m_max = nb_tbs - 1;
1415 while (m_min <= m_max) {
1416 m = (m_min + m_max) >> 1;
1417 tb = &tbs[m];
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001418 v = (uintptr_t)tb->tc_ptr;
bellarda513fe12003-05-27 23:29:48 +00001419 if (v == tc_ptr)
1420 return tb;
1421 else if (tc_ptr < v) {
1422 m_max = m - 1;
1423 } else {
1424 m_min = m + 1;
1425 }
ths5fafdf22007-09-16 21:08:06 +00001426 }
bellarda513fe12003-05-27 23:29:48 +00001427 return &tbs[m_max];
1428}
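/* Usage sketch (illustrative, not from the original source): a typical
   caller maps the host PC of a faulting instruction back to its TB and
   then recovers precise guest state, much as the SMC handlers above do. */
#if 0
static void example_restore_state(CPUArchState *env, uintptr_t host_pc)
{
    TranslationBlock *tb = tb_find_pc(host_pc);

    if (tb) {
        /* host_pc lies within tb's generated code */
        cpu_restore_state(tb, env, host_pc);
    }
}
#endif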
bellard75012672003-06-21 13:11:07 +00001429
bellardea041c02003-06-25 16:16:50 +00001430static void tb_reset_jump_recursive(TranslationBlock *tb);
1431
1432static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1433{
1434 TranslationBlock *tb1, *tb_next, **ptb;
1435 unsigned int n1;
1436
1437 tb1 = tb->jmp_next[n];
1438 if (tb1 != NULL) {
1439 /* find head of list */
1440 for(;;) {
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001441 n1 = (uintptr_t)tb1 & 3;
1442 tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
bellardea041c02003-06-25 16:16:50 +00001443 if (n1 == 2)
1444 break;
1445 tb1 = tb1->jmp_next[n1];
1446 }
1447 /* we are now sure that tb jumps to tb1 */
1448 tb_next = tb1;
1449
1450 /* remove tb from the jmp_first list */
1451 ptb = &tb_next->jmp_first;
1452 for(;;) {
1453 tb1 = *ptb;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001454 n1 = (uintptr_t)tb1 & 3;
1455 tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
bellardea041c02003-06-25 16:16:50 +00001456 if (n1 == n && tb1 == tb)
1457 break;
1458 ptb = &tb1->jmp_next[n1];
1459 }
1460 *ptb = tb->jmp_next[n];
1461 tb->jmp_next[n] = NULL;
ths3b46e622007-09-17 08:09:54 +00001462
bellardea041c02003-06-25 16:16:50 +00001463 /* suppress the jump to next tb in generated code */
1464 tb_reset_jump(tb, n);
1465
bellard01243112004-01-04 15:48:17 +00001466 /* suppress jumps in the tb we could have jumped to */
bellardea041c02003-06-25 16:16:50 +00001467 tb_reset_jump_recursive(tb_next);
1468 }
1469}
1470
1471static void tb_reset_jump_recursive(TranslationBlock *tb)
1472{
1473 tb_reset_jump_recursive2(tb, 0);
1474 tb_reset_jump_recursive2(tb, 1);
1475}
1476
bellard1fddef42005-04-17 19:16:13 +00001477#if defined(TARGET_HAS_ICE)
Paul Brook94df27f2010-02-28 23:47:45 +00001478#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +01001479static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
Paul Brook94df27f2010-02-28 23:47:45 +00001480{
1481 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1482}
1483#else
Max Filippov1e7855a2012-04-10 02:48:17 +04001484void tb_invalidate_phys_addr(target_phys_addr_t addr)
bellardd720b932004-04-25 17:57:43 +00001485{
Anthony Liguoric227f092009-10-01 16:12:16 -05001486 ram_addr_t ram_addr;
Avi Kivityf3705d52012-03-08 16:16:34 +02001487 MemoryRegionSection *section;
bellardd720b932004-04-25 17:57:43 +00001488
Avi Kivity06ef3522012-02-13 16:11:22 +02001489 section = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf3705d52012-03-08 16:16:34 +02001490 if (!(memory_region_is_ram(section->mr)
1491 || (section->mr->rom_device && section->mr->readable))) {
Avi Kivity06ef3522012-02-13 16:11:22 +02001492 return;
1493 }
Avi Kivityf3705d52012-03-08 16:16:34 +02001494 ram_addr = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00001495 + memory_region_section_addr(section, addr);
pbrook706cd4b2006-04-08 17:36:21 +00001496 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
bellardd720b932004-04-25 17:57:43 +00001497}
Max Filippov1e7855a2012-04-10 02:48:17 +04001498
1499static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
1500{
Max Filippov9d70c4b2012-05-27 20:21:08 +04001501 tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
1502 (pc & ~TARGET_PAGE_MASK));
Max Filippov1e7855a2012-04-10 02:48:17 +04001503}
bellardc27004e2005-01-03 23:35:10 +00001504#endif
Paul Brook94df27f2010-02-28 23:47:45 +00001505#endif /* TARGET_HAS_ICE */
bellardd720b932004-04-25 17:57:43 +00001506
Paul Brookc527ee82010-03-01 03:31:14 +00001507#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +01001508void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
Paul Brookc527ee82010-03-01 03:31:14 +00001509
1510{
1511}
1512
Andreas Färber9349b4f2012-03-14 01:38:32 +01001513int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
Paul Brookc527ee82010-03-01 03:31:14 +00001514 int flags, CPUWatchpoint **watchpoint)
1515{
1516 return -ENOSYS;
1517}
1518#else
pbrook6658ffb2007-03-16 23:58:11 +00001519/* Add a watchpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001520int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
aliguoria1d1bb32008-11-18 20:07:32 +00001521 int flags, CPUWatchpoint **watchpoint)
pbrook6658ffb2007-03-16 23:58:11 +00001522{
aliguorib4051332008-11-18 20:14:20 +00001523 target_ulong len_mask = ~(len - 1);
aliguoric0ce9982008-11-25 22:13:57 +00001524 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001525
aliguorib4051332008-11-18 20:14:20 +00001526 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
Max Filippov0dc23822012-01-29 03:15:23 +04001527 if ((len & (len - 1)) || (addr & ~len_mask) ||
1528 len == 0 || len > TARGET_PAGE_SIZE) {
aliguorib4051332008-11-18 20:14:20 +00001529 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1530 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1531 return -EINVAL;
1532 }
Anthony Liguori7267c092011-08-20 22:09:37 -05001533 wp = g_malloc(sizeof(*wp));
pbrook6658ffb2007-03-16 23:58:11 +00001534
aliguoria1d1bb32008-11-18 20:07:32 +00001535 wp->vaddr = addr;
aliguorib4051332008-11-18 20:14:20 +00001536 wp->len_mask = len_mask;
aliguoria1d1bb32008-11-18 20:07:32 +00001537 wp->flags = flags;
1538
aliguori2dc9f412008-11-18 20:56:59 +00001539 /* keep all GDB-injected watchpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001540 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001541 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001542 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001543 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001544
pbrook6658ffb2007-03-16 23:58:11 +00001545 tlb_flush_page(env, addr);
aliguoria1d1bb32008-11-18 20:07:32 +00001546
1547 if (watchpoint)
1548 *watchpoint = wp;
1549 return 0;
pbrook6658ffb2007-03-16 23:58:11 +00001550}
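/* Usage sketch (illustrative; the wrapper is invented for the example):
   insert a 4-byte write watchpoint and later remove it by reference. */
#if 0
static void example_set_write_watch(CPUArchState *env, target_ulong addr)
{
    CPUWatchpoint *wp;

    if (cpu_watchpoint_insert(env, addr, 4, BP_GDB | BP_MEM_WRITE, &wp) == 0) {
        /* ... guest runs; a hit raises EXCP_DEBUG ... */
        cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif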
1551
aliguoria1d1bb32008-11-18 20:07:32 +00001552/* Remove a specific watchpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001553int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
aliguoria1d1bb32008-11-18 20:07:32 +00001554 int flags)
pbrook6658ffb2007-03-16 23:58:11 +00001555{
aliguorib4051332008-11-18 20:14:20 +00001556 target_ulong len_mask = ~(len - 1);
aliguoria1d1bb32008-11-18 20:07:32 +00001557 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001558
Blue Swirl72cf2d42009-09-12 07:36:22 +00001559 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001560 if (addr == wp->vaddr && len_mask == wp->len_mask
aliguori6e140f22008-11-18 20:37:55 +00001561 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
aliguoria1d1bb32008-11-18 20:07:32 +00001562 cpu_watchpoint_remove_by_ref(env, wp);
pbrook6658ffb2007-03-16 23:58:11 +00001563 return 0;
1564 }
1565 }
aliguoria1d1bb32008-11-18 20:07:32 +00001566 return -ENOENT;
pbrook6658ffb2007-03-16 23:58:11 +00001567}
1568
aliguoria1d1bb32008-11-18 20:07:32 +00001569/* Remove a specific watchpoint by reference. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001570void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
aliguoria1d1bb32008-11-18 20:07:32 +00001571{
Blue Swirl72cf2d42009-09-12 07:36:22 +00001572 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
edgar_igl7d03f822008-05-17 18:58:29 +00001573
aliguoria1d1bb32008-11-18 20:07:32 +00001574 tlb_flush_page(env, watchpoint->vaddr);
1575
Anthony Liguori7267c092011-08-20 22:09:37 -05001576 g_free(watchpoint);
edgar_igl7d03f822008-05-17 18:58:29 +00001577}
1578
aliguoria1d1bb32008-11-18 20:07:32 +00001579/* Remove all matching watchpoints. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001580void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
aliguoria1d1bb32008-11-18 20:07:32 +00001581{
aliguoric0ce9982008-11-25 22:13:57 +00001582 CPUWatchpoint *wp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001583
Blue Swirl72cf2d42009-09-12 07:36:22 +00001584 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001585 if (wp->flags & mask)
1586 cpu_watchpoint_remove_by_ref(env, wp);
aliguoric0ce9982008-11-25 22:13:57 +00001587 }
aliguoria1d1bb32008-11-18 20:07:32 +00001588}
Paul Brookc527ee82010-03-01 03:31:14 +00001589#endif
aliguoria1d1bb32008-11-18 20:07:32 +00001590
1591/* Add a breakpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001592int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
aliguoria1d1bb32008-11-18 20:07:32 +00001593 CPUBreakpoint **breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001594{
bellard1fddef42005-04-17 19:16:13 +00001595#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001596 CPUBreakpoint *bp;
ths3b46e622007-09-17 08:09:54 +00001597
Anthony Liguori7267c092011-08-20 22:09:37 -05001598 bp = g_malloc(sizeof(*bp));
aliguoria1d1bb32008-11-18 20:07:32 +00001599
1600 bp->pc = pc;
1601 bp->flags = flags;
1602
aliguori2dc9f412008-11-18 20:56:59 +00001603 /* keep all GDB-injected breakpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001604 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001605 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001606 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001607 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001608
1609 breakpoint_invalidate(env, pc);
1610
1611 if (breakpoint)
1612 *breakpoint = bp;
1613 return 0;
1614#else
1615 return -ENOSYS;
1616#endif
1617}
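/* Usage sketch (illustrative; the wrapper is invented for the example):
   roughly what the gdbstub does -- break at a guest PC, run until
   EXCP_DEBUG, then remove the breakpoint. */
#if 0
static void example_break_at(CPUArchState *env, target_ulong pc)
{
    CPUBreakpoint *bp;

    if (cpu_breakpoint_insert(env, pc, BP_GDB, &bp) == 0) {
        /* ... cpu_exec() returns EXCP_DEBUG once pc is reached ... */
        cpu_breakpoint_remove_by_ref(env, bp);
    }
}
#endif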
1618
1619/* Remove a specific breakpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001620int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
aliguoria1d1bb32008-11-18 20:07:32 +00001621{
1622#if defined(TARGET_HAS_ICE)
1623 CPUBreakpoint *bp;
1624
Blue Swirl72cf2d42009-09-12 07:36:22 +00001625 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00001626 if (bp->pc == pc && bp->flags == flags) {
1627 cpu_breakpoint_remove_by_ref(env, bp);
bellard4c3a88a2003-07-26 12:06:08 +00001628 return 0;
aliguoria1d1bb32008-11-18 20:07:32 +00001629 }
bellard4c3a88a2003-07-26 12:06:08 +00001630 }
aliguoria1d1bb32008-11-18 20:07:32 +00001631 return -ENOENT;
bellard4c3a88a2003-07-26 12:06:08 +00001632#else
aliguoria1d1bb32008-11-18 20:07:32 +00001633 return -ENOSYS;
bellard4c3a88a2003-07-26 12:06:08 +00001634#endif
1635}
1636
aliguoria1d1bb32008-11-18 20:07:32 +00001637/* Remove a specific breakpoint by reference. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001638void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001639{
bellard1fddef42005-04-17 19:16:13 +00001640#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001641 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
bellardd720b932004-04-25 17:57:43 +00001642
aliguoria1d1bb32008-11-18 20:07:32 +00001643 breakpoint_invalidate(env, breakpoint->pc);
1644
Anthony Liguori7267c092011-08-20 22:09:37 -05001645 g_free(breakpoint);
aliguoria1d1bb32008-11-18 20:07:32 +00001646#endif
1647}
1648
1649/* Remove all matching breakpoints. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001650void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
aliguoria1d1bb32008-11-18 20:07:32 +00001651{
1652#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001653 CPUBreakpoint *bp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001654
Blue Swirl72cf2d42009-09-12 07:36:22 +00001655 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001656 if (bp->flags & mask)
1657 cpu_breakpoint_remove_by_ref(env, bp);
aliguoric0ce9982008-11-25 22:13:57 +00001658 }
bellard4c3a88a2003-07-26 12:06:08 +00001659#endif
1660}
1661
bellardc33a3462003-07-29 20:50:33 +00001662/* enable or disable single step mode. EXCP_DEBUG is returned by the
1663 CPU loop after each instruction */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001664void cpu_single_step(CPUArchState *env, int enabled)
bellardc33a3462003-07-29 20:50:33 +00001665{
bellard1fddef42005-04-17 19:16:13 +00001666#if defined(TARGET_HAS_ICE)
bellardc33a3462003-07-29 20:50:33 +00001667 if (env->singlestep_enabled != enabled) {
1668 env->singlestep_enabled = enabled;
aliguorie22a25c2009-03-12 20:12:48 +00001669 if (kvm_enabled())
1670 kvm_update_guest_debug(env, 0);
1671 else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01001672 /* must flush all the translated code to avoid inconsistencies */
aliguorie22a25c2009-03-12 20:12:48 +00001673 /* XXX: only flush what is necessary */
1674 tb_flush(env);
1675 }
bellardc33a3462003-07-29 20:50:33 +00001676 }
1677#endif
1678}
1679
Andreas Färber9349b4f2012-03-14 01:38:32 +01001680static void cpu_unlink_tb(CPUArchState *env)
bellardea041c02003-06-25 16:16:50 +00001681{
pbrookd5975362008-06-07 20:50:51 +00001682 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1683 problem and hope the cpu will stop of its own accord. For userspace
1684 emulation this often isn't actually as bad as it sounds. Often
1685 signals are used primarily to interrupt blocking syscalls. */
aurel323098dba2009-03-07 21:28:24 +00001686 TranslationBlock *tb;
Anthony Liguoric227f092009-10-01 16:12:16 -05001687 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
aurel323098dba2009-03-07 21:28:24 +00001688
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001689 spin_lock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001690 tb = env->current_tb;
1691 /* if the cpu is currently executing code, we must unlink it and
1692 all the potentially executing TB */
Riku Voipiof76cfe52009-12-04 15:16:30 +02001693 if (tb) {
aurel323098dba2009-03-07 21:28:24 +00001694 env->current_tb = NULL;
1695 tb_reset_jump_recursive(tb);
aurel323098dba2009-03-07 21:28:24 +00001696 }
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001697 spin_unlock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001698}
1699
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001700#ifndef CONFIG_USER_ONLY
aurel323098dba2009-03-07 21:28:24 +00001701/* mask must never be zero, except for A20 change call */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001702static void tcg_handle_interrupt(CPUArchState *env, int mask)
aurel323098dba2009-03-07 21:28:24 +00001703{
1704 int old_mask;
1705
1706 old_mask = env->interrupt_request;
1707 env->interrupt_request |= mask;
1708
aliguori8edac962009-04-24 18:03:45 +00001709 /*
1710 * If called from iothread context, wake the target cpu in
1711 * case it's halted.
1712 */
Jan Kiszkab7680cb2011-03-12 17:43:51 +01001713 if (!qemu_cpu_is_self(env)) {
aliguori8edac962009-04-24 18:03:45 +00001714 qemu_cpu_kick(env);
1715 return;
1716 }
aliguori8edac962009-04-24 18:03:45 +00001717
pbrook2e70f6e2008-06-29 01:03:05 +00001718 if (use_icount) {
pbrook266910c2008-07-09 15:31:50 +00001719 env->icount_decr.u16.high = 0xffff;
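        /* the write above makes the 32-bit icount_decr value negative,
           forcing the TB to exit at its next icount check */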
pbrook2e70f6e2008-06-29 01:03:05 +00001720 if (!can_do_io(env)
aurel32be214e62009-03-06 21:48:00 +00001721 && (mask & ~old_mask) != 0) {
pbrook2e70f6e2008-06-29 01:03:05 +00001722 cpu_abort(env, "Raised interrupt while not in I/O function");
1723 }
pbrook2e70f6e2008-06-29 01:03:05 +00001724 } else {
aurel323098dba2009-03-07 21:28:24 +00001725 cpu_unlink_tb(env);
bellardea041c02003-06-25 16:16:50 +00001726 }
1727}
1728
Jan Kiszkaec6959d2011-04-13 01:32:56 +02001729CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1730
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001731#else /* CONFIG_USER_ONLY */
1732
Andreas Färber9349b4f2012-03-14 01:38:32 +01001733void cpu_interrupt(CPUArchState *env, int mask)
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001734{
1735 env->interrupt_request |= mask;
1736 cpu_unlink_tb(env);
1737}
1738#endif /* CONFIG_USER_ONLY */
1739
Andreas Färber9349b4f2012-03-14 01:38:32 +01001740void cpu_reset_interrupt(CPUArchState *env, int mask)
bellardb54ad042004-05-20 13:42:52 +00001741{
1742 env->interrupt_request &= ~mask;
1743}
1744
Andreas Färber9349b4f2012-03-14 01:38:32 +01001745void cpu_exit(CPUArchState *env)
aurel323098dba2009-03-07 21:28:24 +00001746{
1747 env->exit_request = 1;
1748 cpu_unlink_tb(env);
1749}
1750
Andreas Färber9349b4f2012-03-14 01:38:32 +01001751void cpu_abort(CPUArchState *env, const char *fmt, ...)
bellard75012672003-06-21 13:11:07 +00001752{
1753 va_list ap;
pbrook493ae1f2007-11-23 16:53:59 +00001754 va_list ap2;
bellard75012672003-06-21 13:11:07 +00001755
1756 va_start(ap, fmt);
pbrook493ae1f2007-11-23 16:53:59 +00001757 va_copy(ap2, ap);
bellard75012672003-06-21 13:11:07 +00001758 fprintf(stderr, "qemu: fatal: ");
1759 vfprintf(stderr, fmt, ap);
1760 fprintf(stderr, "\n");
Peter Maydell6fd2a022012-10-05 15:04:43 +01001761 cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
aliguori93fcfe32009-01-15 22:34:14 +00001762 if (qemu_log_enabled()) {
1763 qemu_log("qemu: fatal: ");
1764 qemu_log_vprintf(fmt, ap2);
1765 qemu_log("\n");
Peter Maydell6fd2a022012-10-05 15:04:43 +01001766 log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
aliguori31b1a7b2009-01-15 22:35:09 +00001767 qemu_log_flush();
aliguori93fcfe32009-01-15 22:34:14 +00001768 qemu_log_close();
balrog924edca2007-06-10 14:07:13 +00001769 }
pbrook493ae1f2007-11-23 16:53:59 +00001770 va_end(ap2);
j_mayerf9373292007-09-29 12:18:20 +00001771 va_end(ap);
Riku Voipiofd052bf2010-01-25 14:30:49 +02001772#if defined(CONFIG_USER_ONLY)
1773 {
1774 struct sigaction act;
1775 sigfillset(&act.sa_mask);
1776 act.sa_handler = SIG_DFL;
1777 sigaction(SIGABRT, &act, NULL);
1778 }
1779#endif
bellard75012672003-06-21 13:11:07 +00001780 abort();
1781}
1782
Andreas Färber9349b4f2012-03-14 01:38:32 +01001783CPUArchState *cpu_copy(CPUArchState *env)
thsc5be9f02007-02-28 20:20:53 +00001784{
Andreas Färber9349b4f2012-03-14 01:38:32 +01001785 CPUArchState *new_env = cpu_init(env->cpu_model_str);
1786 CPUArchState *next_cpu = new_env->next_cpu;
thsc5be9f02007-02-28 20:20:53 +00001787 int cpu_index = new_env->cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001788#if defined(TARGET_HAS_ICE)
1789 CPUBreakpoint *bp;
1790 CPUWatchpoint *wp;
1791#endif
1792
Andreas Färber9349b4f2012-03-14 01:38:32 +01001793 memcpy(new_env, env, sizeof(CPUArchState));
aliguori5a38f082009-01-15 20:16:51 +00001794
1795 /* Preserve chaining and index. */
thsc5be9f02007-02-28 20:20:53 +00001796 new_env->next_cpu = next_cpu;
1797 new_env->cpu_index = cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001798
1799 /* Clone all break/watchpoints.
1800 Note: Once we support ptrace with hw-debug register access, make sure
1801 BP_CPU break/watchpoints are handled correctly on clone. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00001802 QTAILQ_INIT(&env->breakpoints);
1803 QTAILQ_INIT(&env->watchpoints);
aliguori5a38f082009-01-15 20:16:51 +00001804#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001805 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001806 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1807 }
Blue Swirl72cf2d42009-09-12 07:36:22 +00001808 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001809 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1810 wp->flags, NULL);
1811 }
1812#endif
1813
thsc5be9f02007-02-28 20:20:53 +00001814 return new_env;
1815}
1816
bellard01243112004-01-04 15:48:17 +00001817#if !defined(CONFIG_USER_ONLY)
Blue Swirl0cac1b62012-04-09 16:50:52 +00001818void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
edgar_igl5c751e92008-05-06 08:44:21 +00001819{
1820 unsigned int i;
1821
1822 /* Discard jump cache entries for any tb which might potentially
1823 overlap the flushed page. */
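    /* A TB may start on the preceding page and extend into the flushed
       one, which is why the hash bucket of the previous page is cleared
       as well. */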
1824 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1825 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001826 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001827
1828 i = tb_jmp_cache_hash_page(addr);
1829 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001830 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001831}
1832
Juan Quintelad24981d2012-05-22 00:42:40 +02001833static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
1834 uintptr_t length)
bellard1ccde1c2004-02-06 19:46:14 +00001835{
Juan Quintelad24981d2012-05-22 00:42:40 +02001836 uintptr_t start1;
bellardf23db162005-08-21 19:12:28 +00001837
bellard1ccde1c2004-02-06 19:46:14 +00001838 /* we modify the TLB cache so that the dirty bit will be set again
1839 when accessing the range */
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001840 start1 = (uintptr_t)qemu_safe_ram_ptr(start);
Stefan Weila57d23e2011-04-30 22:49:26 +02001841 /* Check that we don't span multiple blocks - this breaks the
pbrook5579c7f2009-04-11 14:47:08 +00001842 address comparisons below. */
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001843 if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
pbrook5579c7f2009-04-11 14:47:08 +00001844 != (end - 1) - start) {
1845 abort();
1846 }
Blue Swirle5548612012-04-21 13:08:33 +00001847 cpu_tlb_reset_dirty_all(start1, length);
Juan Quintelad24981d2012-05-22 00:42:40 +02001848
1849}
1850
1851/* Note: start and end must be within the same ram block. */
1852void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1853 int dirty_flags)
1854{
1855 uintptr_t length;
1856
1857 start &= TARGET_PAGE_MASK;
1858 end = TARGET_PAGE_ALIGN(end);
1859
1860 length = end - start;
1861 if (length == 0)
1862 return;
1863 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
1864
1865 if (tcg_enabled()) {
1866 tlb_reset_dirty_range_all(start, end, length);
1867 }
bellard1ccde1c2004-02-06 19:46:14 +00001868}
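/* Usage sketch (illustrative; assumes the dirty-tracking helpers and
   VGA_DIRTY_FLAG from exec-obsolete.h): a client such as the VGA code
   checks which pages were written since the last frame, redraws them,
   and then clears its dirty bits. */
#if 0
static void example_sync_vga_dirty(ram_addr_t start, ram_addr_t end)
{
    ram_addr_t addr;

    for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        if (cpu_physical_memory_get_dirty(addr, TARGET_PAGE_SIZE,
                                          VGA_DIRTY_FLAG)) {
            /* ... redraw the scanlines backed by this page ... */
        }
    }
    cpu_physical_memory_reset_dirty(start, end, VGA_DIRTY_FLAG);
}
#endif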
1869
aliguori74576192008-10-06 14:02:03 +00001870int cpu_physical_memory_set_dirty_tracking(int enable)
1871{
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001872 int ret = 0;
aliguori74576192008-10-06 14:02:03 +00001873 in_migration = enable;
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001874 return ret;
aliguori74576192008-10-06 14:02:03 +00001875}
1876
Blue Swirle5548612012-04-21 13:08:33 +00001877target_phys_addr_t memory_region_section_get_iotlb(CPUArchState *env,
1878 MemoryRegionSection *section,
1879 target_ulong vaddr,
1880 target_phys_addr_t paddr,
1881 int prot,
1882 target_ulong *address)
1883{
1884 target_phys_addr_t iotlb;
1885 CPUWatchpoint *wp;
1886
Blue Swirlcc5bea62012-04-14 14:56:48 +00001887 if (memory_region_is_ram(section->mr)) {
Blue Swirle5548612012-04-21 13:08:33 +00001888 /* Normal RAM. */
1889 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00001890 + memory_region_section_addr(section, paddr);
Blue Swirle5548612012-04-21 13:08:33 +00001891 if (!section->readonly) {
1892 iotlb |= phys_section_notdirty;
1893 } else {
1894 iotlb |= phys_section_rom;
1895 }
1896 } else {
1897 /* IO handlers are currently passed a physical address.
1898 It would be nice to pass an offset from the base address
1899 of that region. This would avoid having to special case RAM,
1900 and avoid full address decoding in every device.
1901 We can't use the high bits of pd for this because
1902 IO_MEM_ROMD uses these as a ram address. */
1903 iotlb = section - phys_sections;
Blue Swirlcc5bea62012-04-14 14:56:48 +00001904 iotlb += memory_region_section_addr(section, paddr);
Blue Swirle5548612012-04-21 13:08:33 +00001905 }
1906
1907 /* Make accesses to pages with watchpoints go via the
1908 watchpoint trap routines. */
1909 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1910 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
1911 /* Avoid trapping reads of pages with a write breakpoint. */
1912 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
1913 iotlb = phys_section_watch + paddr;
1914 *address |= TLB_MMIO;
1915 break;
1916 }
1917 }
1918 }
1919
1920 return iotlb;
1921}
1922
bellard01243112004-01-04 15:48:17 +00001923#else
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03001924/*
1925 * Walks guest process memory "regions" one by one
1926 * and calls callback function 'fn' for each region.
1927 */
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001928
1929struct walk_memory_regions_data
bellard9fa3e852004-01-04 18:06:42 +00001930{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001931 walk_memory_regions_fn fn;
1932 void *priv;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001933 uintptr_t start;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001934 int prot;
1935};
bellard9fa3e852004-01-04 18:06:42 +00001936
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001937static int walk_memory_regions_end(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00001938 abi_ulong end, int new_prot)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001939{
1940 if (data->start != -1ul) {
1941 int rc = data->fn(data->priv, data->start, end, data->prot);
1942 if (rc != 0) {
1943 return rc;
bellard9fa3e852004-01-04 18:06:42 +00001944 }
bellard33417e72003-08-10 21:47:01 +00001945 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001946
1947 data->start = (new_prot ? end : -1ul);
1948 data->prot = new_prot;
1949
1950 return 0;
1951}
1952
1953static int walk_memory_regions_1(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00001954 abi_ulong base, int level, void **lp)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001955{
Paul Brookb480d9b2010-03-12 23:23:29 +00001956 abi_ulong pa;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001957 int i, rc;
1958
1959 if (*lp == NULL) {
1960 return walk_memory_regions_end(data, base, 0);
1961 }
1962
1963 if (level == 0) {
1964 PageDesc *pd = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00001965 for (i = 0; i < L2_SIZE; ++i) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001966 int prot = pd[i].flags;
1967
1968 pa = base | (i << TARGET_PAGE_BITS);
1969 if (prot != data->prot) {
1970 rc = walk_memory_regions_end(data, pa, prot);
1971 if (rc != 0) {
1972 return rc;
1973 }
1974 }
1975 }
1976 } else {
1977 void **pp = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00001978 for (i = 0; i < L2_SIZE; ++i) {
Paul Brookb480d9b2010-03-12 23:23:29 +00001979 pa = base | ((abi_ulong)i <<
1980 (TARGET_PAGE_BITS + L2_BITS * level));
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001981 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
1982 if (rc != 0) {
1983 return rc;
1984 }
1985 }
1986 }
1987
1988 return 0;
1989}
1990
1991int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
1992{
1993 struct walk_memory_regions_data data;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001994 uintptr_t i;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001995
1996 data.fn = fn;
1997 data.priv = priv;
1998 data.start = -1ul;
1999 data.prot = 0;
2000
2001 for (i = 0; i < V_L1_SIZE; i++) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002002 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002003 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2004 if (rc != 0) {
2005 return rc;
2006 }
2007 }
2008
2009 return walk_memory_regions_end(&data, 0, 0);
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002010}
2011
Paul Brookb480d9b2010-03-12 23:23:29 +00002012static int dump_region(void *priv, abi_ulong start,
2013 abi_ulong end, unsigned long prot)
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002014{
2015 FILE *f = (FILE *)priv;
2016
Paul Brookb480d9b2010-03-12 23:23:29 +00002017 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2018 " "TARGET_ABI_FMT_lx" %c%c%c\n",
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002019 start, end, end - start,
2020 ((prot & PAGE_READ) ? 'r' : '-'),
2021 ((prot & PAGE_WRITE) ? 'w' : '-'),
2022 ((prot & PAGE_EXEC) ? 'x' : '-'));
2023
2024 return (0);
2025}
2026
2027/* dump memory mappings */
2028void page_dump(FILE *f)
2029{
2030 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2031 "start", "end", "size", "prot");
2032 walk_memory_regions(f, dump_region);
bellard33417e72003-08-10 21:47:01 +00002033}
2034
pbrook53a59602006-03-25 19:31:22 +00002035int page_get_flags(target_ulong address)
bellard33417e72003-08-10 21:47:01 +00002036{
bellard9fa3e852004-01-04 18:06:42 +00002037 PageDesc *p;
2038
2039 p = page_find(address >> TARGET_PAGE_BITS);
bellard33417e72003-08-10 21:47:01 +00002040 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00002041 return 0;
2042 return p->flags;
bellard33417e72003-08-10 21:47:01 +00002043}
2044
Richard Henderson376a7902010-03-10 15:57:04 -08002045/* Modify the flags of a page and invalidate the code if necessary.
2046 The flag PAGE_WRITE_ORG is set automatically depending
2047 on PAGE_WRITE. The mmap_lock should already be held. */
pbrook53a59602006-03-25 19:31:22 +00002048void page_set_flags(target_ulong start, target_ulong end, int flags)
bellard9fa3e852004-01-04 18:06:42 +00002049{
Richard Henderson376a7902010-03-10 15:57:04 -08002050 target_ulong addr, len;
bellard9fa3e852004-01-04 18:06:42 +00002051
Richard Henderson376a7902010-03-10 15:57:04 -08002052 /* This function should never be called with addresses outside the
2053 guest address space. If this assert fires, it probably indicates
2054 a missing call to h2g_valid. */
Paul Brookb480d9b2010-03-12 23:23:29 +00002055#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2056 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002057#endif
2058 assert(start < end);
2059
bellard9fa3e852004-01-04 18:06:42 +00002060 start = start & TARGET_PAGE_MASK;
2061 end = TARGET_PAGE_ALIGN(end);
Richard Henderson376a7902010-03-10 15:57:04 -08002062
2063 if (flags & PAGE_WRITE) {
bellard9fa3e852004-01-04 18:06:42 +00002064 flags |= PAGE_WRITE_ORG;
Richard Henderson376a7902010-03-10 15:57:04 -08002065 }
2066
2067 for (addr = start, len = end - start;
2068 len != 0;
2069 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2070 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2071
2072 /* If the write protection bit is set, then we invalidate
2073 the code inside. */
ths5fafdf22007-09-16 21:08:06 +00002074 if (!(p->flags & PAGE_WRITE) &&
bellard9fa3e852004-01-04 18:06:42 +00002075 (flags & PAGE_WRITE) &&
2076 p->first_tb) {
bellardd720b932004-04-25 17:57:43 +00002077 tb_invalidate_phys_page(addr, 0, NULL);
bellard9fa3e852004-01-04 18:06:42 +00002078 }
2079 p->flags = flags;
2080 }
bellard9fa3e852004-01-04 18:06:42 +00002081}
2082
ths3d97b402007-11-02 19:02:07 +00002083int page_check_range(target_ulong start, target_ulong len, int flags)
2084{
2085 PageDesc *p;
2086 target_ulong end;
2087 target_ulong addr;
2088
Richard Henderson376a7902010-03-10 15:57:04 -08002089 /* This function should never be called with addresses outside the
2090 guest address space. If this assert fires, it probably indicates
2091 a missing call to h2g_valid. */
Blue Swirl338e9e62010-03-13 09:48:08 +00002092#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2093 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002094#endif
2095
Richard Henderson3e0650a2010-03-29 10:54:42 -07002096 if (len == 0) {
2097 return 0;
2098 }
Richard Henderson376a7902010-03-10 15:57:04 -08002099 if (start + len - 1 < start) {
2100 /* We've wrapped around. */
balrog55f280c2008-10-28 10:24:11 +00002101 return -1;
Richard Henderson376a7902010-03-10 15:57:04 -08002102 }
balrog55f280c2008-10-28 10:24:11 +00002103
ths3d97b402007-11-02 19:02:07 +00002104 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2105 start = start & TARGET_PAGE_MASK;
2106
Richard Henderson376a7902010-03-10 15:57:04 -08002107 for (addr = start, len = end - start;
2108 len != 0;
2109 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
ths3d97b402007-11-02 19:02:07 +00002110 p = page_find(addr >> TARGET_PAGE_BITS);
2111 if( !p )
2112 return -1;
2113 if( !(p->flags & PAGE_VALID) )
2114 return -1;
2115
bellarddae32702007-11-14 10:51:00 +00002116 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
ths3d97b402007-11-02 19:02:07 +00002117 return -1;
bellarddae32702007-11-14 10:51:00 +00002118 if (flags & PAGE_WRITE) {
2119 if (!(p->flags & PAGE_WRITE_ORG))
2120 return -1;
2121 /* unprotect the page if it was put read-only because it
2122 contains translated code */
2123 if (!(p->flags & PAGE_WRITE)) {
2124 if (!page_unprotect(addr, 0, NULL))
2125 return -1;
2126 }
2127 return 0;
2128 }
ths3d97b402007-11-02 19:02:07 +00002129 }
2130 return 0;
2131}
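/* Usage sketch (illustrative; the wrapper is invented for the example):
   an access_ok()-style check in user emulation is essentially a thin
   wrapper over page_check_range(). */
#if 0
static int example_access_ok(int writable, target_ulong addr, target_ulong len)
{
    int flags = writable ? (PAGE_READ | PAGE_WRITE) : PAGE_READ;

    return page_check_range(addr, len, flags) == 0;
}
#endif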
2132
bellard9fa3e852004-01-04 18:06:42 +00002133/* called from signal handler: invalidate the code and unprotect the
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002134 page. Return TRUE if the fault was successfully handled. */
Stefan Weil6375e092012-04-06 22:26:15 +02002135int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00002136{
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002137 unsigned int prot;
2138 PageDesc *p;
pbrook53a59602006-03-25 19:31:22 +00002139 target_ulong host_start, host_end, addr;
bellard9fa3e852004-01-04 18:06:42 +00002140
pbrookc8a706f2008-06-02 16:16:42 +00002141 /* Technically this isn't safe inside a signal handler. However we
2142 know this only ever happens in a synchronous SEGV handler, so in
2143 practice it seems to be ok. */
2144 mmap_lock();
2145
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002146 p = page_find(address >> TARGET_PAGE_BITS);
2147 if (!p) {
pbrookc8a706f2008-06-02 16:16:42 +00002148 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002149 return 0;
pbrookc8a706f2008-06-02 16:16:42 +00002150 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002151
bellard9fa3e852004-01-04 18:06:42 +00002152 /* if the page was really writable, then we change its
2153 protection back to writable */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002154 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2155 host_start = address & qemu_host_page_mask;
2156 host_end = host_start + qemu_host_page_size;
2157
2158 prot = 0;
2159 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2160 p = page_find(addr >> TARGET_PAGE_BITS);
2161 p->flags |= PAGE_WRITE;
2162 prot |= p->flags;
2163
bellard9fa3e852004-01-04 18:06:42 +00002164 /* and since the content will be modified, we must invalidate
2165 the corresponding translated code. */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002166 tb_invalidate_phys_page(addr, pc, puc);
bellard9fa3e852004-01-04 18:06:42 +00002167#ifdef DEBUG_TB_CHECK
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002168 tb_invalidate_check(addr);
bellard9fa3e852004-01-04 18:06:42 +00002169#endif
bellard9fa3e852004-01-04 18:06:42 +00002170 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002171 mprotect((void *)g2h(host_start), qemu_host_page_size,
2172 prot & PAGE_BITS);
2173
2174 mmap_unlock();
2175 return 1;
bellard9fa3e852004-01-04 18:06:42 +00002176 }
pbrookc8a706f2008-06-02 16:16:42 +00002177 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002178 return 0;
2179}
bellard9fa3e852004-01-04 18:06:42 +00002180#endif /* defined(CONFIG_USER_ONLY) */
2181
pbrooke2eef172008-06-08 01:09:01 +00002182#if !defined(CONFIG_USER_ONLY)
pbrook8da3ff12008-12-01 18:59:50 +00002183
Paul Brookc04b2b72010-03-01 03:31:14 +00002184#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2185typedef struct subpage_t {
Avi Kivity70c68e42012-01-02 12:32:48 +02002186 MemoryRegion iomem;
Paul Brookc04b2b72010-03-01 03:31:14 +00002187 target_phys_addr_t base;
Avi Kivity5312bd82012-02-12 18:32:55 +02002188 uint16_t sub_section[TARGET_PAGE_SIZE];
Paul Brookc04b2b72010-03-01 03:31:14 +00002189} subpage_t;
2190
Anthony Liguoric227f092009-10-01 16:12:16 -05002191static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002192 uint16_t section);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002193static subpage_t *subpage_init(target_phys_addr_t base);
Avi Kivity5312bd82012-02-12 18:32:55 +02002194static void destroy_page_desc(uint16_t section_index)
Avi Kivity54688b12012-02-09 17:34:32 +02002195{
Avi Kivity5312bd82012-02-12 18:32:55 +02002196 MemoryRegionSection *section = &phys_sections[section_index];
2197 MemoryRegion *mr = section->mr;
Avi Kivity54688b12012-02-09 17:34:32 +02002198
2199 if (mr->subpage) {
2200 subpage_t *subpage = container_of(mr, subpage_t, iomem);
2201 memory_region_destroy(&subpage->iomem);
2202 g_free(subpage);
2203 }
2204}
2205
Avi Kivity4346ae32012-02-10 17:00:01 +02002206static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
Avi Kivity54688b12012-02-09 17:34:32 +02002207{
2208 unsigned i;
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002209 PhysPageEntry *p;
Avi Kivity54688b12012-02-09 17:34:32 +02002210
Avi Kivityc19e8802012-02-13 20:25:31 +02002211 if (lp->ptr == PHYS_MAP_NODE_NIL) {
Avi Kivity54688b12012-02-09 17:34:32 +02002212 return;
2213 }
2214
Avi Kivityc19e8802012-02-13 20:25:31 +02002215 p = phys_map_nodes[lp->ptr];
Avi Kivity4346ae32012-02-10 17:00:01 +02002216 for (i = 0; i < L2_SIZE; ++i) {
Avi Kivity07f07b32012-02-13 20:45:32 +02002217 if (!p[i].is_leaf) {
Avi Kivity54688b12012-02-09 17:34:32 +02002218 destroy_l2_mapping(&p[i], level - 1);
Avi Kivity4346ae32012-02-10 17:00:01 +02002219 } else {
Avi Kivityc19e8802012-02-13 20:25:31 +02002220 destroy_page_desc(p[i].ptr);
Avi Kivity54688b12012-02-09 17:34:32 +02002221 }
Avi Kivity54688b12012-02-09 17:34:32 +02002222 }
Avi Kivity07f07b32012-02-13 20:45:32 +02002223 lp->is_leaf = 0;
Avi Kivityc19e8802012-02-13 20:25:31 +02002224 lp->ptr = PHYS_MAP_NODE_NIL;
Avi Kivity54688b12012-02-09 17:34:32 +02002225}
2226
2227static void destroy_all_mappings(void)
2228{
Avi Kivity3eef53d2012-02-10 14:57:31 +02002229 destroy_l2_mapping(&phys_map, P_L2_LEVELS - 1);
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002230 phys_map_nodes_reset();
Avi Kivity54688b12012-02-09 17:34:32 +02002231}
2232
Avi Kivity5312bd82012-02-12 18:32:55 +02002233static uint16_t phys_section_add(MemoryRegionSection *section)
2234{
2235 if (phys_sections_nb == phys_sections_nb_alloc) {
2236 phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
2237 phys_sections = g_renew(MemoryRegionSection, phys_sections,
2238 phys_sections_nb_alloc);
2239 }
2240 phys_sections[phys_sections_nb] = *section;
2241 return phys_sections_nb++;
2242}
2243
2244static void phys_sections_clear(void)
2245{
2246 phys_sections_nb = 0;
2247}
2248
Avi Kivity0f0cb162012-02-13 17:14:32 +02002249static void register_subpage(MemoryRegionSection *section)
2250{
2251 subpage_t *subpage;
2252 target_phys_addr_t base = section->offset_within_address_space
2253 & TARGET_PAGE_MASK;
Avi Kivityf3705d52012-03-08 16:16:34 +02002254 MemoryRegionSection *existing = phys_page_find(base >> TARGET_PAGE_BITS);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002255 MemoryRegionSection subsection = {
2256 .offset_within_address_space = base,
2257 .size = TARGET_PAGE_SIZE,
2258 };
Avi Kivity0f0cb162012-02-13 17:14:32 +02002259 target_phys_addr_t start, end;
2260
Avi Kivityf3705d52012-03-08 16:16:34 +02002261 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002262
Avi Kivityf3705d52012-03-08 16:16:34 +02002263 if (!(existing->mr->subpage)) {
Avi Kivity0f0cb162012-02-13 17:14:32 +02002264 subpage = subpage_init(base);
2265 subsection.mr = &subpage->iomem;
Avi Kivity29990972012-02-13 20:21:20 +02002266 phys_page_set(base >> TARGET_PAGE_BITS, 1,
2267 phys_section_add(&subsection));
Avi Kivity0f0cb162012-02-13 17:14:32 +02002268 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02002269 subpage = container_of(existing->mr, subpage_t, iomem);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002270 }
2271 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
Tyler Halladb2a9b2012-07-25 18:45:03 -04002272 end = start + section->size - 1;
Avi Kivity0f0cb162012-02-13 17:14:32 +02002273 subpage_register(subpage, start, end, phys_section_add(section));
2274}
2275
2276
2277static void register_multipage(MemoryRegionSection *section)
bellard33417e72003-08-10 21:47:01 +00002278{
Avi Kivitydd811242012-01-02 12:17:03 +02002279 target_phys_addr_t start_addr = section->offset_within_address_space;
2280 ram_addr_t size = section->size;
Avi Kivity29990972012-02-13 20:21:20 +02002281 target_phys_addr_t addr;
Avi Kivity5312bd82012-02-12 18:32:55 +02002282 uint16_t section_index = phys_section_add(section);
Avi Kivitydd811242012-01-02 12:17:03 +02002283
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002284 assert(size);
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002285
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002286 addr = start_addr;
Avi Kivity29990972012-02-13 20:21:20 +02002287 phys_page_set(addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
2288 section_index);
bellard33417e72003-08-10 21:47:01 +00002289}
2290
Avi Kivity0f0cb162012-02-13 17:14:32 +02002291void cpu_register_physical_memory_log(MemoryRegionSection *section,
2292 bool readonly)
2293{
2294 MemoryRegionSection now = *section, remain = *section;
2295
2296 if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
2297 || (now.size < TARGET_PAGE_SIZE)) {
2298 now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
2299 - now.offset_within_address_space,
2300 now.size);
2301 register_subpage(&now);
2302 remain.size -= now.size;
2303 remain.offset_within_address_space += now.size;
2304 remain.offset_within_region += now.size;
2305 }
Tyler Hall69b67642012-07-25 18:45:04 -04002306 while (remain.size >= TARGET_PAGE_SIZE) {
2307 now = remain;
2308 if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
2309 now.size = TARGET_PAGE_SIZE;
2310 register_subpage(&now);
2311 } else {
2312 now.size &= TARGET_PAGE_MASK;
2313 register_multipage(&now);
2314 }
Avi Kivity0f0cb162012-02-13 17:14:32 +02002315 remain.size -= now.size;
2316 remain.offset_within_address_space += now.size;
2317 remain.offset_within_region += now.size;
2318 }
2319 now = remain;
2320 if (now.size) {
2321 register_subpage(&now);
2322 }
2323}
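/* Worked example (illustrative, assuming 4 KiB target pages and an
   offset_within_region with the same 0x800 misalignment): a section at
   offset_within_address_space 0x1800 with size 0x5000 is split as
       0x1800..0x1fff head -> register_subpage() (unaligned start)
       0x2000..0x5fff body -> register_multipage() (whole pages)
       0x6000..0x67ff tail -> register_subpage() (partial last page)
   0x800 + 0x4000 + 0x800 == 0x5000 bytes in total. */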
2324
2325
Anthony Liguoric227f092009-10-01 16:12:16 -05002326void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002327{
2328 if (kvm_enabled())
2329 kvm_coalesce_mmio_region(addr, size);
2330}
2331
Anthony Liguoric227f092009-10-01 16:12:16 -05002332void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002333{
2334 if (kvm_enabled())
2335 kvm_uncoalesce_mmio_region(addr, size);
2336}
2337
Sheng Yang62a27442010-01-26 19:21:16 +08002338void qemu_flush_coalesced_mmio_buffer(void)
2339{
2340 if (kvm_enabled())
2341 kvm_flush_coalesced_mmio_buffer();
2342}
2343
Marcelo Tosattic9027602010-03-01 20:25:08 -03002344#if defined(__linux__) && !defined(TARGET_S390X)
2345
2346#include <sys/vfs.h>
2347
2348#define HUGETLBFS_MAGIC 0x958458f6
2349
2350static long gethugepagesize(const char *path)
2351{
2352 struct statfs fs;
2353 int ret;
2354
2355 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002356 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002357 } while (ret != 0 && errno == EINTR);
2358
2359 if (ret != 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002360 perror(path);
2361 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002362 }
2363
2364 if (fs.f_type != HUGETLBFS_MAGIC)
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002365 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002366
2367 return fs.f_bsize;
2368}
2369
Alex Williamson04b16652010-07-02 11:13:17 -06002370static void *file_ram_alloc(RAMBlock *block,
2371 ram_addr_t memory,
2372 const char *path)
Marcelo Tosattic9027602010-03-01 20:25:08 -03002373{
2374 char *filename;
2375 void *area;
2376 int fd;
2377#ifdef MAP_POPULATE
2378 int flags;
2379#endif
2380 unsigned long hpagesize;
2381
2382 hpagesize = gethugepagesize(path);
2383 if (!hpagesize) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002384 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002385 }
2386
2387 if (memory < hpagesize) {
2388 return NULL;
2389 }
2390
2391 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2392 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2393 return NULL;
2394 }
2395
2396 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002397 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002398 }
2399
2400 fd = mkstemp(filename);
2401 if (fd < 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002402 perror("unable to create backing store for hugepages");
2403 free(filename);
2404 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002405 }
2406 unlink(filename);
2407 free(filename);
2408
2409 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2410
2411 /*
2412 * ftruncate is not supported by hugetlbfs in older
2413 * hosts, so don't bother bailing out on errors.
2414 * If anything goes wrong with it under other filesystems,
2415 * mmap will fail.
2416 */
2417 if (ftruncate(fd, memory))
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002418 perror("ftruncate");
Marcelo Tosattic9027602010-03-01 20:25:08 -03002419
2420#ifdef MAP_POPULATE
2421 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2422 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2423 * to sidestep this quirk.
2424 */
2425 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2426 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2427#else
2428 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2429#endif
2430 if (area == MAP_FAILED) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002431 perror("file_ram_alloc: can't mmap RAM pages");
2432 close(fd);
2433 return (NULL);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002434 }
Alex Williamson04b16652010-07-02 11:13:17 -06002435 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002436 return area;
2437}
2438#endif
2439
Alex Williamsond17b5282010-06-25 11:08:38 -06002440static ram_addr_t find_ram_offset(ram_addr_t size)
2441{
Alex Williamson04b16652010-07-02 11:13:17 -06002442 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06002443 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002444
2445 if (QLIST_EMPTY(&ram_list.blocks))
2446 return 0;
2447
2448 QLIST_FOREACH(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002449 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002450
2451 end = block->offset + block->length;
2452
2453 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2454 if (next_block->offset >= end) {
2455 next = MIN(next, next_block->offset);
2456 }
2457 }
2458 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06002459 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06002460 mingap = next - end;
2461 }
2462 }
Alex Williamson3e837b22011-10-31 08:54:09 -06002463
2464 if (offset == RAM_ADDR_MAX) {
2465 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2466 (uint64_t)size);
2467 abort();
2468 }
2469
Alex Williamson04b16652010-07-02 11:13:17 -06002470 return offset;
2471}
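/* Worked example (illustrative): with blocks at [0x0, 0x8000000) and
   [0x10000000, 0x18000000), a request for 0x4000000 bytes sees the gaps
   [0x8000000, 0x10000000) and [0x18000000, ...); the first is the
   smallest gap that fits, so the new block is placed at offset
   0x8000000. */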
2472
2473static ram_addr_t last_ram_offset(void)
2474{
Alex Williamsond17b5282010-06-25 11:08:38 -06002475 RAMBlock *block;
2476 ram_addr_t last = 0;
2477
2478 QLIST_FOREACH(block, &ram_list.blocks, next)
2479 last = MAX(last, block->offset + block->length);
2480
2481 return last;
2482}
2483
Jason Baronddb97f12012-08-02 15:44:16 -04002484static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
2485{
2486 int ret;
2487 QemuOpts *machine_opts;
2488
2489 /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
2490 machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
2491 if (machine_opts &&
2492 !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
2493 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
2494 if (ret) {
2495 perror("qemu_madvise");
2496 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
2497 "but dump_guest_core=off specified\n");
2498 }
2499 }
2500}
2501
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
}

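/* Advise the host kernel that this range is a candidate for page
 * merging (KSM on Linux), unless disabled with -machine mem-merge=off. */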
static int memory_try_enable_merging(void *addr, size_t len)
{
    QemuOpts *opts;

    opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

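/* Allocate and register a new RAMBlock of `size' bytes.  If `host' is
 * non-NULL the caller supplies the backing memory (RAM_PREALLOC_MASK);
 * otherwise it is allocated here via -mem-path, Xen, kvm_vmalloc() or
 * plain qemu_vmalloc().  Returns the block's offset in the ram_addr_t
 * space.  A board or device model would typically go through the
 * wrapper below, e.g. (sketch): offset = qemu_ram_alloc(size, mr); */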
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                memory_try_enable_merging(new_block->host, size);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_vmalloc(size);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}

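/* Drop a RAMBlock whose backing memory was supplied by the caller; only
 * the bookkeeping entry is freed, never the host memory itself. */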
void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            g_free(block);
            return;
        }
    }
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            g_free(block);
            return;
        }
    }
}

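/* Replace the host mapping of a RAM range with fresh anonymous (or
 * file-backed) pages at the same virtual address, keeping the original
 * mapping flags.  Used, e.g., by the hardware memory error (hwpoison)
 * recovery path; caller-preallocated blocks are left untouched. */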
#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            /* Move this entry to the start of the list.  */
            if (block != QLIST_FIRST(&ram_list.blocks)) {
                QLIST_REMOVE(block, next);
                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
            }
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * Same as qemu_get_ram_ptr but avoids reordering ramblocks.
 */
void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QLIST_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);
}

int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset. */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}

static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
#endif
    return 0;
}

static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
#endif
}

static const MemoryRegionOps unassigned_mem_ops = {
    .read = unassigned_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
                               unsigned size)
{
    abort();
}

static void error_mem_write(void *opaque, target_phys_addr_t addr,
                            uint64_t value, unsigned size)
{
    abort();
}

static const MemoryRegionOps error_mem_ops = {
    .read = error_mem_read,
    .write = error_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const MemoryRegionOps rom_mem_ops = {
    .read = error_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

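/* Write handler for pages whose dirty flags are not all set.  It
 * invalidates any TBs translated from the page, performs the store,
 * updates the dirty flags, and re-enables the fast TLB path once the
 * page is fully dirty. */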
static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static const MemoryRegionOps notdirty_mem_ops = {
    .read = error_mem_read,
    .write = notdirty_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUArchState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(env);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, target_phys_addr_t addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(addr, val);
        break;
    case 2:
        stw_phys(addr, val);
        break;
    case 4:
        stl_phys(addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
                             unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    return io_mem_read(section->mr, addr, len);
}

static void subpage_write(void *opaque, target_phys_addr_t addr,
                          uint64_t value, unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx
           " idx %d value %"PRIx64"\n",
           __func__, mmio, len, addr, idx, value);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    io_mem_write(section->mr, addr, value, len);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
                                 unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return ldub_p(ptr);
    case 2: return lduw_p(ptr);
    case 4: return ldl_p(ptr);
    default: abort();
    }
}

static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
                              uint64_t value, unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return stb_p(ptr, value);
    case 2: return stw_p(ptr, value);
    case 4: return stl_p(ptr, value);
    default: abort();
    }
}

static const MemoryRegionOps subpage_ram_ops = {
    .read = subpage_ram_read,
    .write = subpage_ram_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    if (memory_region_is_ram(phys_sections[section].mr)) {
        MemoryRegionSection new_section = phys_sections[section];
        new_section.mr = &io_mem_subpage_ram;
        section = phys_section_add(&new_section);
    }
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

static subpage_t *subpage_init(target_phys_addr_t base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->base = base;
    memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);

    return mmio;
}

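/* Build a catch-all MemoryRegionSection for one of the special I/O
 * regions and register it; core_begin() below uses this to recreate the
 * fixed sections (unassigned, notdirty, rom, watch) on every rebuild. */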
static uint16_t dummy_section(MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = UINT64_MAX,
    };

    return phys_section_add(&section);
}

MemoryRegion *iotlb_to_region(target_phys_addr_t index)
{
    return phys_sections[index & ~TARGET_PAGE_MASK].mr;
}

static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
    memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
                          "subpage-ram", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}

static void core_begin(MemoryListener *listener)
{
    destroy_all_mappings();
    phys_sections_clear();
    phys_map.ptr = PHYS_MAP_NODE_NIL;
    phys_section_unassigned = dummy_section(&io_mem_unassigned);
    phys_section_notdirty = dummy_section(&io_mem_notdirty);
    phys_section_rom = dummy_section(&io_mem_rom);
    phys_section_watch = dummy_section(&io_mem_watch);
}

static void core_commit(MemoryListener *listener)
{
    CPUArchState *env;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}

static void core_region_add(MemoryListener *listener,
                            MemoryRegionSection *section)
{
    cpu_register_physical_memory_log(section, section->readonly);
}

static void core_region_del(MemoryListener *listener,
                            MemoryRegionSection *section)
{
}

static void core_region_nop(MemoryListener *listener,
                            MemoryRegionSection *section)
{
    cpu_register_physical_memory_log(section, section->readonly);
}

static void core_log_start(MemoryListener *listener,
                           MemoryRegionSection *section)
{
}

static void core_log_stop(MemoryListener *listener,
                          MemoryRegionSection *section)
{
}

static void core_log_sync(MemoryListener *listener,
                          MemoryRegionSection *section)
{
}

static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(1);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(0);
}

static void core_eventfd_add(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool match_data, uint64_t data, EventNotifier *e)
{
}

static void core_eventfd_del(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool match_data, uint64_t data, EventNotifier *e)
{
}

static void io_begin(MemoryListener *listener)
{
}

static void io_commit(MemoryListener *listener)
{
}

static void io_region_add(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);

    mrio->mr = section->mr;
    mrio->offset = section->offset_within_region;
    iorange_init(&mrio->iorange, &memory_region_iorange_ops,
                 section->offset_within_address_space, section->size);
    ioport_register(&mrio->iorange);
}

static void io_region_del(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    isa_unassign_ioport(section->offset_within_address_space, section->size);
}

static void io_region_nop(MemoryListener *listener,
                          MemoryRegionSection *section)
{
}

static void io_log_start(MemoryListener *listener,
                         MemoryRegionSection *section)
{
}

static void io_log_stop(MemoryListener *listener,
                        MemoryRegionSection *section)
{
}

static void io_log_sync(MemoryListener *listener,
                        MemoryRegionSection *section)
{
}

static void io_log_global_start(MemoryListener *listener)
{
}

static void io_log_global_stop(MemoryListener *listener)
{
}

static void io_eventfd_add(MemoryListener *listener,
                           MemoryRegionSection *section,
                           bool match_data, uint64_t data, EventNotifier *e)
{
}

static void io_eventfd_del(MemoryListener *listener,
                           MemoryRegionSection *section,
                           bool match_data, uint64_t data, EventNotifier *e)
{
}

static MemoryListener core_memory_listener = {
    .begin = core_begin,
    .commit = core_commit,
    .region_add = core_region_add,
    .region_del = core_region_del,
    .region_nop = core_region_nop,
    .log_start = core_log_start,
    .log_stop = core_log_stop,
    .log_sync = core_log_sync,
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
    .eventfd_add = core_eventfd_add,
    .eventfd_del = core_eventfd_del,
    .priority = 0,
};

static MemoryListener io_memory_listener = {
    .begin = io_begin,
    .commit = io_commit,
    .region_add = io_region_add,
    .region_del = io_region_del,
    .region_nop = io_region_nop,
    .log_start = io_log_start,
    .log_stop = io_log_stop,
    .log_sync = io_log_sync,
    .log_global_start = io_log_global_start,
    .log_global_stop = io_log_global_stop,
    .eventfd_add = io_eventfd_add,
    .eventfd_del = io_eventfd_del,
    .priority = 0,
};

static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, "system", INT64_MAX);
    set_system_memory_map(system_memory);

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init(system_io, "io", 65536);
    set_system_io_map(system_io);

    memory_listener_register(&core_memory_listener, system_memory);
    memory_listener_register(&io_memory_listener, system_io);
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else

static void invalidate_and_set_dirty(target_phys_addr_t addr,
                                     target_phys_addr_t length)
{
    if (!cpu_physical_memory_is_dirty(addr)) {
        /* invalidate code */
        tb_invalidate_phys_page_range(addr, addr + length, 0);
        /* set dirty bit */
        cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
    }
    xen_modified_memory(addr, length);
}

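/* Copy data between a caller buffer and guest physical memory, page by
 * page.  RAM is accessed through its host mapping (with TB invalidation
 * and dirty tracking on writes); everything else goes through the
 * MemoryRegion callbacks in the widest naturally aligned unit available
 * (4, 2 or 1 bytes). */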
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    MemoryRegionSection *section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        if (is_write) {
            if (!memory_region_is_ram(section->mr)) {
                target_phys_addr_t addr1;
                addr1 = memory_region_section_addr(section, addr);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write(section->mr, addr1, val, 4);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write(section->mr, addr1, val, 2);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write(section->mr, addr1, val, 1);
                    l = 1;
                }
            } else if (!section->readonly) {
                ram_addr_t addr1;
                addr1 = memory_region_get_ram_addr(section->mr)
                    + memory_region_section_addr(section, addr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
                qemu_put_ram_ptr(ptr);
            }
        } else {
            if (!(memory_region_is_ram(section->mr) ||
                  memory_region_is_romd(section->mr))) {
                target_phys_addr_t addr1;
                /* I/O case */
                addr1 = memory_region_section_addr(section, addr);
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read(section->mr, addr1, 4);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read(section->mr, addr1, 2);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read(section->mr, addr1, 1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(section->mr->ram_addr
                                       + memory_region_section_addr(section,
                                                                    addr));
                memcpy(buf, ptr, l);
                qemu_put_ram_ptr(ptr);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    MemoryRegionSection *section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        if (!(memory_region_is_ram(section->mr) ||
              memory_region_is_romd(section->mr))) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = memory_region_get_ram_addr(section->mr)
                + memory_region_section_addr(section, addr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(addr1, l);
            qemu_put_ram_ptr(ptr);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

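/* Support for cpu_physical_memory_map(): I/O (non-RAM) regions cannot
 * be handed out as host pointers, so a single page-sized bounce buffer
 * is used instead.  While it is in use, further map attempts fail;
 * callers may register a MapClient callback to be notified when
 * cpu_physical_memory_unmap() releases it.  Typical pattern (sketch):
 *
 *   target_phys_addr_t plen = len;
 *   void *p = cpu_physical_memory_map(addr, &plen, is_write);
 *   if (p) {
 *       ... access up to plen bytes at p ...
 *       cpu_physical_memory_unmap(p, plen, is_write, plen);
 *   }
 */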
aliguori6d16c2f2009-01-22 16:59:11 +00003561typedef struct {
3562 void *buffer;
Anthony Liguoric227f092009-10-01 16:12:16 -05003563 target_phys_addr_t addr;
3564 target_phys_addr_t len;
aliguori6d16c2f2009-01-22 16:59:11 +00003565} BounceBuffer;
3566
3567static BounceBuffer bounce;
3568
aliguoriba223c22009-01-22 16:59:16 +00003569typedef struct MapClient {
3570 void *opaque;
3571 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00003572 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00003573} MapClient;
3574
Blue Swirl72cf2d42009-09-12 07:36:22 +00003575static QLIST_HEAD(map_client_list, MapClient) map_client_list
3576 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003577
3578void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3579{
Anthony Liguori7267c092011-08-20 22:09:37 -05003580 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00003581
3582 client->opaque = opaque;
3583 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003584 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00003585 return client;
3586}
3587
3588void cpu_unregister_map_client(void *_client)
3589{
3590 MapClient *client = (MapClient *)_client;
3591
Blue Swirl72cf2d42009-09-12 07:36:22 +00003592 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05003593 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00003594}
3595
3596static void cpu_notify_map_clients(void)
3597{
3598 MapClient *client;
3599
Blue Swirl72cf2d42009-09-12 07:36:22 +00003600 while (!QLIST_EMPTY(&map_client_list)) {
3601 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003602 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09003603 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00003604 }
3605}
3606
aliguori6d16c2f2009-01-22 16:59:11 +00003607/* Map a physical memory region into a host virtual address.
3608 * May map a subset of the requested range, given by and returned in *plen.
3609 * May return NULL if resources needed to perform the mapping are exhausted.
3610 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00003611 * Use cpu_register_map_client() to know when retrying the map operation is
3612 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00003613 */
Anthony Liguoric227f092009-10-01 16:12:16 -05003614void *cpu_physical_memory_map(target_phys_addr_t addr,
3615 target_phys_addr_t *plen,
aliguori6d16c2f2009-01-22 16:59:11 +00003616 int is_write)
3617{
Anthony Liguoric227f092009-10-01 16:12:16 -05003618 target_phys_addr_t len = *plen;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003619 target_phys_addr_t todo = 0;
aliguori6d16c2f2009-01-22 16:59:11 +00003620 int l;
Anthony Liguoric227f092009-10-01 16:12:16 -05003621 target_phys_addr_t page;
Avi Kivityf3705d52012-03-08 16:16:34 +02003622 MemoryRegionSection *section;
Anthony PERARDf15fbc42011-07-20 08:17:42 +00003623 ram_addr_t raddr = RAM_ADDR_MAX;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003624 ram_addr_t rlen;
3625 void *ret;
aliguori6d16c2f2009-01-22 16:59:11 +00003626
3627 while (len > 0) {
3628 page = addr & TARGET_PAGE_MASK;
3629 l = (page + TARGET_PAGE_SIZE) - addr;
3630 if (l > len)
3631 l = len;
Avi Kivity06ef3522012-02-13 16:11:22 +02003632 section = phys_page_find(page >> TARGET_PAGE_BITS);
aliguori6d16c2f2009-01-22 16:59:11 +00003633
Avi Kivityf3705d52012-03-08 16:16:34 +02003634 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003635 if (todo || bounce.buffer) {
aliguori6d16c2f2009-01-22 16:59:11 +00003636 break;
3637 }
3638 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3639 bounce.addr = addr;
3640 bounce.len = l;
3641 if (!is_write) {
Stefan Weil54f7b4a2011-04-10 18:23:39 +02003642 cpu_physical_memory_read(addr, bounce.buffer, l);
aliguori6d16c2f2009-01-22 16:59:11 +00003643 }
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003644
3645 *plen = l;
3646 return bounce.buffer;
aliguori6d16c2f2009-01-22 16:59:11 +00003647 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003648 if (!todo) {
Avi Kivityf3705d52012-03-08 16:16:34 +02003649 raddr = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003650 + memory_region_section_addr(section, addr);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003651 }
aliguori6d16c2f2009-01-22 16:59:11 +00003652
3653 len -= l;
3654 addr += l;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003655 todo += l;
aliguori6d16c2f2009-01-22 16:59:11 +00003656 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003657 rlen = todo;
3658 ret = qemu_ram_ptr_length(raddr, &rlen);
3659 *plen = rlen;
3660 return ret;
aliguori6d16c2f2009-01-22 16:59:11 +00003661}
3662
3663/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3664 * Will also mark the memory as dirty if is_write == 1. access_len gives
3665 * the amount of memory that was actually read or written by the caller.
3666 */
Anthony Liguoric227f092009-10-01 16:12:16 -05003667void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3668 int is_write, target_phys_addr_t access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00003669{
3670 if (buffer != bounce.buffer) {
3671 if (is_write) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03003672 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00003673 while (access_len) {
3674 unsigned l;
3675 l = TARGET_PAGE_SIZE;
3676 if (l > access_len)
3677 l = access_len;
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00003678 invalidate_and_set_dirty(addr1, l);
aliguori6d16c2f2009-01-22 16:59:11 +00003679 addr1 += l;
3680 access_len -= l;
3681 }
3682 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003683 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003684 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003685 }
aliguori6d16c2f2009-01-22 16:59:11 +00003686 return;
3687 }
3688 if (is_write) {
3689 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3690 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00003691 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00003692 bounce.buffer = NULL;
aliguoriba223c22009-01-22 16:59:16 +00003693 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00003694}
bellardd0ecd2a2006-04-23 17:14:48 +00003695
bellard8df1cd02005-01-28 22:37:22 +00003696/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003697static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
3698 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003699{
bellard8df1cd02005-01-28 22:37:22 +00003700 uint8_t *ptr;
3701 uint32_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02003702 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00003703
Avi Kivity06ef3522012-02-13 16:11:22 +02003704 section = phys_page_find(addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003705
Blue Swirlcc5bea62012-04-14 14:56:48 +00003706 if (!(memory_region_is_ram(section->mr) ||
3707 memory_region_is_romd(section->mr))) {
bellard8df1cd02005-01-28 22:37:22 +00003708 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00003709 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003710 val = io_mem_read(section->mr, addr, 4);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003711#if defined(TARGET_WORDS_BIGENDIAN)
3712 if (endian == DEVICE_LITTLE_ENDIAN) {
3713 val = bswap32(val);
3714 }
3715#else
3716 if (endian == DEVICE_BIG_ENDIAN) {
3717 val = bswap32(val);
3718 }
3719#endif
bellard8df1cd02005-01-28 22:37:22 +00003720 } else {
3721 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02003722 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003723 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003724 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003725 switch (endian) {
3726 case DEVICE_LITTLE_ENDIAN:
3727 val = ldl_le_p(ptr);
3728 break;
3729 case DEVICE_BIG_ENDIAN:
3730 val = ldl_be_p(ptr);
3731 break;
3732 default:
3733 val = ldl_p(ptr);
3734 break;
3735 }
bellard8df1cd02005-01-28 22:37:22 +00003736 }
3737 return val;
3738}
3739
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003740uint32_t ldl_phys(target_phys_addr_t addr)
3741{
3742 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3743}
3744
3745uint32_t ldl_le_phys(target_phys_addr_t addr)
3746{
3747 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3748}
3749
3750uint32_t ldl_be_phys(target_phys_addr_t addr)
3751{
3752 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
3753}

/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);

        /* XXX This is broken when device endian != cpu endian.
           Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = io_mem_read(section->mr, addr, 4) << 32;
        val |= io_mem_read(section->mr, addr + 4, 4);
#else
        val = io_mem_read(section->mr, addr, 4);
        val |= io_mem_read(section->mr, addr + 4, 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
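
/*
 * One possible shape for the endian fix flagged by the XXX comment in
 * ldq_phys_internal() above (an untested sketch, not a committed fix):
 * issue the two 32-bit I/O reads and combine them according to the
 * requested endianness rather than TARGET_WORDS_BIGENDIAN alone:
 *
 *     uint64_t lo = io_mem_read(section->mr, addr, 4);
 *     uint64_t hi = io_mem_read(section->mr, addr + 4, 4);
 *     val = (endian == DEVICE_BIG_ENDIAN) ? (lo << 32) | hi
 *                                         : (hi << 32) | lo;
 *
 * A complete fix would still need a DEVICE_NATIVE_ENDIAN case that falls
 * back to the target's byte order.
 */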

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint32_t val;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
                               & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
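
/*
 * Typical use of the _notdirty store (a hedged sketch; the helper name and
 * PTE bit value are invented for illustration): a target MMU walker that
 * sets accessed/dirty bits in a guest page table entry without triggering
 * code invalidation on the PTE's page:
 *
 *     static void pte_set_accessed(target_phys_addr_t pte_addr, uint32_t pte)
 *     {
 *         stl_phys_notdirty(pte_addr, pte | 0x20);   // e.g. an accessed bit
 *     }
 *
 * As the function body shows, during migration the page is still flagged
 * dirty for RAM tracking; only CODE_DIRTY_FLAG is withheld.
 */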

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write(section->mr, addr, val >> 32, 4);
        io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
#else
        io_mem_write(section->mr, addr, (uint32_t)val, 4);
        io_mem_write(section->mr, addr + 4, val >> 32, 4);
#endif
    } else {
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 2);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}
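
/*
 * Note on the three 64-bit stores above (the value and address are made
 * up): stq_phys() converts to the target's native order via tswap64(),
 * while the _le/_be variants pin the byte order regardless of target.
 *
 *     stq_le_phys(0x3000, 0x0123456789abcdefULL);
 *
 * stores 0xef as the first byte on both big- and little-endian targets,
 * whereas the byte order produced by stq_phys() depends on
 * TARGET_WORDS_BIGENDIAN.
 */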

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        } else {
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
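
/*
 * Sketch of the debugger-style call pattern for cpu_memory_rw_debug()
 * (the buffer size is arbitrary; in-tree, the gdb stub is the main
 * caller). Note that the page-by-page walk above may copy some data
 * before hitting an unmapped page and returning -1:
 *
 *     uint8_t buf[16];
 *     if (cpu_memory_rw_debug(env, pc, buf, sizeof(buf), 0) < 0) {
 *         // some page in the range had no physical mapping
 *     }
 *
 * Writes go through cpu_physical_memory_write_rom(), so a debugger can
 * patch breakpoints even into ROM-backed pages.
 */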
#endif

/* in deterministic execution mode, instructions that perform device I/O
   must be at the end of the TB */
void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc(retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred. */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn. */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB. If this is not
       the first instruction in a TB then re-execute the preceding
       branch. */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen. */
    if (n > CF_COUNT_MASK) {
        cpu_abort(env, "TB too big during recompile");
    }

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception. In practice
       we have already translated the block once so it's probably ok. */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB. */
    cpu_resume_from_signal(env, NULL);
}

#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size) {
            max_target_code_size = tb->size;
        }
        if (tb->page_addr[1] != -1) {
            cross_page++;
        }
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%zd\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ?
                (double) (code_gen_ptr - code_gen_buffer) / target_code_size
                : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

/*
 * A helper function for the _utterly broken_ virtio device model to find out
 * if it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#endif

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(target_phys_addr_t phys_addr)
{
    MemoryRegionSection *section;

    section = phys_page_find(phys_addr >> TARGET_PAGE_BITS);

    return !(memory_region_is_ram(section->mr) ||
             memory_region_is_romd(section->mr));
}
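
/*
 * Illustrative caller (hedged; the loop bounds are invented): memory-dump
 * code can use cpu_physical_memory_is_io() to skip device windows when
 * walking guest-physical space, since reading MMIO as if it were RAM could
 * trigger device side effects:
 *
 *     target_phys_addr_t a;
 *     for (a = start; a < end; a += TARGET_PAGE_SIZE) {
 *         if (cpu_physical_memory_is_io(a)) {
 *             continue;   // not RAM/ROMD; leave it out of the dump
 *         }
 *         ...
 *     }
 */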
#endif