/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "dma.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#include "cputlb.h"

#include "memory-internal.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t *code_gen_prologue;
static uint8_t *code_gen_buffer;
static size_t code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static size_t code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;
DMAContext dma_context_memory;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

#define P_L2_LEVELS \
    (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)

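/* Worked example of the macros above (pure arithmetic): with
   L1_MAP_ADDR_SPACE_BITS = 47 and TARGET_PAGE_BITS = 12 there are 35
   page-index bits to cover.  35 % 10 = 5, so V_L1_BITS_REM = 5; since
   that is >= 4 it is used directly and V_L1_BITS = 5.  The top-level
   table then has V_L1_SIZE = 32 entries, V_L1_SHIFT = 47 - 12 - 5 = 30,
   and the remaining 30 bits are consumed by three 10-bit levels below.
   Had the remainder been under 4, it would have been folded into an
   (L2_BITS + rem)-bit top level instead of a uselessly small one.  */
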
uintptr_t qemu_real_host_page_size;
uintptr_t qemu_host_page_size;
uintptr_t qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#endif
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2);

/* statistics */
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static inline void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

}
#else
static inline void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

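/* Walk the multi-level l1_map down to the PageDesc for 'index':
   V_L1_BITS of the index select the top-level entry, then L2_BITS per
   intermediate level.  With 'alloc' set, missing intermediate tables
   and the final PageDesc array are created on the way down; otherwise
   the walk returns NULL at the first hole.  */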
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

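/* Take the next node from the pool reserved by phys_map_node_reserve()
   and mark all of its entries empty.  Nodes are identified by a
   uint16_t index into phys_map_nodes rather than by pointer, keeping
   PhysPageEntry compact.  */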
static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}


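/* Recursive helper for phys_page_set(): descend the radix tree,
   allocating nodes as needed, and point the leaves covering
   [*index, *index + *nb) at section number 'leaf'.  A fully aligned,
   fully covered 'step'-sized block becomes a leaf at this level;
   anything smaller recurses one level down.  *index and *nb are
   advanced as pages are consumed.  */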
static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

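/* Look up the section covering page 'index': follow node indices from
   the root until a leaf is reached, falling back to the unassigned
   section on a NIL link.  Always returns a valid MemoryRegionSection
   pointer, never NULL.  */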
MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
{
    PhysPageEntry lp = d->phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode.  It will change when a dedicated libc will be used.  */
/* ??? 64-bit hosts ought to have no problem mmaping data outside the
   region in which the guest needs to run.  Revisit this.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

/* ??? Should configure for this, not list operating systems here.  */
#if (defined(__linux__) \
    || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__))
# define USE_MMAP
#endif

/* Minimum size of the code gen buffer.  This number is randomly chosen,
   but not so small that we can't have a fair number of TB's live.  */
#define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)

/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__arm__)
# define MAX_CODE_GEN_BUFFER_SIZE  (16u * 1024 * 1024)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)

static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer.  */
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* ??? Needs adjustments.  */
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
           static buffer, we could size this on RESERVED_VA, on the text
           segment size of the executable, or continue to use the default.  */
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    code_gen_buffer_size = tb_size;
    return tb_size;
}

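/* Three ways to obtain the code generation buffer, selected at compile
   time: a static array in BSS (user mode), an anonymous executable
   mmap placed with regard for the host's direct-branch range, or plain
   g_malloc plus map_exec as the fallback.  code_gen_alloc() below
   checks for a NULL return.  */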
#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

static inline void *alloc_code_gen_buffer(void)
{
    map_exec(static_code_gen_buffer, code_gen_buffer_size);
    return static_code_gen_buffer;
}
#elif defined(USE_MMAP)
static inline void *alloc_code_gen_buffer(void)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address.  */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (code_gen_buffer_size > 800u * 1024 * 1024) {
        code_gen_buffer_size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# endif

    buf = mmap((void *)start, code_gen_buffer_size,
               PROT_WRITE | PROT_READ | PROT_EXEC, flags, -1, 0);
    return buf == MAP_FAILED ? NULL : buf;
}
#else
static inline void *alloc_code_gen_buffer(void)
{
    void *buf = g_malloc(code_gen_buffer_size);
    if (buf) {
        map_exec(buf, code_gen_buffer_size);
    }
    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, USE_MMAP */

static inline void code_gen_alloc(size_t tb_size)
{
    code_gen_buffer_size = size_code_gen_buffer(tb_size);
    code_gen_buffer = alloc_code_gen_buffer();
    if (code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }

    qemu_madvise(code_gen_buffer, code_gen_buffer_size, QEMU_MADV_HUGEPAGE);

    /* Steal room for the prologue at the end of the buffer.  This ensures
       (via the MAX_CODE_GEN_BUFFER_SIZE limits above) that direct branches
       from TB's to the prologue are going to be in range.  It also means
       that we don't need to mark (additional) portions of the data segment
       as executable.  */
    code_gen_prologue = code_gen_buffer + code_gen_buffer_size - 1024;
    code_gen_buffer_size -= 1024;

    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    tcg_register_jit(code_gen_buffer, code_gen_buffer_size);
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUArchState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUArchState),
        VMSTATE_UINT32(interrupt_request, CPUArchState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUArchState *qemu_get_cpu(int cpu)
{
    CPUArchState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

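/* Register a newly created CPU: append it to the global first_cpu list,
   assign the next free cpu_index and, for system emulation, register
   the common CPU state with savevm.  cpu_list_lock protects the list in
   user mode, where multiple guest threads may create CPUs
   concurrently.  */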
void cpu_exec_init(CPUArchState *env)
{
#ifndef CONFIG_USER_ONLY
    CPUState *cpu = ENV_GET_CPU(env);
#endif
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUArchState *env1)
{
    CPUArchState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

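/* TBs that chain to one another are kept on circular lists threaded
   through jmp_next[]/jmp_first.  The low two bits of each link encode
   which jump slot of the pointed-to TB the link belongs to, with the
   value 2 marking the list head.  tb_jmp_remove() unlinks jump 'n' of
   'tb' from the list it is on.  */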
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
}

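/* Remove a TB from every structure that can still reach it: the
   physical hash table, the per-page TB lists, each CPU's tb_jmp_cache
   and the jump lists.  Any TB that chained directly to this one has
   its jump reset so it falls back to the exit path instead of entering
   stale code.  */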
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUArchState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

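/* Set bits [start, start + len) in the byte-array bitmap 'tab',
   handling a partial first byte, whole 0xff middle bytes and a partial
   last byte separately.  For example, start=6, len=4 sets bits 6-7 of
   tab[0] and bits 0-1 of tab[1].  */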
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

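/* Build the bitmap of bytes in this page that are covered by
   translated code by OR-ing in the [tb_start, tb_end) extent of every
   TB on the page's list; tb_invalidate_phys_page_fast() consults it to
   skip writes that cannot hit any TB.  */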
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

Andreas Färber9349b4f2012-03-14 01:38:32 +01001045TranslationBlock *tb_gen_code(CPUArchState *env,
pbrook2e70f6e2008-06-29 01:03:05 +00001046 target_ulong pc, target_ulong cs_base,
1047 int flags, int cflags)
bellardd720b932004-04-25 17:57:43 +00001048{
1049 TranslationBlock *tb;
1050 uint8_t *tc_ptr;
Paul Brook41c1b1c2010-03-12 16:54:58 +00001051 tb_page_addr_t phys_pc, phys_page2;
1052 target_ulong virt_page2;
bellardd720b932004-04-25 17:57:43 +00001053 int code_gen_size;
1054
Paul Brook41c1b1c2010-03-12 16:54:58 +00001055 phys_pc = get_page_addr_code(env, pc);
bellardc27004e2005-01-03 23:35:10 +00001056 tb = tb_alloc(pc);
bellardd720b932004-04-25 17:57:43 +00001057 if (!tb) {
1058 /* flush must be done */
1059 tb_flush(env);
1060 /* cannot fail at this point */
bellardc27004e2005-01-03 23:35:10 +00001061 tb = tb_alloc(pc);
pbrook2e70f6e2008-06-29 01:03:05 +00001062 /* Don't forget to invalidate previous TB info. */
1063 tb_invalidated_flag = 1;
bellardd720b932004-04-25 17:57:43 +00001064 }
1065 tc_ptr = code_gen_ptr;
1066 tb->tc_ptr = tc_ptr;
1067 tb->cs_base = cs_base;
1068 tb->flags = flags;
1069 tb->cflags = cflags;
blueswir1d07bde82007-12-11 19:35:45 +00001070 cpu_gen_code(env, tb, &code_gen_size);
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001071 code_gen_ptr = (void *)(((uintptr_t)code_gen_ptr + code_gen_size +
1072 CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
ths3b46e622007-09-17 08:09:54 +00001073
bellardd720b932004-04-25 17:57:43 +00001074 /* check next page if needed */
bellardc27004e2005-01-03 23:35:10 +00001075 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
bellardd720b932004-04-25 17:57:43 +00001076 phys_page2 = -1;
bellardc27004e2005-01-03 23:35:10 +00001077 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
Paul Brook41c1b1c2010-03-12 16:54:58 +00001078 phys_page2 = get_page_addr_code(env, virt_page2);
bellardd720b932004-04-25 17:57:43 +00001079 }
Paul Brook41c1b1c2010-03-12 16:54:58 +00001080 tb_link_page(tb, phys_pc, phys_page2);
pbrook2e70f6e2008-06-29 01:03:05 +00001081 return tb;
bellardd720b932004-04-25 17:57:43 +00001082}
ths3b46e622007-09-17 08:09:54 +00001083
Alexander Graf77a8f1a2012-05-10 22:40:10 +00001084/*
Jan Kiszka8e0fdce2012-05-23 23:41:53 -03001085 * Invalidate all TBs which intersect with the target physical address range
1086 * [start;end[. NOTE: start and end may refer to *different* physical pages.
1087 * 'is_cpu_write_access' should be true if called from a real cpu write
1088 * access: the virtual CPU will exit the current TB if code is modified inside
1089 * this TB.
Alexander Graf77a8f1a2012-05-10 22:40:10 +00001090 */
1091void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
1092 int is_cpu_write_access)
1093{
1094 while (start < end) {
1095 tb_invalidate_phys_page_range(start, end, is_cpu_write_access);
1096 start &= TARGET_PAGE_MASK;
1097 start += TARGET_PAGE_SIZE;
1098 }
1099}
1100
Jan Kiszka8e0fdce2012-05-23 23:41:53 -03001101/*
1102 * Invalidate all TBs which intersect with the target physical address range
1103 * [start;end[. NOTE: start and end must refer to the *same* physical page.
1104 * 'is_cpu_write_access' should be true if called from a real cpu write
1105 * access: the virtual CPU will exit the current TB if code is modified inside
1106 * this TB.
1107 */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001108void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
bellardd720b932004-04-25 17:57:43 +00001109 int is_cpu_write_access)
bellard9fa3e852004-01-04 18:06:42 +00001110{
aliguori6b917542008-11-18 19:46:41 +00001111 TranslationBlock *tb, *tb_next, *saved_tb;
Andreas Färber9349b4f2012-03-14 01:38:32 +01001112 CPUArchState *env = cpu_single_env;
Paul Brook41c1b1c2010-03-12 16:54:58 +00001113 tb_page_addr_t tb_start, tb_end;
aliguori6b917542008-11-18 19:46:41 +00001114 PageDesc *p;
1115 int n;
1116#ifdef TARGET_HAS_PRECISE_SMC
1117 int current_tb_not_found = is_cpu_write_access;
1118 TranslationBlock *current_tb = NULL;
1119 int current_tb_modified = 0;
1120 target_ulong current_pc = 0;
1121 target_ulong current_cs_base = 0;
1122 int current_flags = 0;
1123#endif /* TARGET_HAS_PRECISE_SMC */
bellard9fa3e852004-01-04 18:06:42 +00001124
1125 p = page_find(start >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001126 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00001127 return;
ths5fafdf22007-09-16 21:08:06 +00001128 if (!p->code_bitmap &&
bellardd720b932004-04-25 17:57:43 +00001129 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1130 is_cpu_write_access) {
bellard9fa3e852004-01-04 18:06:42 +00001131 /* build code bitmap */
1132 build_page_bitmap(p);
1133 }
1134
1135 /* we remove all the TBs in the range [start, end[ */
1136 /* XXX: see if in some cases it could be faster to invalidate all the code */
1137 tb = p->first_tb;
1138 while (tb != NULL) {
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001139 n = (uintptr_t)tb & 3;
1140 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
bellard9fa3e852004-01-04 18:06:42 +00001141 tb_next = tb->page_next[n];
1142 /* NOTE: this is subtle as a TB may span two physical pages */
1143 if (n == 0) {
1144 /* NOTE: tb_end may be after the end of the page, but
1145 it is not a problem */
1146 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1147 tb_end = tb_start + tb->size;
1148 } else {
1149 tb_start = tb->page_addr[1];
1150 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1151 }
1152 if (!(tb_end <= start || tb_start >= end)) {
bellardd720b932004-04-25 17:57:43 +00001153#ifdef TARGET_HAS_PRECISE_SMC
1154 if (current_tb_not_found) {
1155 current_tb_not_found = 0;
1156 current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001157 if (env->mem_io_pc) {
bellardd720b932004-04-25 17:57:43 +00001158 /* now we have a real cpu fault */
pbrook2e70f6e2008-06-29 01:03:05 +00001159 current_tb = tb_find_pc(env->mem_io_pc);
bellardd720b932004-04-25 17:57:43 +00001160 }
1161 }
1162 if (current_tb == tb &&
pbrook2e70f6e2008-06-29 01:03:05 +00001163 (current_tb->cflags & CF_COUNT_MASK) != 1) {
bellardd720b932004-04-25 17:57:43 +00001164 /* If we are modifying the current TB, we must stop
1165 its execution. We could be more precise by checking
1166 that the modification is after the current PC, but it
1167 would require a specialized function to partially
1168 restore the CPU state */
ths3b46e622007-09-17 08:09:54 +00001169
bellardd720b932004-04-25 17:57:43 +00001170 current_tb_modified = 1;
Stefan Weil618ba8e2011-04-18 06:39:53 +00001171 cpu_restore_state(current_tb, env, env->mem_io_pc);
aliguori6b917542008-11-18 19:46:41 +00001172 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1173 &current_flags);
bellardd720b932004-04-25 17:57:43 +00001174 }
1175#endif /* TARGET_HAS_PRECISE_SMC */
bellard6f5a9f72005-11-26 20:12:28 +00001176 /* we need to do that to handle the case where a signal
1177 occurs while doing tb_phys_invalidate() */
1178 saved_tb = NULL;
1179 if (env) {
1180 saved_tb = env->current_tb;
1181 env->current_tb = NULL;
1182 }
bellard9fa3e852004-01-04 18:06:42 +00001183 tb_phys_invalidate(tb, -1);
bellard6f5a9f72005-11-26 20:12:28 +00001184 if (env) {
1185 env->current_tb = saved_tb;
1186 if (env->interrupt_request && env->current_tb)
1187 cpu_interrupt(env, env->interrupt_request);
1188 }
bellard9fa3e852004-01-04 18:06:42 +00001189 }
1190 tb = tb_next;
1191 }
1192#if !defined(CONFIG_USER_ONLY)
1193 /* if no code remaining, no need to continue to use slow writes */
1194 if (!p->first_tb) {
1195 invalidate_page_bitmap(p);
bellardd720b932004-04-25 17:57:43 +00001196 if (is_cpu_write_access) {
pbrook2e70f6e2008-06-29 01:03:05 +00001197 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
bellardd720b932004-04-25 17:57:43 +00001198 }
1199 }
1200#endif
1201#ifdef TARGET_HAS_PRECISE_SMC
1202 if (current_tb_modified) {
1203 /* we generate a block containing just the instruction
1204 modifying the memory. It will ensure that it cannot modify
1205 itself */
bellardea1c1802004-06-14 18:56:36 +00001206 env->current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001207 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
bellardd720b932004-04-25 17:57:43 +00001208 cpu_resume_from_signal(env, NULL);
bellard9fa3e852004-01-04 18:06:42 +00001209 }
1210#endif
1211}
1212
1213/* len must be <= 8 and start must be a multiple of len */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001214static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
bellard9fa3e852004-01-04 18:06:42 +00001215{
1216 PageDesc *p;
1217 int offset, b;
bellard59817cc2004-02-16 22:01:13 +00001218#if 0
bellarda4193c82004-06-03 14:01:43 +00001219 if (1) {
aliguori93fcfe32009-01-15 22:34:14 +00001220 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1221 cpu_single_env->mem_io_vaddr, len,
1222 cpu_single_env->eip,
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001223 cpu_single_env->eip +
1224 (intptr_t)cpu_single_env->segs[R_CS].base);
bellard59817cc2004-02-16 22:01:13 +00001225 }
1226#endif
bellard9fa3e852004-01-04 18:06:42 +00001227 p = page_find(start >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001228 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00001229 return;
1230 if (p->code_bitmap) {
1231 offset = start & ~TARGET_PAGE_MASK;
1232 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1233 if (b & ((1 << len) - 1))
1234 goto do_invalidate;
1235 } else {
1236 do_invalidate:
bellardd720b932004-04-25 17:57:43 +00001237 tb_invalidate_phys_page_range(start, start + len, 1);
bellard9fa3e852004-01-04 18:06:42 +00001238 }
1239}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    uintptr_t pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUArchState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction that
           modified the memory; this guarantees the new block cannot
           modify itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
             addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
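
/* Note (added): p->first_tb and tb->page_next[] store a TranslationBlock
   pointer with the page index n (0 or 1) OR'ed into the low two bits,
   which is why list walkers recover n with "& 3" and mask the pointer
   with "~3".  A TB spanning two guest pages is linked into both pages'
   lists under different tags. */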

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

#if defined(CONFIG_QEMU_LDST_OPTIMIZATION) && defined(CONFIG_SOFTMMU)
/* check whether the given addr is in the TCG generated code buffer or not */
bool is_tcg_gen_code(uintptr_t tc_ptr)
{
    /* This can be called during code generation, so code_gen_buffer_max_size
       is used instead of code_gen_ptr for the upper boundary check */
    return (tc_ptr >= (uintptr_t)code_gen_buffer &&
            tc_ptr < (uintptr_t)(code_gen_buffer + code_gen_buffer_max_size));
}
#endif
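
/* Usage note (added): a sketch of how such a predicate can be used by a
   host fault handler before attempting a TB lookup -- illustrative only:
   "if (is_tcg_gen_code(pc)) { tb = tb_find_pc(pc); ... }". */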

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
{
    int m_min, m_max, m;
    uintptr_t v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (uintptr_t)code_gen_buffer ||
        tc_ptr >= (uintptr_t)code_gen_ptr) {
        return NULL;
    }
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (uintptr_t)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
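
#if 0
/* Illustrative sketch (added): the typical precise-SMC pattern seen
   above -- map a host PC taken from a signal frame back to its TB and
   resynchronize the guest CPU state; 'env' and 'pc' are assumed to come
   from the fault handler. */
static void example_resync_from_host_pc(CPUArchState *env, uintptr_t pc)
{
    TranslationBlock *tb = tb_find_pc(pc);

    if (tb) {
        /* the host pc is inside translated code: recover the guest
           state at that point */
        cpu_restore_state(tb, env, pc);
    }
}
#endif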

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for (;;) {
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for (;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
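
/* Note (added): the jmp_first/jmp_next circular list uses the same low-bit
   tagging as the page lists -- each entry is a TB pointer tagged with
   which of its two jump slots points here, and tag value 2 (set in
   tb_link_page) marks the list head, which is what the "n1 == 2" test in
   tb_reset_jump_recursive2 detects. */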

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
void tb_invalidate_phys_addr(hwaddr addr)
{
    ram_addr_t ram_addr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
    if (!(memory_region_is_ram(section->mr)
          || (section->mr->rom_device && section->mr->readable))) {
        return;
    }
    ram_addr = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
        + memory_region_section_addr(section, addr);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}

static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
                            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
        len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
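
#if 0
/* Illustrative usage sketch (added): insert a 4-byte GDB-style write
   watchpoint.  len must be a power of two and addr aligned to it, or
   -EINVAL is returned by the sanity check above; the function name is
   hypothetical. */
static void example_set_write_watchpoint(CPUArchState *env, target_ulong addr)
{
    CPUWatchpoint *wp;

    if (cpu_watchpoint_insert(env, addr, 4, BP_GDB | BP_MEM_WRITE, &wp) < 0) {
        /* invalid length/alignment */
    }
}
#endif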

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
            && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}
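
/* Note (added): breakpoint_invalidate() above discards any TB already
   translated for 'pc', so the next execution retranslates the code with
   the breakpoint armed; BP_GDB entries are kept at the head of the list
   so gdbstub breakpoints are matched before target-defined BP_CPU ones. */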

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

static void cpu_unlink_tb(CPUArchState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TBs */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}

#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUArchState *env, int mask)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
        return;
    }

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu_unlink_tb(env);
    }
}

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUArchState *env, int mask)
{
    env->interrupt_request |= mask;
    cpu_unlink_tb(env);
}
#endif /* CONFIG_USER_ONLY */

void cpu_reset_interrupt(CPUArchState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUArchState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining and index.  */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone.  */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)
void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
        != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block. */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, paddr);
        if (!section->readonly) {
            iotlb |= phys_section_notdirty;
        } else {
            iotlb |= phys_section_rom;
        }
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = section - phys_sections;
        iotlb += memory_region_section_addr(section, paddr);
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
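
/* Note (added): the returned iotlb value is overloaded.  For RAM it is a
   ram_addr_t, possibly tagged with the notdirty/rom special sections so
   that writes are intercepted; for MMIO it is an index into phys_sections
   plus the offset within the region.  Watchpointed pages are redirected
   to phys_section_watch and marked TLB_MMIO so that every access traps. */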

#else
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */

struct walk_memory_regions_data
{
    walk_memory_regions_fn fn;
    void *priv;
    uintptr_t start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   abi_ulong end, int new_prot)
{
    if (data->start != -1ul) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1ul);
    data->prot = new_prot;

    return 0;
}

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                         (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}

static int dump_region(void *priv, abi_ulong start,
                       abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
                   " "TARGET_ABI_FMT_lx" %c%c%c\n",
                   start, end, end - start,
                   ((prot & PAGE_READ) ? 'r' : '-'),
                   ((prot & PAGE_WRITE) ? 'w' : '-'),
                   ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
                   "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    end = TARGET_PAGE_ALIGN(start + len); /* must do this before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p)
            return -1;
        if (!(p->flags & PAGE_VALID))
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            return 0;
        }
    }
    return 0;
}
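
#if 0
/* Illustrative usage sketch (added): validate that a guest buffer is
   readable before touching it directly, as user-mode syscall emulation
   does; the function name is hypothetical. */
static int example_check_readable(target_ulong guest_addr, target_ulong size)
{
    return page_check_range(guest_addr, size, PAGE_READ) == 0;
}
#endif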

/* called from signal handler: invalidate the code and unprotect the
   page.  Return TRUE if the fault was successfully handled.  */
int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(hwaddr base);

static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
        if (!p[i].is_leaf) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].ptr);
        }
    }
    lp->is_leaf = 0;
    lp->ptr = PHYS_MAP_NODE_NIL;
}

static void destroy_all_mappings(AddressSpaceDispatch *d)
{
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}

static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + section->size - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}

static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    hwaddr addr;
    uint16_t section_index = phys_section_add(section);

    assert(size);

    addr = start_addr;
    phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
                  section_index);
}

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
    MemoryRegionSection now = *section, remain = *section;

    if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
        || (now.size < TARGET_PAGE_SIZE)) {
        now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space,
                       now.size);
        register_subpage(d, &now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    while (remain.size >= TARGET_PAGE_SIZE) {
        now = remain;
        if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = TARGET_PAGE_SIZE;
            register_subpage(d, &now);
        } else {
            now.size &= TARGET_PAGE_MASK;
            register_multipage(d, &now);
        }
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    now = remain;
    if (now.size) {
        register_subpage(d, &now);
    }
}
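
/* Worked example (added, assuming 4 KiB target pages and an
   offset_within_region congruent to the address-space offset modulo the
   page size): a section covering 0x1800..0x47ff is split into a subpage
   head 0x1800..0x1fff, full pages 0x2000..0x3fff registered via
   register_multipage, and a subpage tail 0x4000..0x47ff. */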
2338
Sheng Yang62a27442010-01-26 19:21:16 +08002339void qemu_flush_coalesced_mmio_buffer(void)
2340{
2341 if (kvm_enabled())
2342 kvm_flush_coalesced_mmio_buffer();
2343}
2344
Marcelo Tosattic9027602010-03-01 20:25:08 -03002345#if defined(__linux__) && !defined(TARGET_S390X)
2346
2347#include <sys/vfs.h>
2348
2349#define HUGETLBFS_MAGIC 0x958458f6
2350
2351static long gethugepagesize(const char *path)
2352{
2353 struct statfs fs;
2354 int ret;
2355
2356 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002357 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002358 } while (ret != 0 && errno == EINTR);
2359
2360 if (ret != 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002361 perror(path);
2362 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002363 }
2364
2365 if (fs.f_type != HUGETLBFS_MAGIC)
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002366 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002367
2368 return fs.f_bsize;
2369}
2370
Alex Williamson04b16652010-07-02 11:13:17 -06002371static void *file_ram_alloc(RAMBlock *block,
2372 ram_addr_t memory,
2373 const char *path)
Marcelo Tosattic9027602010-03-01 20:25:08 -03002374{
2375 char *filename;
2376 void *area;
2377 int fd;
2378#ifdef MAP_POPULATE
2379 int flags;
2380#endif
2381 unsigned long hpagesize;
2382
2383 hpagesize = gethugepagesize(path);
2384 if (!hpagesize) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002385 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002386 }
2387
2388 if (memory < hpagesize) {
2389 return NULL;
2390 }
2391
2392 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2393 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2394 return NULL;
2395 }
2396
2397 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002398 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002399 }
2400
2401 fd = mkstemp(filename);
2402 if (fd < 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002403 perror("unable to create backing store for hugepages");
2404 free(filename);
2405 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002406 }
2407 unlink(filename);
2408 free(filename);
2409
2410 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2411
2412 /*
2413 * ftruncate is not supported by hugetlbfs in older
2414 * hosts, so don't bother bailing out on errors.
2415 * If anything goes wrong with it under other filesystems,
2416 * mmap will fail.
2417 */
2418 if (ftruncate(fd, memory))
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002419 perror("ftruncate");
Marcelo Tosattic9027602010-03-01 20:25:08 -03002420
2421#ifdef MAP_POPULATE
2422 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2423 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2424 * to sidestep this quirk.
2425 */
2426 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2427 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2428#else
2429 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2430#endif
2431 if (area == MAP_FAILED) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002432 perror("file_ram_alloc: can't mmap RAM pages");
2433 close(fd);
2434 return (NULL);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002435 }
Alex Williamson04b16652010-07-02 11:13:17 -06002436 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002437 return area;
2438}
2439#endif
2440
Alex Williamsond17b5282010-06-25 11:08:38 -06002441static ram_addr_t find_ram_offset(ram_addr_t size)
2442{
Alex Williamson04b16652010-07-02 11:13:17 -06002443 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06002444 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002445
2446 if (QLIST_EMPTY(&ram_list.blocks))
2447 return 0;
2448
2449 QLIST_FOREACH(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002450 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002451
2452 end = block->offset + block->length;
2453
2454 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2455 if (next_block->offset >= end) {
2456 next = MIN(next, next_block->offset);
2457 }
2458 }
2459 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06002460 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06002461 mingap = next - end;
2462 }
2463 }
Alex Williamson3e837b22011-10-31 08:54:09 -06002464
2465 if (offset == RAM_ADDR_MAX) {
2466 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2467 (uint64_t)size);
2468 abort();
2469 }
2470
Alex Williamson04b16652010-07-02 11:13:17 -06002471 return offset;
2472}
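
/*
 * Worked example (hypothetical layout): with existing blocks at
 * [0, 4M) and [8M, 12M), a 2M request sees two candidate gaps,
 * [4M, 8M) and [12M, RAM_ADDR_MAX). The loop above keeps the smallest
 * gap that still fits, so the new block is placed at offset 4M rather
 * than after the last block, limiting fragmentation of the ram_addr_t
 * space.
 */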
2473
Juan Quintela652d7ec2012-07-20 10:37:54 +02002474ram_addr_t last_ram_offset(void)
Alex Williamson04b16652010-07-02 11:13:17 -06002475{
Alex Williamsond17b5282010-06-25 11:08:38 -06002476 RAMBlock *block;
2477 ram_addr_t last = 0;
2478
2479 QLIST_FOREACH(block, &ram_list.blocks, next)
2480 last = MAX(last, block->offset + block->length);
2481
2482 return last;
2483}
2484
Jason Baronddb97f12012-08-02 15:44:16 -04002485static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
2486{
2487 int ret;
2488 QemuOpts *machine_opts;
2489
2490 /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
2491 machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
2492 if (machine_opts &&
2493 !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
2494 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
2495 if (ret) {
2496 perror("qemu_madvise");
2497 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
2498 "but dump_guest_core=off specified\n");
2499 }
2500 }
2501}
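
/*
 * Example invocation (illustrative): guest RAM can be excluded from core
 * dumps of the QEMU process with
 *
 *     qemu-system-x86_64 -machine pc,dump-guest-core=off ...
 *
 * in which case qemu_ram_setup_dump() issues the MADV_DONTDUMP advice
 * for every RAM block that is allocated.
 */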
2502
Avi Kivityc5705a72011-12-20 15:59:12 +02002503void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
Cam Macdonell84b89d72010-07-26 18:10:57 -06002504{
2505 RAMBlock *new_block, *block;
2506
Avi Kivityc5705a72011-12-20 15:59:12 +02002507 new_block = NULL;
2508 QLIST_FOREACH(block, &ram_list.blocks, next) {
2509 if (block->offset == addr) {
2510 new_block = block;
2511 break;
2512 }
2513 }
2514 assert(new_block);
2515 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002516
Anthony Liguori09e5ab62012-02-03 12:28:43 -06002517 if (dev) {
2518 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002519 if (id) {
2520 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05002521 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002522 }
2523 }
2524 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2525
2526 QLIST_FOREACH(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02002527 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06002528 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2529 new_block->idstr);
2530 abort();
2531 }
2532 }
Avi Kivityc5705a72011-12-20 15:59:12 +02002533}
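
/*
 * Example (hypothetical values): for a block named "ivshmem.bar2" owned
 * by a PCI device, qdev_get_dev_path() typically yields the device's bus
 * address, so the resulting idstr could look like
 * "0000:00:04.0/ivshmem.bar2". The exact path format is bus-specific.
 */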
2534
Luiz Capitulino8490fc72012-09-05 16:50:16 -03002535static int memory_try_enable_merging(void *addr, size_t len)
2536{
2537 QemuOpts *opts;
2538
2539 opts = qemu_opts_find(qemu_find_opts("machine"), 0);
2540 if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
2541 /* disabled by the user */
2542 return 0;
2543 }
2544
2545 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
2546}
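
/*
 * Example (illustrative): KSM page merging can be disabled per VM with
 *
 *     qemu-system-x86_64 -machine pc,mem-merge=off ...
 *
 * otherwise memory_try_enable_merging() marks each anonymous RAM
 * allocation MADV_MERGEABLE.
 */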
2547
Avi Kivityc5705a72011-12-20 15:59:12 +02002548ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2549 MemoryRegion *mr)
2550{
2551 RAMBlock *new_block;
2552
2553 size = TARGET_PAGE_ALIGN(size);
2554 new_block = g_malloc0(sizeof(*new_block));
Cam Macdonell84b89d72010-07-26 18:10:57 -06002555
Avi Kivity7c637362011-12-21 13:09:49 +02002556 new_block->mr = mr;
Jun Nakajima432d2682010-08-31 16:41:25 +01002557 new_block->offset = find_ram_offset(size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002558 if (host) {
2559 new_block->host = host;
Huang Yingcd19cfa2011-03-02 08:56:19 +01002560 new_block->flags |= RAM_PREALLOC_MASK;
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002561 } else {
2562 if (mem_path) {
2563#if defined (__linux__) && !defined(TARGET_S390X)
2564 new_block->host = file_ram_alloc(new_block, size, mem_path);
2565 if (!new_block->host) {
2566 new_block->host = qemu_vmalloc(size);
Luiz Capitulino8490fc72012-09-05 16:50:16 -03002567 memory_try_enable_merging(new_block->host, size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002568 }
2569#else
2570 fprintf(stderr, "-mem-path option unsupported\n");
2571 exit(1);
2572#endif
2573 } else {
Jan Kiszka868bb332011-06-21 22:59:09 +02002574 if (xen_enabled()) {
Avi Kivityfce537d2011-12-18 15:48:55 +02002575 xen_ram_alloc(new_block->offset, size, mr);
Christian Borntraegerfdec9912012-06-15 05:10:30 +00002576 } else if (kvm_enabled()) {
2577 /* some s390/kvm configurations have special constraints */
2578 new_block->host = kvm_vmalloc(size);
Jun Nakajima432d2682010-08-31 16:41:25 +01002579 } else {
2580 new_block->host = qemu_vmalloc(size);
2581 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03002582 memory_try_enable_merging(new_block->host, size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002583 }
2584 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06002585 new_block->length = size;
2586
2587 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2588
Anthony Liguori7267c092011-08-20 22:09:37 -05002589 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
Cam Macdonell84b89d72010-07-26 18:10:57 -06002590 last_ram_offset() >> TARGET_PAGE_BITS);
Igor Mitsyanko5fda0432012-08-10 18:45:11 +04002591 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2592 0, size >> TARGET_PAGE_BITS);
Juan Quintela1720aee2012-06-22 13:14:17 +02002593 cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002594
Jason Baronddb97f12012-08-02 15:44:16 -04002595 qemu_ram_setup_dump(new_block->host, size);
Luiz Capitulinoad0b5322012-10-05 16:47:57 -03002596 qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
Jason Baronddb97f12012-08-02 15:44:16 -04002597
Cam Macdonell84b89d72010-07-26 18:10:57 -06002598 if (kvm_enabled())
2599 kvm_setup_guest_memory(new_block->host, size);
2600
2601 return new_block->offset;
2602}
2603
Avi Kivityc5705a72011-12-20 15:59:12 +02002604ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
pbrook94a6b542009-04-11 17:15:54 +00002605{
Avi Kivityc5705a72011-12-20 15:59:12 +02002606 return qemu_ram_alloc_from_ptr(size, NULL, mr);
pbrook94a6b542009-04-11 17:15:54 +00002607}
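
/*
 * Typical caller (sketch, mirroring memory_region_init_ram() in
 * memory.c; "mydev.ram" is an invented name, and the real wrapper also
 * sets mr->ram, mr->terminates and a destructor):
 *
 *     MemoryRegion *mr = g_new0(MemoryRegion, 1);
 *     memory_region_init(mr, "mydev.ram", size);
 *     mr->ram_addr = qemu_ram_alloc(size, mr);
 *
 * Device code normally goes through the memory_region_* wrappers rather
 * than calling qemu_ram_alloc() directly.
 */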
bellarde9a1ab12007-02-08 23:08:38 +00002608
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002609void qemu_ram_free_from_ptr(ram_addr_t addr)
2610{
2611 RAMBlock *block;
2612
2613 QLIST_FOREACH(block, &ram_list.blocks, next) {
2614 if (addr == block->offset) {
2615 QLIST_REMOVE(block, next);
Anthony Liguori7267c092011-08-20 22:09:37 -05002616 g_free(block);
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002617 return;
2618 }
2619 }
2620}
2621
Anthony Liguoric227f092009-10-01 16:12:16 -05002622void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00002623{
Alex Williamson04b16652010-07-02 11:13:17 -06002624 RAMBlock *block;
2625
2626 QLIST_FOREACH(block, &ram_list.blocks, next) {
2627 if (addr == block->offset) {
2628 QLIST_REMOVE(block, next);
Huang Yingcd19cfa2011-03-02 08:56:19 +01002629 if (block->flags & RAM_PREALLOC_MASK) {
2630 ;
2631 } else if (mem_path) {
Alex Williamson04b16652010-07-02 11:13:17 -06002632#if defined (__linux__) && !defined(TARGET_S390X)
2633 if (block->fd) {
2634 munmap(block->host, block->length);
2635 close(block->fd);
2636 } else {
2637 qemu_vfree(block->host);
2638 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01002639#else
2640 abort();
Alex Williamson04b16652010-07-02 11:13:17 -06002641#endif
2642 } else {
2643#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2644 munmap(block->host, block->length);
2645#else
Jan Kiszka868bb332011-06-21 22:59:09 +02002646 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002647 xen_invalidate_map_cache_entry(block->host);
Jun Nakajima432d2682010-08-31 16:41:25 +01002648 } else {
2649 qemu_vfree(block->host);
2650 }
Alex Williamson04b16652010-07-02 11:13:17 -06002651#endif
2652 }
Anthony Liguori7267c092011-08-20 22:09:37 -05002653 g_free(block);
Alex Williamson04b16652010-07-02 11:13:17 -06002654 return;
2655 }
2656 }
2657
bellarde9a1ab12007-02-08 23:08:38 +00002658}
2659
Huang Yingcd19cfa2011-03-02 08:56:19 +01002660#ifndef _WIN32
2661void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2662{
2663 RAMBlock *block;
2664 ram_addr_t offset;
2665 int flags;
2666 void *area, *vaddr;
2667
2668 QLIST_FOREACH(block, &ram_list.blocks, next) {
2669 offset = addr - block->offset;
2670 if (offset < block->length) {
2671 vaddr = block->host + offset;
2672 if (block->flags & RAM_PREALLOC_MASK) {
2673 ;
2674 } else {
2675 flags = MAP_FIXED;
2676 munmap(vaddr, length);
2677 if (mem_path) {
2678#if defined(__linux__) && !defined(TARGET_S390X)
2679 if (block->fd) {
2680#ifdef MAP_POPULATE
2681 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
2682 MAP_PRIVATE;
2683#else
2684 flags |= MAP_PRIVATE;
2685#endif
2686 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2687 flags, block->fd, offset);
2688 } else {
2689 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2690 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2691 flags, -1, 0);
2692 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01002693#else
2694 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01002695#endif
2696 } else {
2697#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2698 flags |= MAP_SHARED | MAP_ANONYMOUS;
2699 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
2700 flags, -1, 0);
2701#else
2702 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2703 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2704 flags, -1, 0);
2705#endif
2706 }
2707 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002708 fprintf(stderr, "Could not remap addr: "
2709 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01002710 length, addr);
2711 exit(1);
2712 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03002713 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04002714 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01002715 }
2716 return;
2717 }
2718 }
2719}
2720#endif /* !_WIN32 */
2721
pbrookdc828ca2009-04-09 22:21:07 +00002722/* Return a host pointer to ram allocated with qemu_ram_alloc.
pbrook5579c7f2009-04-11 14:47:08 +00002723 With the exception of the softmmu code in this file, this should
2724 only be used for local memory (e.g. video ram) that the device owns,
2725 and knows it isn't going to access beyond the end of the block.
2726
2727 It should not be used for general purpose DMA.
2728 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2729 */
Anthony Liguoric227f092009-10-01 16:12:16 -05002730void *qemu_get_ram_ptr(ram_addr_t addr)
pbrookdc828ca2009-04-09 22:21:07 +00002731{
pbrook94a6b542009-04-11 17:15:54 +00002732 RAMBlock *block;
2733
Alex Williamsonf471a172010-06-11 11:11:42 -06002734 QLIST_FOREACH(block, &ram_list.blocks, next) {
2735 if (addr - block->offset < block->length) {
Vincent Palatin7d82af32011-03-10 15:47:46 -05002736 /* Move this entry to the start of the list. */
2737 if (block != QLIST_FIRST(&ram_list.blocks)) {
2738 QLIST_REMOVE(block, next);
2739 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
2740 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002741 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01002742 /* We need to check whether the requested address is in RAM
2743 * because we don't want to map the entire memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01002744 * In that case just map until the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01002745 */
2746 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002747 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01002748 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002749 block->host =
2750 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01002751 }
2752 }
Alex Williamsonf471a172010-06-11 11:11:42 -06002753 return block->host + (addr - block->offset);
2754 }
pbrook94a6b542009-04-11 17:15:54 +00002755 }
Alex Williamsonf471a172010-06-11 11:11:42 -06002756
2757 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2758 abort();
2759
2760 return NULL;
pbrookdc828ca2009-04-09 22:21:07 +00002761}
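
/*
 * Example of the intended use (sketch): a display device that owns its
 * VRAM block can cache the host pointer once, where "vram_offset" is the
 * ram_addr_t returned by qemu_ram_alloc():
 *
 *     uint8_t *vram = qemu_get_ram_ptr(vram_offset);
 *
 * Guest-controlled addresses must instead go through
 * cpu_physical_memory_map()/cpu_physical_memory_rw().
 */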
2762
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02002763/* Return a host pointer to ram allocated with qemu_ram_alloc.
2764 * Same as qemu_get_ram_ptr, but avoids reordering the ramblocks.
2765 */
Blue Swirl8b9c99d2012-10-28 11:04:51 +00002766static void *qemu_safe_ram_ptr(ram_addr_t addr)
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02002767{
2768 RAMBlock *block;
2769
2770 QLIST_FOREACH(block, &ram_list.blocks, next) {
2771 if (addr - block->offset < block->length) {
Jan Kiszka868bb332011-06-21 22:59:09 +02002772 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01002773 /* We need to check whether the requested address is in RAM
2774 * because we don't want to map the entire memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01002775 * In that case just map until the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01002776 */
2777 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002778 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01002779 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002780 block->host =
2781 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01002782 }
2783 }
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02002784 return block->host + (addr - block->offset);
2785 }
2786 }
2787
2788 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2789 abort();
2790
2791 return NULL;
2792}
2793
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002794/* Return a host pointer to the guest's RAM. Similar to qemu_get_ram_ptr,
2795 * but takes a size argument. */
Blue Swirl8b9c99d2012-10-28 11:04:51 +00002796static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002797{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002798 if (*size == 0) {
2799 return NULL;
2800 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002801 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002802 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02002803 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002804 RAMBlock *block;
2805
2806 QLIST_FOREACH(block, &ram_list.blocks, next) {
2807 if (addr - block->offset < block->length) {
2808 if (addr - block->offset + *size > block->length)
2809 *size = block->length - addr + block->offset;
2810 return block->host + (addr - block->offset);
2811 }
2812 }
2813
2814 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2815 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002816 }
2817}
2818
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002819void qemu_put_ram_ptr(void *addr)
2820{
2821 trace_qemu_put_ram_ptr(addr);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002822}
2823
Marcelo Tosattie8902612010-10-11 15:31:19 -03002824int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00002825{
pbrook94a6b542009-04-11 17:15:54 +00002826 RAMBlock *block;
2827 uint8_t *host = ptr;
2828
Jan Kiszka868bb332011-06-21 22:59:09 +02002829 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002830 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Stefano Stabellini712c2b42011-05-19 18:35:46 +01002831 return 0;
2832 }
2833
Alex Williamsonf471a172010-06-11 11:11:42 -06002834 QLIST_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01002835 /* This case happens when the block is not mapped. */
2836 if (block->host == NULL) {
2837 continue;
2838 }
Alex Williamsonf471a172010-06-11 11:11:42 -06002839 if (host - block->host < block->length) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03002840 *ram_addr = block->offset + (host - block->host);
2841 return 0;
Alex Williamsonf471a172010-06-11 11:11:42 -06002842 }
pbrook94a6b542009-04-11 17:15:54 +00002843 }
Jun Nakajima432d2682010-08-31 16:41:25 +01002844
Marcelo Tosattie8902612010-10-11 15:31:19 -03002845 return -1;
2846}
Alex Williamsonf471a172010-06-11 11:11:42 -06002847
Marcelo Tosattie8902612010-10-11 15:31:19 -03002848/* Some of the softmmu routines need to translate from a host pointer
2849 (typically a TLB entry) back to a ram offset. */
2850ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
2851{
2852 ram_addr_t ram_addr;
Alex Williamsonf471a172010-06-11 11:11:42 -06002853
Marcelo Tosattie8902612010-10-11 15:31:19 -03002854 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
2855 fprintf(stderr, "Bad ram pointer %p\n", ptr);
2856 abort();
2857 }
2858 return ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00002859}
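
/*
 * Example (sketch): a softmmu-style caller can recover a ram_addr_t for
 * dirty tracking from a TLB-derived host pointer:
 *
 *     ram_addr_t ra = qemu_ram_addr_from_host_nofail(host_ptr);
 *     cpu_physical_memory_set_dirty_flags(ra, 0xff & ~CODE_DIRTY_FLAG);
 *
 * "host_ptr" is hypothetical and must point into a registered RAMBlock,
 * or the call aborts.
 */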
2860
Avi Kivitya8170e52012-10-23 12:30:10 +02002861static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002862 unsigned size)
bellard33417e72003-08-10 21:47:01 +00002863{
pbrook67d3b952006-12-18 05:03:52 +00002864#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00002865 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
pbrook67d3b952006-12-18 05:03:52 +00002866#endif
Richard Henderson5b450402011-04-18 16:13:12 -07002867#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002868 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00002869#endif
2870 return 0;
2871}
2872
Avi Kivitya8170e52012-10-23 12:30:10 +02002873static void unassigned_mem_write(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002874 uint64_t val, unsigned size)
blueswir1e18231a2008-10-06 18:46:28 +00002875{
2876#ifdef DEBUG_UNASSIGNED
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002877 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
blueswir1e18231a2008-10-06 18:46:28 +00002878#endif
Richard Henderson5b450402011-04-18 16:13:12 -07002879#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002880 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00002881#endif
2882}
2883
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002884static const MemoryRegionOps unassigned_mem_ops = {
2885 .read = unassigned_mem_read,
2886 .write = unassigned_mem_write,
2887 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00002888};
2889
Avi Kivitya8170e52012-10-23 12:30:10 +02002890static uint64_t error_mem_read(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002891 unsigned size)
2892{
2893 abort();
2894}
2895
Avi Kivitya8170e52012-10-23 12:30:10 +02002896static void error_mem_write(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002897 uint64_t value, unsigned size)
2898{
2899 abort();
2900}
2901
2902static const MemoryRegionOps error_mem_ops = {
2903 .read = error_mem_read,
2904 .write = error_mem_write,
2905 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00002906};
2907
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002908static const MemoryRegionOps rom_mem_ops = {
2909 .read = error_mem_read,
2910 .write = unassigned_mem_write,
2911 .endianness = DEVICE_NATIVE_ENDIAN,
2912};
2913
Avi Kivitya8170e52012-10-23 12:30:10 +02002914static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002915 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00002916{
bellard3a7d9292005-08-21 09:26:42 +00002917 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002918 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00002919 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2920#if !defined(CONFIG_USER_ONLY)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002921 tb_invalidate_phys_page_fast(ram_addr, size);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002922 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00002923#endif
2924 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002925 switch (size) {
2926 case 1:
2927 stb_p(qemu_get_ram_ptr(ram_addr), val);
2928 break;
2929 case 2:
2930 stw_p(qemu_get_ram_ptr(ram_addr), val);
2931 break;
2932 case 4:
2933 stl_p(qemu_get_ram_ptr(ram_addr), val);
2934 break;
2935 default:
2936 abort();
2937 }
bellardf23db162005-08-21 19:12:28 +00002938 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002939 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00002940 /* we remove the notdirty callback only if the code has been
2941 flushed */
2942 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00002943 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00002944}
2945
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002946static const MemoryRegionOps notdirty_mem_ops = {
2947 .read = error_mem_read,
2948 .write = notdirty_mem_write,
2949 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00002950};
2951
pbrook0f459d12008-06-09 00:20:13 +00002952/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00002953static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00002954{
Andreas Färber9349b4f2012-03-14 01:38:32 +01002955 CPUArchState *env = cpu_single_env;
aliguori06d55cc2008-11-18 20:24:06 +00002956 target_ulong pc, cs_base;
2957 TranslationBlock *tb;
pbrook0f459d12008-06-09 00:20:13 +00002958 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00002959 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00002960 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00002961
aliguori06d55cc2008-11-18 20:24:06 +00002962 if (env->watchpoint_hit) {
2963 /* We re-entered the check after replacing the TB. Now raise
2964 * the debug interrupt so that it will trigger after the
2965 * current instruction. */
2966 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2967 return;
2968 }
pbrook2e70f6e2008-06-29 01:03:05 +00002969 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002970 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00002971 if ((vaddr == (wp->vaddr & len_mask) ||
2972 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00002973 wp->flags |= BP_WATCHPOINT_HIT;
2974 if (!env->watchpoint_hit) {
2975 env->watchpoint_hit = wp;
2976 tb = tb_find_pc(env->mem_io_pc);
2977 if (!tb) {
2978 cpu_abort(env, "check_watchpoint: could not find TB for "
2979 "pc=%p", (void *)env->mem_io_pc);
2980 }
Stefan Weil618ba8e2011-04-18 06:39:53 +00002981 cpu_restore_state(tb, env, env->mem_io_pc);
aliguori6e140f22008-11-18 20:37:55 +00002982 tb_phys_invalidate(tb, -1);
2983 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2984 env->exception_index = EXCP_DEBUG;
Max Filippov488d6572012-01-29 02:24:39 +04002985 cpu_loop_exit(env);
aliguori6e140f22008-11-18 20:37:55 +00002986 } else {
2987 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2988 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
Max Filippov488d6572012-01-29 02:24:39 +04002989 cpu_resume_from_signal(env, NULL);
aliguori6e140f22008-11-18 20:37:55 +00002990 }
aliguori06d55cc2008-11-18 20:24:06 +00002991 }
aliguori6e140f22008-11-18 20:37:55 +00002992 } else {
2993 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00002994 }
2995 }
2996}
2997
pbrook6658ffb2007-03-16 23:58:11 +00002998/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2999 so these check for a hit then pass through to the normal out-of-line
3000 phys routines. */
Avi Kivitya8170e52012-10-23 12:30:10 +02003001static uint64_t watch_mem_read(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02003002 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00003003{
Avi Kivity1ec9b902012-01-02 12:47:48 +02003004 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
3005 switch (size) {
3006 case 1: return ldub_phys(addr);
3007 case 2: return lduw_phys(addr);
3008 case 4: return ldl_phys(addr);
3009 default: abort();
3010 }
pbrook6658ffb2007-03-16 23:58:11 +00003011}
3012
Avi Kivitya8170e52012-10-23 12:30:10 +02003013static void watch_mem_write(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02003014 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00003015{
Avi Kivity1ec9b902012-01-02 12:47:48 +02003016 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
3017 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04003018 case 1:
3019 stb_phys(addr, val);
3020 break;
3021 case 2:
3022 stw_phys(addr, val);
3023 break;
3024 case 4:
3025 stl_phys(addr, val);
3026 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02003027 default: abort();
3028 }
pbrook6658ffb2007-03-16 23:58:11 +00003029}
3030
Avi Kivity1ec9b902012-01-02 12:47:48 +02003031static const MemoryRegionOps watch_mem_ops = {
3032 .read = watch_mem_read,
3033 .write = watch_mem_write,
3034 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00003035};
pbrook6658ffb2007-03-16 23:58:11 +00003036
Avi Kivitya8170e52012-10-23 12:30:10 +02003037static uint64_t subpage_read(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02003038 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003039{
Avi Kivity70c68e42012-01-02 12:32:48 +02003040 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003041 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02003042 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00003043#if defined(DEBUG_SUBPAGE)
3044 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3045 mmio, len, addr, idx);
3046#endif
blueswir1db7b5422007-05-26 17:36:03 +00003047
Avi Kivity5312bd82012-02-12 18:32:55 +02003048 section = &phys_sections[mmio->sub_section[idx]];
3049 addr += mmio->base;
3050 addr -= section->offset_within_address_space;
3051 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02003052 return io_mem_read(section->mr, addr, len);
blueswir1db7b5422007-05-26 17:36:03 +00003053}
3054
Avi Kivitya8170e52012-10-23 12:30:10 +02003055static void subpage_write(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02003056 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003057{
Avi Kivity70c68e42012-01-02 12:32:48 +02003058 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003059 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02003060 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00003061#if defined(DEBUG_SUBPAGE)
Avi Kivity70c68e42012-01-02 12:32:48 +02003062 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
3063 " idx %d value %"PRIx64"\n",
Richard Hendersonf6405242010-04-22 16:47:31 -07003064 __func__, mmio, len, addr, idx, value);
blueswir1db7b5422007-05-26 17:36:03 +00003065#endif
Richard Hendersonf6405242010-04-22 16:47:31 -07003066
Avi Kivity5312bd82012-02-12 18:32:55 +02003067 section = &phys_sections[mmio->sub_section[idx]];
3068 addr += mmio->base;
3069 addr -= section->offset_within_address_space;
3070 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02003071 io_mem_write(section->mr, addr, value, len);
blueswir1db7b5422007-05-26 17:36:03 +00003072}
3073
Avi Kivity70c68e42012-01-02 12:32:48 +02003074static const MemoryRegionOps subpage_ops = {
3075 .read = subpage_read,
3076 .write = subpage_write,
3077 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00003078};
3079
Avi Kivitya8170e52012-10-23 12:30:10 +02003080static uint64_t subpage_ram_read(void *opaque, hwaddr addr,
Avi Kivityde712f92012-01-02 12:41:07 +02003081 unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01003082{
3083 ram_addr_t raddr = addr;
3084 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02003085 switch (size) {
3086 case 1: return ldub_p(ptr);
3087 case 2: return lduw_p(ptr);
3088 case 4: return ldl_p(ptr);
3089 default: abort();
3090 }
Andreas Färber56384e82011-11-30 16:26:21 +01003091}
3092
Avi Kivitya8170e52012-10-23 12:30:10 +02003093static void subpage_ram_write(void *opaque, hwaddr addr,
Avi Kivityde712f92012-01-02 12:41:07 +02003094 uint64_t value, unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01003095{
3096 ram_addr_t raddr = addr;
3097 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02003098 switch (size) {
3099 case 1: return stb_p(ptr, value);
3100 case 2: return stw_p(ptr, value);
3101 case 4: return stl_p(ptr, value);
3102 default: abort();
3103 }
Andreas Färber56384e82011-11-30 16:26:21 +01003104}
3105
Avi Kivityde712f92012-01-02 12:41:07 +02003106static const MemoryRegionOps subpage_ram_ops = {
3107 .read = subpage_ram_read,
3108 .write = subpage_ram_write,
3109 .endianness = DEVICE_NATIVE_ENDIAN,
Andreas Färber56384e82011-11-30 16:26:21 +01003110};
3111
Anthony Liguoric227f092009-10-01 16:12:16 -05003112static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02003113 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00003114{
3115 int idx, eidx;
3116
3117 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3118 return -1;
3119 idx = SUBPAGE_IDX(start);
3120 eidx = SUBPAGE_IDX(end);
3121#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00003122 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", __func__,
blueswir1db7b5422007-05-26 17:36:03 +00003123 mmio, start, end, idx, eidx, section);
3124#endif
Avi Kivity5312bd82012-02-12 18:32:55 +02003125 if (memory_region_is_ram(phys_sections[section].mr)) {
3126 MemoryRegionSection new_section = phys_sections[section];
3127 new_section.mr = &io_mem_subpage_ram;
3128 section = phys_section_add(&new_section);
Andreas Färber56384e82011-11-30 16:26:21 +01003129 }
blueswir1db7b5422007-05-26 17:36:03 +00003130 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02003131 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00003132 }
3133
3134 return 0;
3135}
3136
Avi Kivitya8170e52012-10-23 12:30:10 +02003137static subpage_t *subpage_init(hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00003138{
Anthony Liguoric227f092009-10-01 16:12:16 -05003139 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00003140
Anthony Liguori7267c092011-08-20 22:09:37 -05003141 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00003142
3143 mmio->base = base;
Avi Kivity70c68e42012-01-02 12:32:48 +02003144 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
3145 "subpage", TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02003146 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00003147#if defined(DEBUG_SUBPAGE)
aliguori1eec6142009-02-05 22:06:18 +00003148 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
3149 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00003150#endif
Avi Kivity0f0cb162012-02-13 17:14:32 +02003151 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
blueswir1db7b5422007-05-26 17:36:03 +00003152
3153 return mmio;
3154}
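
/*
 * Worked example (hypothetical): mapping a 0x100-byte device at offset
 * 0x800 of an otherwise unassigned page would amount to
 *
 *     subpage_register(mmio, 0x800, 0x8ff, dev_section);
 *
 * where "dev_section" is an invented uint16_t index obtained from
 * phys_section_add(). Every sub_section[] slot outside [0x800, 0x8ff]
 * keeps pointing at phys_section_unassigned, as initialized in
 * subpage_init() above.
 */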
3155
Avi Kivity5312bd82012-02-12 18:32:55 +02003156static uint16_t dummy_section(MemoryRegion *mr)
3157{
3158 MemoryRegionSection section = {
3159 .mr = mr,
3160 .offset_within_address_space = 0,
3161 .offset_within_region = 0,
3162 .size = UINT64_MAX,
3163 };
3164
3165 return phys_section_add(&section);
3166}
3167
Avi Kivitya8170e52012-10-23 12:30:10 +02003168MemoryRegion *iotlb_to_region(hwaddr index)
Avi Kivityaa102232012-03-08 17:06:55 +02003169{
Avi Kivity37ec01d2012-03-08 18:08:35 +02003170 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02003171}
3172
Avi Kivitye9179ce2009-06-14 11:38:52 +03003173static void io_mem_init(void)
3174{
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003175 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003176 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
3177 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
3178 "unassigned", UINT64_MAX);
3179 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
3180 "notdirty", UINT64_MAX);
Avi Kivityde712f92012-01-02 12:41:07 +02003181 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
3182 "subpage-ram", UINT64_MAX);
Avi Kivity1ec9b902012-01-02 12:47:48 +02003183 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
3184 "watch", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003185}
3186
Avi Kivityac1970f2012-10-03 16:22:53 +02003187static void mem_begin(MemoryListener *listener)
3188{
3189 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
3190
3191 destroy_all_mappings(d);
3192 d->phys_map.ptr = PHYS_MAP_NODE_NIL;
3193}
3194
Avi Kivity50c1e142012-02-08 21:36:02 +02003195static void core_begin(MemoryListener *listener)
3196{
Avi Kivity5312bd82012-02-12 18:32:55 +02003197 phys_sections_clear();
3198 phys_section_unassigned = dummy_section(&io_mem_unassigned);
Avi Kivityaa102232012-03-08 17:06:55 +02003199 phys_section_notdirty = dummy_section(&io_mem_notdirty);
3200 phys_section_rom = dummy_section(&io_mem_rom);
3201 phys_section_watch = dummy_section(&io_mem_watch);
Avi Kivity50c1e142012-02-08 21:36:02 +02003202}
3203
Avi Kivity1d711482012-10-02 18:54:45 +02003204static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02003205{
Andreas Färber9349b4f2012-03-14 01:38:32 +01003206 CPUArchState *env;
Avi Kivity117712c2012-02-12 21:23:17 +02003207
3208 /* since each CPU stores ram addresses in its TLB cache, we must
3209 reset the modified entries */
3210 /* XXX: slow ! */
3211 for(env = first_cpu; env != NULL; env = env->next_cpu) {
3212 tlb_flush(env, 1);
3213 }
Avi Kivity50c1e142012-02-08 21:36:02 +02003214}
3215
Avi Kivity93632742012-02-08 16:54:16 +02003216static void core_log_global_start(MemoryListener *listener)
3217{
3218 cpu_physical_memory_set_dirty_tracking(1);
3219}
3220
3221static void core_log_global_stop(MemoryListener *listener)
3222{
3223 cpu_physical_memory_set_dirty_tracking(0);
3224}
3225
Avi Kivity4855d412012-02-08 21:16:05 +02003226static void io_region_add(MemoryListener *listener,
3227 MemoryRegionSection *section)
3228{
Avi Kivitya2d33522012-03-05 17:40:12 +02003229 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
3230
3231 mrio->mr = section->mr;
3232 mrio->offset = section->offset_within_region;
3233 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
Avi Kivity4855d412012-02-08 21:16:05 +02003234 section->offset_within_address_space, section->size);
Avi Kivitya2d33522012-03-05 17:40:12 +02003235 ioport_register(&mrio->iorange);
Avi Kivity4855d412012-02-08 21:16:05 +02003236}
3237
3238static void io_region_del(MemoryListener *listener,
3239 MemoryRegionSection *section)
3240{
3241 isa_unassign_ioport(section->offset_within_address_space, section->size);
3242}
3243
Avi Kivity93632742012-02-08 16:54:16 +02003244static MemoryListener core_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02003245 .begin = core_begin,
Avi Kivity93632742012-02-08 16:54:16 +02003246 .log_global_start = core_log_global_start,
3247 .log_global_stop = core_log_global_stop,
Avi Kivityac1970f2012-10-03 16:22:53 +02003248 .priority = 1,
Avi Kivity93632742012-02-08 16:54:16 +02003249};
3250
Avi Kivity4855d412012-02-08 21:16:05 +02003251static MemoryListener io_memory_listener = {
3252 .region_add = io_region_add,
3253 .region_del = io_region_del,
Avi Kivity4855d412012-02-08 21:16:05 +02003254 .priority = 0,
3255};
3256
Avi Kivity1d711482012-10-02 18:54:45 +02003257static MemoryListener tcg_memory_listener = {
3258 .commit = tcg_commit,
3259};
3260
Avi Kivityac1970f2012-10-03 16:22:53 +02003261void address_space_init_dispatch(AddressSpace *as)
3262{
3263 AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
3264
3265 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
3266 d->listener = (MemoryListener) {
3267 .begin = mem_begin,
3268 .region_add = mem_add,
3269 .region_nop = mem_add,
3270 .priority = 0,
3271 };
3272 as->dispatch = d;
3273 memory_listener_register(&d->listener, as);
3274}
3275
Avi Kivity83f3c252012-10-07 12:59:55 +02003276void address_space_destroy_dispatch(AddressSpace *as)
3277{
3278 AddressSpaceDispatch *d = as->dispatch;
3279
3280 memory_listener_unregister(&d->listener);
3281 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
3282 g_free(d);
3283 as->dispatch = NULL;
3284}
3285
Avi Kivity62152b82011-07-26 14:26:14 +03003286static void memory_map_init(void)
3287{
Anthony Liguori7267c092011-08-20 22:09:37 -05003288 system_memory = g_malloc(sizeof(*system_memory));
Avi Kivity8417ceb2011-08-03 11:56:14 +03003289 memory_region_init(system_memory, "system", INT64_MAX);
Avi Kivity2673a5d2012-10-02 18:49:28 +02003290 address_space_init(&address_space_memory, system_memory);
3291 address_space_memory.name = "memory";
Avi Kivity309cb472011-08-08 16:09:03 +03003292
Anthony Liguori7267c092011-08-20 22:09:37 -05003293 system_io = g_malloc(sizeof(*system_io));
Avi Kivity309cb472011-08-08 16:09:03 +03003294 memory_region_init(system_io, "io", 65536);
Avi Kivity2673a5d2012-10-02 18:49:28 +02003295 address_space_init(&address_space_io, system_io);
3296 address_space_io.name = "I/O";
Avi Kivity93632742012-02-08 16:54:16 +02003297
Avi Kivityf6790af2012-10-02 20:13:51 +02003298 memory_listener_register(&core_memory_listener, &address_space_memory);
3299 memory_listener_register(&io_memory_listener, &address_space_io);
3300 memory_listener_register(&tcg_memory_listener, &address_space_memory);
Peter Maydell9e119082012-10-29 11:34:32 +10003301
3302 dma_context_init(&dma_context_memory, &address_space_memory,
3303 NULL, NULL, NULL);
Avi Kivity62152b82011-07-26 14:26:14 +03003304}
3305
3306MemoryRegion *get_system_memory(void)
3307{
3308 return system_memory;
3309}
3310
Avi Kivity309cb472011-08-08 16:09:03 +03003311MemoryRegion *get_system_io(void)
3312{
3313 return system_io;
3314}
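
/*
 * Example board wiring (sketch; "board.ram" and ram_size are invented):
 * RAM created through the memory API is mapped into the system address
 * space set up by memory_map_init():
 *
 *     MemoryRegion *ram = g_new(MemoryRegion, 1);
 *     memory_region_init_ram(ram, "board.ram", ram_size);
 *     memory_region_add_subregion(get_system_memory(), 0, ram);
 */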
3315
pbrooke2eef172008-06-08 01:09:01 +00003316#endif /* !defined(CONFIG_USER_ONLY) */
3317
bellard13eb76e2004-01-24 15:23:36 +00003318/* physical memory access (slow version, mainly for debug) */
3319#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +01003320int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00003321 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003322{
3323 int l, flags;
3324 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00003325 void * p;
bellard13eb76e2004-01-24 15:23:36 +00003326
3327 while (len > 0) {
3328 page = addr & TARGET_PAGE_MASK;
3329 l = (page + TARGET_PAGE_SIZE) - addr;
3330 if (l > len)
3331 l = len;
3332 flags = page_get_flags(page);
3333 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00003334 return -1;
bellard13eb76e2004-01-24 15:23:36 +00003335 if (is_write) {
3336 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00003337 return -1;
bellard579a97f2007-11-11 14:26:47 +00003338 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003339 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00003340 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003341 memcpy(p, buf, l);
3342 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00003343 } else {
3344 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00003345 return -1;
bellard579a97f2007-11-11 14:26:47 +00003346 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003347 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00003348 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003349 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00003350 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00003351 }
3352 len -= l;
3353 buf += l;
3354 addr += l;
3355 }
Paul Brooka68fe892010-03-01 00:08:59 +00003356 return 0;
bellard13eb76e2004-01-24 15:23:36 +00003357}
bellard8df1cd02005-01-28 22:37:22 +00003358
bellard13eb76e2004-01-24 15:23:36 +00003359#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00003360
Avi Kivitya8170e52012-10-23 12:30:10 +02003361static void invalidate_and_set_dirty(hwaddr addr,
3362 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00003363{
3364 if (!cpu_physical_memory_is_dirty(addr)) {
3365 /* invalidate code */
3366 tb_invalidate_phys_page_range(addr, addr + length, 0);
3367 /* set dirty bit */
3368 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
3369 }
Anthony PERARDe2269392012-10-03 13:49:22 +00003370 xen_modified_memory(addr, length);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00003371}
3372
Avi Kivitya8170e52012-10-23 12:30:10 +02003373void address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02003374 int len, bool is_write)
bellard13eb76e2004-01-24 15:23:36 +00003375{
Avi Kivityac1970f2012-10-03 16:22:53 +02003376 AddressSpaceDispatch *d = as->dispatch;
Avi Kivity37ec01d2012-03-08 18:08:35 +02003377 int l;
bellard13eb76e2004-01-24 15:23:36 +00003378 uint8_t *ptr;
3379 uint32_t val;
Avi Kivitya8170e52012-10-23 12:30:10 +02003380 hwaddr page;
Avi Kivityf3705d52012-03-08 16:16:34 +02003381 MemoryRegionSection *section;
ths3b46e622007-09-17 08:09:54 +00003382
bellard13eb76e2004-01-24 15:23:36 +00003383 while (len > 0) {
3384 page = addr & TARGET_PAGE_MASK;
3385 l = (page + TARGET_PAGE_SIZE) - addr;
3386 if (l > len)
3387 l = len;
Avi Kivityac1970f2012-10-03 16:22:53 +02003388 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003389
bellard13eb76e2004-01-24 15:23:36 +00003390 if (is_write) {
Avi Kivityf3705d52012-03-08 16:16:34 +02003391 if (!memory_region_is_ram(section->mr)) {
Avi Kivitya8170e52012-10-23 12:30:10 +02003392 hwaddr addr1;
Blue Swirlcc5bea62012-04-14 14:56:48 +00003393 addr1 = memory_region_section_addr(section, addr);
bellard6a00d602005-11-21 23:25:50 +00003394 /* XXX: could force cpu_single_env to NULL to avoid
3395 potential bugs */
aurel326c2934d2009-02-18 21:37:17 +00003396 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003397 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003398 val = ldl_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003399 io_mem_write(section->mr, addr1, val, 4);
bellard13eb76e2004-01-24 15:23:36 +00003400 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003401 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003402 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003403 val = lduw_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003404 io_mem_write(section->mr, addr1, val, 2);
bellard13eb76e2004-01-24 15:23:36 +00003405 l = 2;
3406 } else {
bellard1c213d12005-09-03 10:49:04 +00003407 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003408 val = ldub_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003409 io_mem_write(section->mr, addr1, val, 1);
bellard13eb76e2004-01-24 15:23:36 +00003410 l = 1;
3411 }
Avi Kivityf3705d52012-03-08 16:16:34 +02003412 } else if (!section->readonly) {
Anthony PERARD8ca56922011-07-15 04:32:53 +00003413 ram_addr_t addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02003414 addr1 = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003415 + memory_region_section_addr(section, addr);
bellard13eb76e2004-01-24 15:23:36 +00003416 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003417 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00003418 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00003419 invalidate_and_set_dirty(addr1, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003420 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00003421 }
3422 } else {
Blue Swirlcc5bea62012-04-14 14:56:48 +00003423 if (!(memory_region_is_ram(section->mr) ||
3424 memory_region_is_romd(section->mr))) {
Avi Kivitya8170e52012-10-23 12:30:10 +02003425 hwaddr addr1;
bellard13eb76e2004-01-24 15:23:36 +00003426 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00003427 addr1 = memory_region_section_addr(section, addr);
aurel326c2934d2009-02-18 21:37:17 +00003428 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003429 /* 32 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02003430 val = io_mem_read(section->mr, addr1, 4);
bellardc27004e2005-01-03 23:35:10 +00003431 stl_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003432 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003433 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003434 /* 16 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02003435 val = io_mem_read(section->mr, addr1, 2);
bellardc27004e2005-01-03 23:35:10 +00003436 stw_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003437 l = 2;
3438 } else {
bellard1c213d12005-09-03 10:49:04 +00003439 /* 8 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02003440 val = io_mem_read(section->mr, addr1, 1);
bellardc27004e2005-01-03 23:35:10 +00003441 stb_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003442 l = 1;
3443 }
3444 } else {
3445 /* RAM case */
Anthony PERARD0a1b3572012-03-19 15:54:34 +00003446 ptr = qemu_get_ram_ptr(section->mr->ram_addr
Blue Swirlcc5bea62012-04-14 14:56:48 +00003447 + memory_region_section_addr(section,
3448 addr));
Avi Kivityf3705d52012-03-08 16:16:34 +02003449 memcpy(buf, ptr, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003450 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00003451 }
3452 }
3453 len -= l;
3454 buf += l;
3455 addr += l;
3456 }
3457}
bellard8df1cd02005-01-28 22:37:22 +00003458
Avi Kivitya8170e52012-10-23 12:30:10 +02003459void address_space_write(AddressSpace *as, hwaddr addr,
Avi Kivityac1970f2012-10-03 16:22:53 +02003460 const uint8_t *buf, int len)
3461{
3462 address_space_rw(as, addr, (uint8_t *)buf, len, true);
3463}
3464
3465/**
3466 * address_space_read: read from an address space.
3467 *
3468 * @as: #AddressSpace to be accessed
3469 * @addr: address within that address space
3470 * @buf: buffer with the data transferred
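 * @len: length of the data transferred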
3471 */
Avi Kivitya8170e52012-10-23 12:30:10 +02003472void address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02003473{
3474 address_space_rw(as, addr, buf, len, false);
3475}
3476
3477
Avi Kivitya8170e52012-10-23 12:30:10 +02003478void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02003479 int len, int is_write)
3480{
3481 return address_space_rw(&address_space_memory, addr, buf, len, is_write);
3482}
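
/*
 * Example (sketch; 0x1000 is an arbitrary guest physical address):
 * read one page into a scratch buffer through the convenience wrapper:
 *
 *     uint8_t buf[TARGET_PAGE_SIZE];
 *     cpu_physical_memory_rw(0x1000, buf, sizeof(buf), 0);
 *
 * which is equivalent to address_space_rw(&address_space_memory, ...)
 * with is_write = false.
 */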
3483
bellardd0ecd2a2006-04-23 17:14:48 +00003484/* used for ROM loading : can write in RAM and ROM */
Avi Kivitya8170e52012-10-23 12:30:10 +02003485void cpu_physical_memory_write_rom(hwaddr addr,
bellardd0ecd2a2006-04-23 17:14:48 +00003486 const uint8_t *buf, int len)
3487{
Avi Kivityac1970f2012-10-03 16:22:53 +02003488 AddressSpaceDispatch *d = address_space_memory.dispatch;
bellardd0ecd2a2006-04-23 17:14:48 +00003489 int l;
3490 uint8_t *ptr;
Avi Kivitya8170e52012-10-23 12:30:10 +02003491 hwaddr page;
Avi Kivityf3705d52012-03-08 16:16:34 +02003492 MemoryRegionSection *section;
ths3b46e622007-09-17 08:09:54 +00003493
bellardd0ecd2a2006-04-23 17:14:48 +00003494 while (len > 0) {
3495 page = addr & TARGET_PAGE_MASK;
3496 l = (page + TARGET_PAGE_SIZE) - addr;
3497 if (l > len)
3498 l = len;
Avi Kivityac1970f2012-10-03 16:22:53 +02003499 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003500
Blue Swirlcc5bea62012-04-14 14:56:48 +00003501 if (!(memory_region_is_ram(section->mr) ||
3502 memory_region_is_romd(section->mr))) {
bellardd0ecd2a2006-04-23 17:14:48 +00003503 /* do nothing */
3504 } else {
3505 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02003506 addr1 = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003507 + memory_region_section_addr(section, addr);
bellardd0ecd2a2006-04-23 17:14:48 +00003508 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003509 ptr = qemu_get_ram_ptr(addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00003510 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00003511 invalidate_and_set_dirty(addr1, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003512 qemu_put_ram_ptr(ptr);
bellardd0ecd2a2006-04-23 17:14:48 +00003513 }
3514 len -= l;
3515 buf += l;
3516 addr += l;
3517 }
3518}
3519
aliguori6d16c2f2009-01-22 16:59:11 +00003520typedef struct {
3521 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02003522 hwaddr addr;
3523 hwaddr len;
aliguori6d16c2f2009-01-22 16:59:11 +00003524} BounceBuffer;
3525
3526static BounceBuffer bounce;
3527
aliguoriba223c22009-01-22 16:59:16 +00003528typedef struct MapClient {
3529 void *opaque;
3530 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00003531 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00003532} MapClient;
3533
Blue Swirl72cf2d42009-09-12 07:36:22 +00003534static QLIST_HEAD(map_client_list, MapClient) map_client_list
3535 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003536
3537void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3538{
Anthony Liguori7267c092011-08-20 22:09:37 -05003539 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00003540
3541 client->opaque = opaque;
3542 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003543 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00003544 return client;
3545}
3546
Blue Swirl8b9c99d2012-10-28 11:04:51 +00003547static void cpu_unregister_map_client(void *_client)
aliguoriba223c22009-01-22 16:59:16 +00003548{
3549 MapClient *client = (MapClient *)_client;
3550
Blue Swirl72cf2d42009-09-12 07:36:22 +00003551 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05003552 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00003553}
3554
3555static void cpu_notify_map_clients(void)
3556{
3557 MapClient *client;
3558
Blue Swirl72cf2d42009-09-12 07:36:22 +00003559 while (!QLIST_EMPTY(&map_client_list)) {
3560 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003561 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09003562 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00003563 }
3564}
3565
aliguori6d16c2f2009-01-22 16:59:11 +00003566/* Map a physical memory region into a host virtual address.
3567 * May map a subset of the requested range, given by and returned in *plen.
3568 * May return NULL if resources needed to perform the mapping are exhausted.
3569 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00003570 * Use cpu_register_map_client() to know when retrying the map operation is
3571 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00003572 */
Avi Kivityac1970f2012-10-03 16:22:53 +02003573void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02003574 hwaddr addr,
3575 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02003576 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00003577{
Avi Kivityac1970f2012-10-03 16:22:53 +02003578 AddressSpaceDispatch *d = as->dispatch;
Avi Kivitya8170e52012-10-23 12:30:10 +02003579 hwaddr len = *plen;
3580 hwaddr todo = 0;
aliguori6d16c2f2009-01-22 16:59:11 +00003581 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02003582 hwaddr page;
Avi Kivityf3705d52012-03-08 16:16:34 +02003583 MemoryRegionSection *section;
Anthony PERARDf15fbc42011-07-20 08:17:42 +00003584 ram_addr_t raddr = RAM_ADDR_MAX;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003585 ram_addr_t rlen;
3586 void *ret;
aliguori6d16c2f2009-01-22 16:59:11 +00003587
3588 while (len > 0) {
3589 page = addr & TARGET_PAGE_MASK;
3590 l = (page + TARGET_PAGE_SIZE) - addr;
3591 if (l > len)
3592 l = len;
Avi Kivityac1970f2012-10-03 16:22:53 +02003593 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
aliguori6d16c2f2009-01-22 16:59:11 +00003594
Avi Kivityf3705d52012-03-08 16:16:34 +02003595 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003596 if (todo || bounce.buffer) {
aliguori6d16c2f2009-01-22 16:59:11 +00003597 break;
3598 }
3599 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3600 bounce.addr = addr;
3601 bounce.len = l;
3602 if (!is_write) {
Avi Kivityac1970f2012-10-03 16:22:53 +02003603 address_space_read(as, addr, bounce.buffer, l);
aliguori6d16c2f2009-01-22 16:59:11 +00003604 }
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003605
3606 *plen = l;
3607 return bounce.buffer;
aliguori6d16c2f2009-01-22 16:59:11 +00003608 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003609 if (!todo) {
Avi Kivityf3705d52012-03-08 16:16:34 +02003610 raddr = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003611 + memory_region_section_addr(section, addr);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003612 }
aliguori6d16c2f2009-01-22 16:59:11 +00003613
3614 len -= l;
3615 addr += l;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003616 todo += l;
aliguori6d16c2f2009-01-22 16:59:11 +00003617 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003618 rlen = todo;
3619 ret = qemu_ram_ptr_length(raddr, &rlen);
3620 *plen = rlen;
3621 return ret;
aliguori6d16c2f2009-01-22 16:59:11 +00003622}
3623
Avi Kivityac1970f2012-10-03 16:22:53 +02003624/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00003625 * Will also mark the memory as dirty if is_write == 1. access_len gives
3626 * the amount of memory that was actually read or written by the caller.
3627 */
Avi Kivitya8170e52012-10-23 12:30:10 +02003628void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
3629 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00003630{
3631 if (buffer != bounce.buffer) {
3632 if (is_write) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03003633 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00003634 while (access_len) {
3635 unsigned l;
3636 l = TARGET_PAGE_SIZE;
3637 if (l > access_len)
3638 l = access_len;
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00003639 invalidate_and_set_dirty(addr1, l);
aliguori6d16c2f2009-01-22 16:59:11 +00003640 addr1 += l;
3641 access_len -= l;
3642 }
3643 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003644 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003645 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003646 }
aliguori6d16c2f2009-01-22 16:59:11 +00003647 return;
3648 }
3649 if (is_write) {
Avi Kivityac1970f2012-10-03 16:22:53 +02003650 address_space_write(as, bounce.addr, bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00003651 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00003652 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00003653 bounce.buffer = NULL;
aliguoriba223c22009-01-22 16:59:16 +00003654 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00003655}
bellardd0ecd2a2006-04-23 17:14:48 +00003656
Avi Kivitya8170e52012-10-23 12:30:10 +02003657void *cpu_physical_memory_map(hwaddr addr,
3658 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02003659 int is_write)
3660{
3661 return address_space_map(&address_space_memory, addr, plen, is_write);
3662}
3663
Avi Kivitya8170e52012-10-23 12:30:10 +02003664void cpu_physical_memory_unmap(void *buffer, hwaddr len,
3665 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02003666{
3667 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
3668}
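
/*
 * Canonical map/unmap pattern (sketch; "dma_addr", "dma_len", "opaque"
 * and "retry_cb" are hypothetical):
 *
 *     hwaddr plen = dma_len;
 *     void *p = cpu_physical_memory_map(dma_addr, &plen, 1);
 *     if (p) {
 *         // ... produce at most plen bytes into p ...
 *         cpu_physical_memory_unmap(p, plen, 1, plen);
 *     } else {
 *         cpu_register_map_client(opaque, retry_cb);
 *     }
 *
 * A short mapping (plen < dma_len) must be handled by looping.
 */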
3669
bellard8df1cd02005-01-28 22:37:22 +00003670/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02003671static inline uint32_t ldl_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003672 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003673{
bellard8df1cd02005-01-28 22:37:22 +00003674 uint8_t *ptr;
3675 uint32_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02003676 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00003677
Avi Kivityac1970f2012-10-03 16:22:53 +02003678 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003679
Blue Swirlcc5bea62012-04-14 14:56:48 +00003680 if (!(memory_region_is_ram(section->mr) ||
3681 memory_region_is_romd(section->mr))) {
bellard8df1cd02005-01-28 22:37:22 +00003682 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00003683 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003684 val = io_mem_read(section->mr, addr, 4);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003685#if defined(TARGET_WORDS_BIGENDIAN)
3686 if (endian == DEVICE_LITTLE_ENDIAN) {
3687 val = bswap32(val);
3688 }
3689#else
3690 if (endian == DEVICE_BIG_ENDIAN) {
3691 val = bswap32(val);
3692 }
3693#endif
bellard8df1cd02005-01-28 22:37:22 +00003694 } else {
3695 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02003696 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003697 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003698 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003699 switch (endian) {
3700 case DEVICE_LITTLE_ENDIAN:
3701 val = ldl_le_p(ptr);
3702 break;
3703 case DEVICE_BIG_ENDIAN:
3704 val = ldl_be_p(ptr);
3705 break;
3706 default:
3707 val = ldl_p(ptr);
3708 break;
3709 }
bellard8df1cd02005-01-28 22:37:22 +00003710 }
3711 return val;
3712}
3713
Avi Kivitya8170e52012-10-23 12:30:10 +02003714uint32_t ldl_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003715{
3716 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3717}
3718
Avi Kivitya8170e52012-10-23 12:30:10 +02003719uint32_t ldl_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003720{
3721 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3722}
3723
Avi Kivitya8170e52012-10-23 12:30:10 +02003724uint32_t ldl_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003725{
3726 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
3727}
3728
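/*
 * Editor's sketch (hypothetical): reading a 32-bit little-endian device
 * structure in guest memory at a made-up address.  ldl_le_phys()
 * byte-swaps as needed, so the caller always gets a host-order value,
 * independent of TARGET_WORDS_BIGENDIAN.
 */
static uint32_t read_le_reg_example(hwaddr reg_addr)
{
    /* Same result on big- and little-endian targets. */
    return ldl_le_phys(reg_addr);
}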
bellard84b7b8e2005-11-28 21:19:04 +00003729/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02003730static inline uint64_t ldq_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003731 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00003732{
bellard84b7b8e2005-11-28 21:19:04 +00003733 uint8_t *ptr;
3734 uint64_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02003735 MemoryRegionSection *section;
bellard84b7b8e2005-11-28 21:19:04 +00003736
Avi Kivityac1970f2012-10-03 16:22:53 +02003737 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003738
Blue Swirlcc5bea62012-04-14 14:56:48 +00003739 if (!(memory_region_is_ram(section->mr) ||
3740 memory_region_is_romd(section->mr))) {
bellard84b7b8e2005-11-28 21:19:04 +00003741 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00003742 addr = memory_region_section_addr(section, addr);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003743
3744 /* XXX: This is broken when device endian != cpu endian.
3745 Fix this and add an "endian" variable check. */
bellard84b7b8e2005-11-28 21:19:04 +00003746#ifdef TARGET_WORDS_BIGENDIAN
Avi Kivity37ec01d2012-03-08 18:08:35 +02003747 val = io_mem_read(section->mr, addr, 4) << 32;
3748 val |= io_mem_read(section->mr, addr + 4, 4);
bellard84b7b8e2005-11-28 21:19:04 +00003749#else
Avi Kivity37ec01d2012-03-08 18:08:35 +02003750 val = io_mem_read(section->mr, addr, 4);
3751 val |= io_mem_read(section->mr, addr + 4, 4) << 32;
bellard84b7b8e2005-11-28 21:19:04 +00003752#endif
3753 } else {
3754 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02003755 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003756 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003757 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003758 switch (endian) {
3759 case DEVICE_LITTLE_ENDIAN:
3760 val = ldq_le_p(ptr);
3761 break;
3762 case DEVICE_BIG_ENDIAN:
3763 val = ldq_be_p(ptr);
3764 break;
3765 default:
3766 val = ldq_p(ptr);
3767 break;
3768 }
bellard84b7b8e2005-11-28 21:19:04 +00003769 }
3770 return val;
3771}
3772
Avi Kivitya8170e52012-10-23 12:30:10 +02003773uint64_t ldq_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003774{
3775 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3776}
3777
Avi Kivitya8170e52012-10-23 12:30:10 +02003778uint64_t ldq_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003779{
3780 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3781}
3782
Avi Kivitya8170e52012-10-23 12:30:10 +02003783uint64_t ldq_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003784{
3785 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
3786}
3787
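/*
 * Editor's sketch (hypothetical): as the XXX above notes, ldq on an I/O
 * region is issued as two 4-byte reads combined in target order.  A caller
 * that needs a well-defined layout can compose the halves explicitly from
 * fixed-endian 32-bit loads instead:
 */
static uint64_t read_le_qword_as_halves_example(hwaddr addr)
{
    uint64_t lo = ldl_le_phys(addr);
    uint64_t hi = ldl_le_phys(addr + 4);

    return lo | (hi << 32);   /* little-endian 64-bit value, any target */
}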
bellardaab33092005-10-30 20:48:42 +00003788/* XXX: optimize */
Avi Kivitya8170e52012-10-23 12:30:10 +02003789uint32_t ldub_phys(hwaddr addr)
bellardaab33092005-10-30 20:48:42 +00003790{
3791 uint8_t val;
3792 cpu_physical_memory_read(addr, &val, 1);
3793 return val;
3794}
3795
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003796/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02003797static inline uint32_t lduw_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003798 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003799{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003800 uint8_t *ptr;
3801 uint64_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02003802 MemoryRegionSection *section;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003803
Avi Kivityac1970f2012-10-03 16:22:53 +02003804 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003805
Blue Swirlcc5bea62012-04-14 14:56:48 +00003806 if (!(memory_region_is_ram(section->mr) ||
3807 memory_region_is_romd(section->mr))) {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003808 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00003809 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003810 val = io_mem_read(section->mr, addr, 2);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003811#if defined(TARGET_WORDS_BIGENDIAN)
3812 if (endian == DEVICE_LITTLE_ENDIAN) {
3813 val = bswap16(val);
3814 }
3815#else
3816 if (endian == DEVICE_BIG_ENDIAN) {
3817 val = bswap16(val);
3818 }
3819#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003820 } else {
3821 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02003822 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003823 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003824 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003825 switch (endian) {
3826 case DEVICE_LITTLE_ENDIAN:
3827 val = lduw_le_p(ptr);
3828 break;
3829 case DEVICE_BIG_ENDIAN:
3830 val = lduw_be_p(ptr);
3831 break;
3832 default:
3833 val = lduw_p(ptr);
3834 break;
3835 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003836 }
3837 return val;
bellardaab33092005-10-30 20:48:42 +00003838}
3839
Avi Kivitya8170e52012-10-23 12:30:10 +02003840uint32_t lduw_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003841{
3842 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3843}
3844
Avi Kivitya8170e52012-10-23 12:30:10 +02003845uint32_t lduw_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003846{
3847 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3848}
3849
Avi Kivitya8170e52012-10-23 12:30:10 +02003850uint32_t lduw_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003851{
3852 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
3853}
3854
bellard8df1cd02005-01-28 22:37:22 +00003855/* warning: addr must be aligned. The ram page is not marked as dirty
3856 and the code inside is not invalidated. It is useful if the dirty
3857 bits are used to track modified PTEs */
Avi Kivitya8170e52012-10-23 12:30:10 +02003858void stl_phys_notdirty(hwaddr addr, uint32_t val)
bellard8df1cd02005-01-28 22:37:22 +00003859{
bellard8df1cd02005-01-28 22:37:22 +00003860 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02003861 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00003862
Avi Kivityac1970f2012-10-03 16:22:53 +02003863 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003864
Avi Kivityf3705d52012-03-08 16:16:34 +02003865 if (!memory_region_is_ram(section->mr) || section->readonly) {
Blue Swirlcc5bea62012-04-14 14:56:48 +00003866 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003867 if (memory_region_is_ram(section->mr)) {
3868 section = &phys_sections[phys_section_rom];
3869 }
3870 io_mem_write(section->mr, addr, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00003871 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02003872 unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003873 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003874 + memory_region_section_addr(section, addr);
pbrook5579c7f2009-04-11 14:47:08 +00003875 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00003876 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00003877
3878 if (unlikely(in_migration)) {
3879 if (!cpu_physical_memory_is_dirty(addr1)) {
3880 /* invalidate code */
3881 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3882 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003883 cpu_physical_memory_set_dirty_flags(
3884 addr1, (0xff & ~CODE_DIRTY_FLAG));
aliguori74576192008-10-06 14:02:03 +00003885 }
3886 }
bellard8df1cd02005-01-28 22:37:22 +00003887 }
3888}
3889
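/*
 * Editor's sketch (hypothetical): the typical user of the _notdirty
 * variants is a target MMU helper updating accessed/dirty bits in a guest
 * page-table entry.  PTE_A_EXAMPLE and pte_addr are made-up names; the bit
 * value assumes an x86-style "accessed" flag.
 */
#define PTE_A_EXAMPLE 0x20

static void set_pte_accessed_example(hwaddr pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    /* Avoids flagging the page dirty, so migration/TCG dirty tracking is
       not polluted by the emulated MMU's own bookkeeping. */
    stl_phys_notdirty(pte_addr, pte | PTE_A_EXAMPLE);
}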
Avi Kivitya8170e52012-10-23 12:30:10 +02003890void stq_phys_notdirty(hwaddr addr, uint64_t val)
j_mayerbc98a7e2007-04-04 07:55:12 +00003891{
j_mayerbc98a7e2007-04-04 07:55:12 +00003892 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02003893 MemoryRegionSection *section;
j_mayerbc98a7e2007-04-04 07:55:12 +00003894
Avi Kivityac1970f2012-10-03 16:22:53 +02003895 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003896
Avi Kivityf3705d52012-03-08 16:16:34 +02003897 if (!memory_region_is_ram(section->mr) || section->readonly) {
Blue Swirlcc5bea62012-04-14 14:56:48 +00003898 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003899 if (memory_region_is_ram(section->mr)) {
3900 section = &phys_sections[phys_section_rom];
3901 }
j_mayerbc98a7e2007-04-04 07:55:12 +00003902#ifdef TARGET_WORDS_BIGENDIAN
Avi Kivity37ec01d2012-03-08 18:08:35 +02003903 io_mem_write(section->mr, addr, val >> 32, 4);
3904 io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
j_mayerbc98a7e2007-04-04 07:55:12 +00003905#else
Avi Kivity37ec01d2012-03-08 18:08:35 +02003906 io_mem_write(section->mr, addr, (uint32_t)val, 4);
3907 io_mem_write(section->mr, addr + 4, val >> 32, 4);
j_mayerbc98a7e2007-04-04 07:55:12 +00003908#endif
3909 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02003910 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003911 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003912 + memory_region_section_addr(section, addr));
j_mayerbc98a7e2007-04-04 07:55:12 +00003913 stq_p(ptr, val);
3914 }
3915}
3916
bellard8df1cd02005-01-28 22:37:22 +00003917/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02003918static inline void stl_phys_internal(hwaddr addr, uint32_t val,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003919 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003920{
bellard8df1cd02005-01-28 22:37:22 +00003921 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02003922 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00003923
Avi Kivityac1970f2012-10-03 16:22:53 +02003924 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003925
Avi Kivityf3705d52012-03-08 16:16:34 +02003926 if (!memory_region_is_ram(section->mr) || section->readonly) {
Blue Swirlcc5bea62012-04-14 14:56:48 +00003927 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003928 if (memory_region_is_ram(section->mr)) {
3929 section = &phys_sections[phys_section_rom];
3930 }
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003931#if defined(TARGET_WORDS_BIGENDIAN)
3932 if (endian == DEVICE_LITTLE_ENDIAN) {
3933 val = bswap32(val);
3934 }
3935#else
3936 if (endian == DEVICE_BIG_ENDIAN) {
3937 val = bswap32(val);
3938 }
3939#endif
Avi Kivity37ec01d2012-03-08 18:08:35 +02003940 io_mem_write(section->mr, addr, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00003941 } else {
3942 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02003943 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003944 + memory_region_section_addr(section, addr);
bellard8df1cd02005-01-28 22:37:22 +00003945 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003946 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003947 switch (endian) {
3948 case DEVICE_LITTLE_ENDIAN:
3949 stl_le_p(ptr, val);
3950 break;
3951 case DEVICE_BIG_ENDIAN:
3952 stl_be_p(ptr, val);
3953 break;
3954 default:
3955 stl_p(ptr, val);
3956 break;
3957 }
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00003958 invalidate_and_set_dirty(addr1, 4);
bellard8df1cd02005-01-28 22:37:22 +00003959 }
3960}
3961
Avi Kivitya8170e52012-10-23 12:30:10 +02003962void stl_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003963{
3964 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
3965}
3966
Avi Kivitya8170e52012-10-23 12:30:10 +02003967void stl_le_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003968{
3969 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
3970}
3971
Avi Kivitya8170e52012-10-23 12:30:10 +02003972void stl_be_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003973{
3974 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
3975}
3976
bellardaab33092005-10-30 20:48:42 +00003977/* XXX: optimize */
Avi Kivitya8170e52012-10-23 12:30:10 +02003978void stb_phys(hwaddr addr, uint32_t val)
bellardaab33092005-10-30 20:48:42 +00003979{
3980 uint8_t v = val;
3981 cpu_physical_memory_write(addr, &v, 1);
3982}
3983
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003984/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02003985static inline void stw_phys_internal(hwaddr addr, uint32_t val,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003986 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003987{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003988 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02003989 MemoryRegionSection *section;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003990
Avi Kivityac1970f2012-10-03 16:22:53 +02003991 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003992
Avi Kivityf3705d52012-03-08 16:16:34 +02003993 if (!memory_region_is_ram(section->mr) || section->readonly) {
Blue Swirlcc5bea62012-04-14 14:56:48 +00003994 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003995 if (memory_region_is_ram(section->mr)) {
3996 section = &phys_sections[phys_section_rom];
3997 }
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003998#if defined(TARGET_WORDS_BIGENDIAN)
3999 if (endian == DEVICE_LITTLE_ENDIAN) {
4000 val = bswap16(val);
4001 }
4002#else
4003 if (endian == DEVICE_BIG_ENDIAN) {
4004 val = bswap16(val);
4005 }
4006#endif
Avi Kivity37ec01d2012-03-08 18:08:35 +02004007 io_mem_write(section->mr, addr, val, 2);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004008 } else {
4009 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02004010 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00004011 + memory_region_section_addr(section, addr);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004012 /* RAM case */
4013 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004014 switch (endian) {
4015 case DEVICE_LITTLE_ENDIAN:
4016 stw_le_p(ptr, val);
4017 break;
4018 case DEVICE_BIG_ENDIAN:
4019 stw_be_p(ptr, val);
4020 break;
4021 default:
4022 stw_p(ptr, val);
4023 break;
4024 }
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00004025 invalidate_and_set_dirty(addr1, 2);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004026 }
bellardaab33092005-10-30 20:48:42 +00004027}
4028
Avi Kivitya8170e52012-10-23 12:30:10 +02004029void stw_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004030{
4031 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4032}
4033
Avi Kivitya8170e52012-10-23 12:30:10 +02004034void stw_le_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004035{
4036 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4037}
4038
Avi Kivitya8170e52012-10-23 12:30:10 +02004039void stw_be_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004040{
4041 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4042}
4043
bellardaab33092005-10-30 20:48:42 +00004044/* XXX: optimize */
Avi Kivitya8170e52012-10-23 12:30:10 +02004045void stq_phys(hwaddr addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00004046{
4047 val = tswap64(val);
Stefan Weil71d2b722011-03-26 21:06:56 +01004048 cpu_physical_memory_write(addr, &val, 8);
bellardaab33092005-10-30 20:48:42 +00004049}
4050
Avi Kivitya8170e52012-10-23 12:30:10 +02004051void stq_le_phys(hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004052{
4053 val = cpu_to_le64(val);
4054 cpu_physical_memory_write(addr, &val, 8);
4055}
4056
Avi Kivitya8170e52012-10-23 12:30:10 +02004057void stq_be_phys(hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004058{
4059 val = cpu_to_be64(val);
4060 cpu_physical_memory_write(addr, &val, 8);
4061}
4062
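/*
 * Editor's sketch (hypothetical): publishing a 64-bit ring-descriptor
 * pointer in a guest-visible little-endian layout; desc_addr and ring_base
 * are made-up names.
 */
static void publish_desc_example(hwaddr desc_addr, uint64_t ring_base)
{
    /* The cpu_to_le64() inside stq_le_phys() gives a fixed wire format,
       regardless of host or target endianness. */
    stq_le_phys(desc_addr, ring_base);
}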
aliguori5e2972f2009-03-28 17:51:36 +00004063/* virtual memory access for debug (includes writing to ROM) */
Andreas Färber9349b4f2012-03-14 01:38:32 +01004064int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00004065 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00004066{
4067 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02004068 hwaddr phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00004069 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00004070
4071 while (len > 0) {
4072 page = addr & TARGET_PAGE_MASK;
4073 phys_addr = cpu_get_phys_page_debug(env, page);
4074 /* if no physical page mapped, return an error */
4075 if (phys_addr == -1)
4076 return -1;
4077 l = (page + TARGET_PAGE_SIZE) - addr;
4078 if (l > len)
4079 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00004080 phys_addr += (addr & ~TARGET_PAGE_MASK);
aliguori5e2972f2009-03-28 17:51:36 +00004081 if (is_write)
4082 cpu_physical_memory_write_rom(phys_addr, buf, l);
4083 else
aliguori5e2972f2009-03-28 17:51:36 +00004084 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
bellard13eb76e2004-01-24 15:23:36 +00004085 len -= l;
4086 buf += l;
4087 addr += l;
4088 }
4089 return 0;
4090}
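/*
 * Editor's sketch (hypothetical): cpu_memory_rw_debug() is the
 * gdbstub-style entry point; it resolves guest-virtual pages itself via
 * cpu_get_phys_page_debug(), so it works with the guest MMU enabled.
 */
static int debug_read_example(CPUArchState *env, target_ulong vaddr,
                              uint8_t *buf, int len)
{
    /* Returns -1 if any page in the range is unmapped. */
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0 /* read */);
}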
Paul Brooka68fe892010-03-01 00:08:59 +00004091#endif
bellard13eb76e2004-01-24 15:23:36 +00004092
pbrook2e70f6e2008-06-29 01:03:05 +00004093/* In deterministic execution mode, instructions performing device I/O
4094 must be at the end of the TB. */
Blue Swirl20503962012-04-09 14:20:20 +00004095void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
pbrook2e70f6e2008-06-29 01:03:05 +00004096{
4097 TranslationBlock *tb;
4098 uint32_t n, cflags;
4099 target_ulong pc, cs_base;
4100 uint64_t flags;
4101
Blue Swirl20503962012-04-09 14:20:20 +00004102 tb = tb_find_pc(retaddr);
pbrook2e70f6e2008-06-29 01:03:05 +00004103 if (!tb) {
4104 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
Blue Swirl20503962012-04-09 14:20:20 +00004105 (void *)retaddr);
pbrook2e70f6e2008-06-29 01:03:05 +00004106 }
4107 n = env->icount_decr.u16.low + tb->icount;
Blue Swirl20503962012-04-09 14:20:20 +00004108 cpu_restore_state(tb, env, retaddr);
pbrook2e70f6e2008-06-29 01:03:05 +00004109 /* Calculate how many instructions had been executed before the fault
thsbf20dc02008-06-30 17:22:19 +00004110 occurred. */
pbrook2e70f6e2008-06-29 01:03:05 +00004111 n = n - env->icount_decr.u16.low;
4112 /* Generate a new TB ending on the I/O insn. */
4113 n++;
4114 /* On MIPS and SH, delay slot instructions can only be restarted if
4115 they were already the first instruction in the TB. If this is not
thsbf20dc02008-06-30 17:22:19 +00004116 the first instruction in a TB then re-execute the preceding
pbrook2e70f6e2008-06-29 01:03:05 +00004117 branch. */
4118#if defined(TARGET_MIPS)
4119 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4120 env->active_tc.PC -= 4;
4121 env->icount_decr.u16.low++;
4122 env->hflags &= ~MIPS_HFLAG_BMASK;
4123 }
4124#elif defined(TARGET_SH4)
4125 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4126 && n > 1) {
4127 env->pc -= 2;
4128 env->icount_decr.u16.low++;
4129 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4130 }
4131#endif
4132 /* This should never happen. */
4133 if (n > CF_COUNT_MASK)
4134 cpu_abort(env, "TB too big during recompile");
4135
4136 cflags = n | CF_LAST_IO;
4137 pc = tb->pc;
4138 cs_base = tb->cs_base;
4139 flags = tb->flags;
4140 tb_phys_invalidate(tb, -1);
4141 /* FIXME: In theory this could raise an exception. In practice
4142 we have already translated the block once so it's probably ok. */
4143 tb_gen_code(env, pc, cs_base, flags, cflags);
thsbf20dc02008-06-30 17:22:19 +00004144 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
pbrook2e70f6e2008-06-29 01:03:05 +00004145 the first in the TB) then we end up generating a whole new TB and
4146 repeating the fault, which is horribly inefficient.
4147 Better would be to execute just this insn uncached, or generate a
4148 second new TB. */
4149 cpu_resume_from_signal(env, NULL);
4150}
4151
Paul Brookb3755a92010-03-12 16:54:58 +00004152#if !defined(CONFIG_USER_ONLY)
4153
Stefan Weil055403b2010-10-22 23:03:32 +02004154void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
bellarde3db7222005-01-26 22:00:47 +00004155{
4156 int i, target_code_size, max_target_code_size;
4157 int direct_jmp_count, direct_jmp2_count, cross_page;
4158 TranslationBlock *tb;
ths3b46e622007-09-17 08:09:54 +00004159
bellarde3db7222005-01-26 22:00:47 +00004160 target_code_size = 0;
4161 max_target_code_size = 0;
4162 cross_page = 0;
4163 direct_jmp_count = 0;
4164 direct_jmp2_count = 0;
4165 for(i = 0; i < nb_tbs; i++) {
4166 tb = &tbs[i];
4167 target_code_size += tb->size;
4168 if (tb->size > max_target_code_size)
4169 max_target_code_size = tb->size;
4170 if (tb->page_addr[1] != -1)
4171 cross_page++;
4172 if (tb->tb_next_offset[0] != 0xffff) {
4173 direct_jmp_count++;
4174 if (tb->tb_next_offset[1] != 0xffff) {
4175 direct_jmp2_count++;
4176 }
4177 }
4178 }
4179 /* XXX: avoid using doubles ? */
bellard57fec1f2008-02-01 10:50:11 +00004180 cpu_fprintf(f, "Translation buffer state:\n");
Richard Hendersonf1bc0bc2012-10-16 17:30:10 +10004181 cpu_fprintf(f, "gen code size %td/%zd\n",
bellard26a5f132008-05-28 12:30:31 +00004182 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4183 cpu_fprintf(f, "TB count %d/%d\n",
4184 nb_tbs, code_gen_max_blocks);
ths5fafdf22007-09-16 21:08:06 +00004185 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
bellarde3db7222005-01-26 22:00:47 +00004186 nb_tbs ? target_code_size / nb_tbs : 0,
4187 max_target_code_size);
Stefan Weil055403b2010-10-22 23:03:32 +02004188 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
bellarde3db7222005-01-26 22:00:47 +00004189 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4190 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
ths5fafdf22007-09-16 21:08:06 +00004191 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4192 cross_page,
bellarde3db7222005-01-26 22:00:47 +00004193 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4194 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
ths5fafdf22007-09-16 21:08:06 +00004195 direct_jmp_count,
bellarde3db7222005-01-26 22:00:47 +00004196 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4197 direct_jmp2_count,
4198 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
bellard57fec1f2008-02-01 10:50:11 +00004199 cpu_fprintf(f, "\nStatistics:\n");
bellarde3db7222005-01-26 22:00:47 +00004200 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4201 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4202 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
bellardb67d9a52008-05-23 09:57:34 +00004203 tcg_dump_info(f, cpu_fprintf);
bellarde3db7222005-01-26 22:00:47 +00004204}
4205
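/*
 * Editor's sketch (hypothetical): dump_exec_info() is normally driven by
 * the monitor ("info jit"), but any fprintf-compatible callback works;
 * stdio's fprintf matches the fprintf_function signature.
 */
static void dump_jit_to_stderr_example(void)
{
    dump_exec_info(stderr, fprintf);
}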
Benjamin Herrenschmidt82afa582012-01-10 01:35:11 +00004206/*
4207 * A helper function for the _utterly broken_ virtio device model to find out if
4208 * it's running on a big endian machine. Don't do this at home kids!
4209 */
4210bool virtio_is_big_endian(void);
4211bool virtio_is_big_endian(void)
4212{
4213#if defined(TARGET_WORDS_BIGENDIAN)
4214 return true;
4215#else
4216 return false;
4217#endif
4218}
4219
bellard61382a52003-10-27 21:22:23 +00004220#endif
Wen Congyang76f35532012-05-07 12:04:18 +08004221
4222#ifndef CONFIG_USER_ONLY
Avi Kivitya8170e52012-10-23 12:30:10 +02004223bool cpu_physical_memory_is_io(hwaddr phys_addr)
Wen Congyang76f35532012-05-07 12:04:18 +08004224{
4225 MemoryRegionSection *section;
4226
Avi Kivityac1970f2012-10-03 16:22:53 +02004227 section = phys_page_find(address_space_memory.dispatch,
4228 phys_addr >> TARGET_PAGE_BITS);
Wen Congyang76f35532012-05-07 12:04:18 +08004229
4230 return !(memory_region_is_ram(section->mr) ||
4231 memory_region_is_romd(section->mr));
4232}
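/*
 * Editor's sketch (hypothetical): a guest-memory dumper can use the
 * predicate above to skip device regions, where reads may have side
 * effects; should_dump_page_example() is a made-up name.
 */
static bool should_dump_page_example(hwaddr paddr)
{
    return !cpu_physical_memory_is_io(paddr & TARGET_PAGE_MASK);
}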
4233#endif