/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "dma.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#include "cputlb.h"

#include "memory-internal.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

/* Code generation and translation blocks */
static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t *code_gen_prologue;
static uint8_t *code_gen_buffer;
static size_t code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static size_t code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;
DMAContext dma_context_memory;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *, cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

#define P_L2_LEVELS \
    (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)

uintptr_t qemu_real_host_page_size;
uintptr_t qemu_host_page_size;
uintptr_t qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];
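
/* Illustrative walk (a sketch, not additional functionality): a page
   index is consumed top-down, one V_L1_BITS-wide slice to pick the
   l1_map slot and then one L2_BITS-wide slice per intermediate level:

       lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
       lp = *lp + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));   -- level i

   page_find_alloc() below implements exactly this walk.  */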

#if !defined(CONFIG_USER_ONLY)

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
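/* PhysPageEntry.ptr (declared in memory-internal.h) is a 15-bit field,
   so the all-ones 15-bit value 0x7fff is reserved as the nil marker and
   is never handed out by phys_map_node_alloc(), which asserts against
   it below.  */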

static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#endif
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2);

/* statistics */
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static inline void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static inline void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0) {
        qemu_host_page_size = qemu_real_host_page_size;
    }
    if (qemu_host_page_size < TARGET_PAGE_SIZE) {
        qemu_host_page_size = TARGET_PAGE_SIZE;
    }
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}
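
/* Typical use (a sketch; the callers sit elsewhere in this file): look
   up the descriptor for a guest code page, creating the intermediate
   levels on demand when a TB is being registered:

       PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

   With alloc == 0 the walk instead returns NULL at the first missing
   level, which is what the read-only page_find() wrapper below does.  */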

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}

static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
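
/* Note the compression at work in phys_page_set_level(): when a region
   is aligned and covers a whole subtree (*nb >= step at an interior
   level), the entry becomes a leaf at that level and no lower nodes are
   allocated for it, so large uniform mappings stay shallow.  */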

MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
{
    PhysPageEntry lp = d->phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}
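
/* A failed lookup deliberately does not return NULL: callers always get
   a valid MemoryRegionSection, with unmapped addresses resolving to the
   phys_section_unassigned entry that s_index is initialized to above.  */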

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

#define mmap_lock() do { } while (0)
#define mmap_unlock() do { } while (0)
#endif

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode; this will change once a dedicated libc is used.  */
/* ??? 64-bit hosts ought to have no problem mmaping data outside the
   region in which the guest needs to run.  Revisit this.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

/* ??? Should configure for this, not list operating systems here.  */
#if (defined(__linux__) \
    || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__))
# define USE_MMAP
#endif

/* Minimum size of the code gen buffer.  This number is randomly chosen,
   but not so small that we can't have a fair number of TBs live.  */
#define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)

/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__arm__)
# define MAX_CODE_GEN_BUFFER_SIZE  (16u * 1024 * 1024)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)

static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer.  */
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* ??? Needs adjustments.  */
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
           static buffer, we could size this on RESERVED_VA, on the text
           segment size of the executable, or continue to use the default.  */
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    code_gen_buffer_size = tb_size;
    return tb_size;
}
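
/* Example of the clamping (illustrative numbers only): an oversized
   request, say 16GB on an x86_64 host, is cut back to the 2GB
   MAX_CODE_GEN_BUFFER_SIZE so that goto_tb direct branches stay in
   range, while tb_size == 0 falls back to the default (or ram_size / 4
   in the dynamic-buffer case).  */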

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

static inline void *alloc_code_gen_buffer(void)
{
    map_exec(static_code_gen_buffer, code_gen_buffer_size);
    return static_code_gen_buffer;
}
#elif defined(USE_MMAP)
static inline void *alloc_code_gen_buffer(void)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address.  */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (code_gen_buffer_size > 800u * 1024 * 1024) {
        code_gen_buffer_size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# endif

    buf = mmap((void *)start, code_gen_buffer_size,
               PROT_WRITE | PROT_READ | PROT_EXEC, flags, -1, 0);
    return buf == MAP_FAILED ? NULL : buf;
}
#else
static inline void *alloc_code_gen_buffer(void)
{
    void *buf = g_malloc(code_gen_buffer_size);

    if (buf) {
        map_exec(buf, code_gen_buffer_size);
    }
    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, USE_MMAP */

static inline void code_gen_alloc(size_t tb_size)
{
    code_gen_buffer_size = size_code_gen_buffer(tb_size);
    code_gen_buffer = alloc_code_gen_buffer();
    if (code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }

    qemu_madvise(code_gen_buffer, code_gen_buffer_size, QEMU_MADV_HUGEPAGE);

    /* Steal room for the prologue at the end of the buffer.  This ensures
       (via the MAX_CODE_GEN_BUFFER_SIZE limits above) that direct branches
       from TBs to the prologue are going to be in range.  It also means
       that we don't need to mark (additional) portions of the data segment
       as executable.  */
    code_gen_prologue = code_gen_buffer + code_gen_buffer_size - 1024;
    code_gen_buffer_size -= 1024;

    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    tcg_register_jit(code_gen_buffer, code_gen_buffer_size);
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}
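
/* Typical call sequence (a sketch; the actual caller sits outside this
   file): the front end converts a user-supplied megabyte count and
   initializes TCG before any CPU is created, e.g.

       tcg_exec_init(tb_size * 1024 * 1024);   -- 0 selects the default
*/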

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUArchState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUArchState),
        VMSTATE_UINT32(interrupt_request, CPUArchState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUArchState *qemu_get_cpu(int cpu)
{
    CPUArchState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu) {
            break;
        }
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUArchState *env)
{
#ifndef CONFIG_USER_ONLY
    CPUState *cpu = ENV_GET_CPU(env);
#endif
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size) {
        return NULL;
    }
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;

    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUArchState *env1)
{
    CPUArchState *env;

#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer)
        > code_gen_buffer_size) {
        cpu_abort(env1, "Internal error: code buffer overflow\n");
    }
    nb_tbs = 0;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));
    }

    memset(tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof(void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;

    for (;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
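
/* The TB lists above encode extra state in the two low bits of each
   pointer, which is why entries are masked with ~3 before use: for
   page_next[], the value n selects which of the (up to two) pages of
   the next TB the link belongs to; in the jump lists, n == 2 marks the
   end of the circular list (see tb_phys_invalidate() below).  */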

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for (;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb) {
                break;
            }
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
}
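
/* In effect, pointing the patched branch back at tb_next_offset[n]
   makes the goto_tb fall through into the TB's own exit path, so
   control returns to the execution loop instead of chaining into the
   (possibly invalidated) target TB.  */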

void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUArchState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from each CPU's jump cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb) {
            env->tb_jmp_cache[h] = NULL;
        }
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for (;;) {
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2) {
            break;
        }
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
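
/* Worked example: set_bits(tab, 3, 7) marks bits 3..9 of the array,
   i.e. it ORs 0xf8 into tab[0] (bits 3-7) and 0x03 into tab[1]
   (bits 8-9).  */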

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE) {
                tb_end = TARGET_PAGE_SIZE;
            }
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
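
/* The resulting bitmap has one bit per byte of the page, set wherever
   translated code was generated from that byte;
   tb_invalidate_phys_page_fast() consults it so that writes to the
   non-code parts of a hot page can be dismissed cheaply.  */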

TranslationBlock *tb_gen_code(CPUArchState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((uintptr_t)code_gen_ptr + code_gen_size +
                             CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start, end). NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
                              int is_cpu_write_access)
{
    while (start < end) {
        tb_invalidate_phys_page_range(start, end, is_cpu_write_access);
        start &= TARGET_PAGE_MASK;
        start += TARGET_PAGE_SIZE;
    }
}
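
/* Each iteration above only handles the TBs on the page containing
   'start'; the callee treats 'end' merely as the exclusive upper bound
   of the write, which is why the loop can pass it through unchanged
   while advancing start page by page.  */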

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start, end). NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001123void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
bellardd720b932004-04-25 17:57:43 +00001124 int is_cpu_write_access)
bellard9fa3e852004-01-04 18:06:42 +00001125{
aliguori6b917542008-11-18 19:46:41 +00001126 TranslationBlock *tb, *tb_next, *saved_tb;
Andreas Färber9349b4f2012-03-14 01:38:32 +01001127 CPUArchState *env = cpu_single_env;
Paul Brook41c1b1c2010-03-12 16:54:58 +00001128 tb_page_addr_t tb_start, tb_end;
aliguori6b917542008-11-18 19:46:41 +00001129 PageDesc *p;
1130 int n;
1131#ifdef TARGET_HAS_PRECISE_SMC
1132 int current_tb_not_found = is_cpu_write_access;
1133 TranslationBlock *current_tb = NULL;
1134 int current_tb_modified = 0;
1135 target_ulong current_pc = 0;
1136 target_ulong current_cs_base = 0;
1137 int current_flags = 0;
1138#endif /* TARGET_HAS_PRECISE_SMC */
bellard9fa3e852004-01-04 18:06:42 +00001139
1140 p = page_find(start >> TARGET_PAGE_BITS);
Blue Swirl44209fc2012-12-02 17:25:06 +00001141 if (!p) {
bellard9fa3e852004-01-04 18:06:42 +00001142 return;
Blue Swirl44209fc2012-12-02 17:25:06 +00001143 }
ths5fafdf22007-09-16 21:08:06 +00001144 if (!p->code_bitmap &&
bellardd720b932004-04-25 17:57:43 +00001145 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1146 is_cpu_write_access) {
bellard9fa3e852004-01-04 18:06:42 +00001147 /* build code bitmap */
1148 build_page_bitmap(p);
1149 }
1150
1151 /* we remove all the TBs in the range [start, end[ */
Blue Swirl44209fc2012-12-02 17:25:06 +00001152 /* XXX: see if in some cases it could be faster to invalidate all
1153 the code */
bellard9fa3e852004-01-04 18:06:42 +00001154 tb = p->first_tb;
1155 while (tb != NULL) {
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001156 n = (uintptr_t)tb & 3;
1157 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
bellard9fa3e852004-01-04 18:06:42 +00001158 tb_next = tb->page_next[n];
1159 /* NOTE: this is subtle as a TB may span two physical pages */
1160 if (n == 0) {
1161 /* NOTE: tb_end may be after the end of the page, but
1162 it is not a problem */
1163 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1164 tb_end = tb_start + tb->size;
1165 } else {
1166 tb_start = tb->page_addr[1];
1167 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1168 }
1169 if (!(tb_end <= start || tb_start >= end)) {
bellardd720b932004-04-25 17:57:43 +00001170#ifdef TARGET_HAS_PRECISE_SMC
1171 if (current_tb_not_found) {
1172 current_tb_not_found = 0;
1173 current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001174 if (env->mem_io_pc) {
bellardd720b932004-04-25 17:57:43 +00001175 /* now we have a real cpu fault */
pbrook2e70f6e2008-06-29 01:03:05 +00001176 current_tb = tb_find_pc(env->mem_io_pc);
bellardd720b932004-04-25 17:57:43 +00001177 }
1178 }
1179 if (current_tb == tb &&
pbrook2e70f6e2008-06-29 01:03:05 +00001180 (current_tb->cflags & CF_COUNT_MASK) != 1) {
bellardd720b932004-04-25 17:57:43 +00001181 /* If we are modifying the current TB, we must stop
1182 its execution. We could be more precise by checking
1183 that the modification is after the current PC, but it
1184 would require a specialized function to partially
1185 restore the CPU state */
ths3b46e622007-09-17 08:09:54 +00001186
bellardd720b932004-04-25 17:57:43 +00001187 current_tb_modified = 1;
Stefan Weil618ba8e2011-04-18 06:39:53 +00001188 cpu_restore_state(current_tb, env, env->mem_io_pc);
aliguori6b917542008-11-18 19:46:41 +00001189 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1190 &current_flags);
bellardd720b932004-04-25 17:57:43 +00001191 }
1192#endif /* TARGET_HAS_PRECISE_SMC */
bellard6f5a9f72005-11-26 20:12:28 +00001193 /* we need to do that to handle the case where a signal
1194 occurs while doing tb_phys_invalidate() */
1195 saved_tb = NULL;
1196 if (env) {
1197 saved_tb = env->current_tb;
1198 env->current_tb = NULL;
1199 }
bellard9fa3e852004-01-04 18:06:42 +00001200 tb_phys_invalidate(tb, -1);
bellard6f5a9f72005-11-26 20:12:28 +00001201 if (env) {
1202 env->current_tb = saved_tb;
Blue Swirl44209fc2012-12-02 17:25:06 +00001203 if (env->interrupt_request && env->current_tb) {
bellard6f5a9f72005-11-26 20:12:28 +00001204 cpu_interrupt(env, env->interrupt_request);
Blue Swirl44209fc2012-12-02 17:25:06 +00001205 }
bellard6f5a9f72005-11-26 20:12:28 +00001206 }
bellard9fa3e852004-01-04 18:06:42 +00001207 }
1208 tb = tb_next;
1209 }
1210#if !defined(CONFIG_USER_ONLY)
1211 /* if no code remaining, no need to continue to use slow writes */
1212 if (!p->first_tb) {
1213 invalidate_page_bitmap(p);
bellardd720b932004-04-25 17:57:43 +00001214 if (is_cpu_write_access) {
pbrook2e70f6e2008-06-29 01:03:05 +00001215 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
bellardd720b932004-04-25 17:57:43 +00001216 }
1217 }
1218#endif
1219#ifdef TARGET_HAS_PRECISE_SMC
1220 if (current_tb_modified) {
1221 /* we generate a block containing just the instruction
1222 modifying the memory. It will ensure that it cannot modify
1223 itself */
bellardea1c1802004-06-14 18:56:36 +00001224 env->current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001225 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
bellardd720b932004-04-25 17:57:43 +00001226 cpu_resume_from_signal(env, NULL);
bellard9fa3e852004-01-04 18:06:42 +00001227 }
1228#endif
1229}
1230
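/* Note on the SMC path above: passing a cflags value of 1 to
   tb_gen_code() pins the new block's instruction count (CF_COUNT_MASK)
   to a single instruction, so the regenerated TB contains only the
   store that modified the code and cannot invalidate itself while
   executing. */
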
1231/* len must be <= 8 and start must be a multiple of len */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001232static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
bellard9fa3e852004-01-04 18:06:42 +00001233{
1234 PageDesc *p;
1235 int offset, b;
Blue Swirl44209fc2012-12-02 17:25:06 +00001236
bellard59817cc2004-02-16 22:01:13 +00001237#if 0
bellarda4193c82004-06-03 14:01:43 +00001238 if (1) {
aliguori93fcfe32009-01-15 22:34:14 +00001239 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1240 cpu_single_env->mem_io_vaddr, len,
1241 cpu_single_env->eip,
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001242 cpu_single_env->eip +
1243 (intptr_t)cpu_single_env->segs[R_CS].base);
bellard59817cc2004-02-16 22:01:13 +00001244 }
1245#endif
bellard9fa3e852004-01-04 18:06:42 +00001246 p = page_find(start >> TARGET_PAGE_BITS);
Blue Swirl44209fc2012-12-02 17:25:06 +00001247 if (!p) {
bellard9fa3e852004-01-04 18:06:42 +00001248 return;
Blue Swirl44209fc2012-12-02 17:25:06 +00001249 }
bellard9fa3e852004-01-04 18:06:42 +00001250 if (p->code_bitmap) {
1251 offset = start & ~TARGET_PAGE_MASK;
1252 b = p->code_bitmap[offset >> 3] >> (offset & 7);
Blue Swirl44209fc2012-12-02 17:25:06 +00001253 if (b & ((1 << len) - 1)) {
bellard9fa3e852004-01-04 18:06:42 +00001254 goto do_invalidate;
Blue Swirl44209fc2012-12-02 17:25:06 +00001255 }
bellard9fa3e852004-01-04 18:06:42 +00001256 } else {
1257 do_invalidate:
bellardd720b932004-04-25 17:57:43 +00001258 tb_invalidate_phys_page_range(start, start + len, 1);
bellard9fa3e852004-01-04 18:06:42 +00001259 }
1260}
1261
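/* Illustrative sketch (not built): how the code_bitmap test above
   works.  Each bit of p->code_bitmap covers one byte of the page; a
   set bit means translated code was generated from that byte.  Because
   len <= 8 and start is a multiple of len (and the access sizes used
   in practice are powers of two), the len bits starting at 'offset'
   never straddle a byte boundary, so one shift-and-mask suffices. */
#if 0
static inline int code_bitmap_is_set(const uint8_t *bitmap,
                                     int offset, int len)
{
    int b = bitmap[offset >> 3] >> (offset & 7);
    return (b & ((1 << len) - 1)) != 0;
}
#endif
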
bellard9fa3e852004-01-04 18:06:42 +00001262#if !defined(CONFIG_SOFTMMU)
Paul Brook41c1b1c2010-03-12 16:54:58 +00001263static void tb_invalidate_phys_page(tb_page_addr_t addr,
Blue Swirl20503962012-04-09 14:20:20 +00001264 uintptr_t pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00001265{
aliguori6b917542008-11-18 19:46:41 +00001266 TranslationBlock *tb;
bellard9fa3e852004-01-04 18:06:42 +00001267 PageDesc *p;
aliguori6b917542008-11-18 19:46:41 +00001268 int n;
bellardd720b932004-04-25 17:57:43 +00001269#ifdef TARGET_HAS_PRECISE_SMC
aliguori6b917542008-11-18 19:46:41 +00001270 TranslationBlock *current_tb = NULL;
Andreas Färber9349b4f2012-03-14 01:38:32 +01001271 CPUArchState *env = cpu_single_env;
aliguori6b917542008-11-18 19:46:41 +00001272 int current_tb_modified = 0;
1273 target_ulong current_pc = 0;
1274 target_ulong current_cs_base = 0;
1275 int current_flags = 0;
bellardd720b932004-04-25 17:57:43 +00001276#endif
bellard9fa3e852004-01-04 18:06:42 +00001277
1278 addr &= TARGET_PAGE_MASK;
1279 p = page_find(addr >> TARGET_PAGE_BITS);
Blue Swirl44209fc2012-12-02 17:25:06 +00001280 if (!p) {
bellardfd6ce8f2003-05-14 19:00:11 +00001281 return;
Blue Swirl44209fc2012-12-02 17:25:06 +00001282 }
bellardfd6ce8f2003-05-14 19:00:11 +00001283 tb = p->first_tb;
bellardd720b932004-04-25 17:57:43 +00001284#ifdef TARGET_HAS_PRECISE_SMC
1285 if (tb && pc != 0) {
1286 current_tb = tb_find_pc(pc);
1287 }
1288#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001289 while (tb != NULL) {
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001290 n = (uintptr_t)tb & 3;
1291 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
bellardd720b932004-04-25 17:57:43 +00001292#ifdef TARGET_HAS_PRECISE_SMC
1293 if (current_tb == tb &&
pbrook2e70f6e2008-06-29 01:03:05 +00001294 (current_tb->cflags & CF_COUNT_MASK) != 1) {
bellardd720b932004-04-25 17:57:43 +00001295 /* If we are modifying the current TB, we must stop
1296 its execution. We could be more precise by checking
1297 that the modification is after the current PC, but it
1298 would require a specialized function to partially
1299 restore the CPU state */
ths3b46e622007-09-17 08:09:54 +00001300
bellardd720b932004-04-25 17:57:43 +00001301 current_tb_modified = 1;
Stefan Weil618ba8e2011-04-18 06:39:53 +00001302 cpu_restore_state(current_tb, env, pc);
aliguori6b917542008-11-18 19:46:41 +00001303 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1304 &current_flags);
bellardd720b932004-04-25 17:57:43 +00001305 }
1306#endif /* TARGET_HAS_PRECISE_SMC */
bellard9fa3e852004-01-04 18:06:42 +00001307 tb_phys_invalidate(tb, addr);
1308 tb = tb->page_next[n];
bellardfd6ce8f2003-05-14 19:00:11 +00001309 }
1310 p->first_tb = NULL;
bellardd720b932004-04-25 17:57:43 +00001311#ifdef TARGET_HAS_PRECISE_SMC
1312 if (current_tb_modified) {
1313 /* we generate a block containing just the instruction that
1314 modified the memory; this ensures that it cannot modify
1315 itself */
bellardea1c1802004-06-14 18:56:36 +00001316 env->current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001317 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
bellardd720b932004-04-25 17:57:43 +00001318 cpu_resume_from_signal(env, puc);
1319 }
1320#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001321}
bellard9fa3e852004-01-04 18:06:42 +00001322#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001323
1324/* add the tb in the target page and protect it if necessary */
ths5fafdf22007-09-16 21:08:06 +00001325static inline void tb_alloc_page(TranslationBlock *tb,
Paul Brook41c1b1c2010-03-12 16:54:58 +00001326 unsigned int n, tb_page_addr_t page_addr)
bellardfd6ce8f2003-05-14 19:00:11 +00001327{
1328 PageDesc *p;
Juan Quintela4429ab42011-06-02 01:53:44 +00001329#ifndef CONFIG_USER_ONLY
1330 bool page_already_protected;
1331#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001332
bellard9fa3e852004-01-04 18:06:42 +00001333 tb->page_addr[n] = page_addr;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001334 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
bellard9fa3e852004-01-04 18:06:42 +00001335 tb->page_next[n] = p->first_tb;
Juan Quintela4429ab42011-06-02 01:53:44 +00001336#ifndef CONFIG_USER_ONLY
1337 page_already_protected = p->first_tb != NULL;
1338#endif
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001339 p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
bellard9fa3e852004-01-04 18:06:42 +00001340 invalidate_page_bitmap(p);
1341
bellard107db442004-06-22 18:48:46 +00001342#if defined(TARGET_HAS_SMC) || 1
bellardd720b932004-04-25 17:57:43 +00001343
bellard9fa3e852004-01-04 18:06:42 +00001344#if defined(CONFIG_USER_ONLY)
bellardfd6ce8f2003-05-14 19:00:11 +00001345 if (p->flags & PAGE_WRITE) {
pbrook53a59602006-03-25 19:31:22 +00001346 target_ulong addr;
1347 PageDesc *p2;
bellard9fa3e852004-01-04 18:06:42 +00001348 int prot;
1349
bellardfd6ce8f2003-05-14 19:00:11 +00001350 /* force the host page to be non-writable (writes will incur a
1351 page fault + mprotect overhead) */
pbrook53a59602006-03-25 19:31:22 +00001352 page_addr &= qemu_host_page_mask;
bellardfd6ce8f2003-05-14 19:00:11 +00001353 prot = 0;
Blue Swirl44209fc2012-12-02 17:25:06 +00001354 for (addr = page_addr; addr < page_addr + qemu_host_page_size;
pbrook53a59602006-03-25 19:31:22 +00001355 addr += TARGET_PAGE_SIZE) {
1356
Blue Swirl44209fc2012-12-02 17:25:06 +00001357 p2 = page_find(addr >> TARGET_PAGE_BITS);
1358 if (!p2) {
pbrook53a59602006-03-25 19:31:22 +00001359 continue;
Blue Swirl44209fc2012-12-02 17:25:06 +00001360 }
pbrook53a59602006-03-25 19:31:22 +00001361 prot |= p2->flags;
1362 p2->flags &= ~PAGE_WRITE;
pbrook53a59602006-03-25 19:31:22 +00001363 }
ths5fafdf22007-09-16 21:08:06 +00001364 mprotect(g2h(page_addr), qemu_host_page_size,
bellardfd6ce8f2003-05-14 19:00:11 +00001365 (prot & PAGE_BITS) & ~PAGE_WRITE);
1366#ifdef DEBUG_TB_INVALIDATE
blueswir1ab3d1722007-11-04 07:31:40 +00001367 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
pbrook53a59602006-03-25 19:31:22 +00001368 page_addr);
bellardfd6ce8f2003-05-14 19:00:11 +00001369#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001370 }
bellard9fa3e852004-01-04 18:06:42 +00001371#else
1372 /* if some code is already present, then the pages are already
1373 protected. So we handle the case where only the first TB is
1374 allocated in a physical page */
Juan Quintela4429ab42011-06-02 01:53:44 +00001375 if (!page_already_protected) {
bellard6a00d602005-11-21 23:25:50 +00001376 tlb_protect_code(page_addr);
bellard9fa3e852004-01-04 18:06:42 +00001377 }
1378#endif
bellardd720b932004-04-25 17:57:43 +00001379
1380#endif /* TARGET_HAS_SMC */
bellardfd6ce8f2003-05-14 19:00:11 +00001381}
1382
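/* The lists above tag the low two bits of each TranslationBlock
   pointer: in first_tb/page_next the tag selects which of the TB's
   (at most two) pages the link belongs to, and in the jmp_first chain
   the value 2 marks the list head.  A minimal sketch of the encoding,
   assuming TBs are at least 4-byte aligned (helper names here are
   hypothetical): */
#if 0
static inline TranslationBlock *tb_tag_ptr(TranslationBlock *tb, unsigned n)
{
    return (TranslationBlock *)((uintptr_t)tb | n);    /* n in 0..3 */
}

static inline TranslationBlock *tb_untag_ptr(TranslationBlock *tagged,
                                             unsigned *n)
{
    *n = (uintptr_t)tagged & 3;
    return (TranslationBlock *)((uintptr_t)tagged & ~(uintptr_t)3);
}
#endif
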
bellard9fa3e852004-01-04 18:06:42 +00001383/* add a new TB and link it to the physical page tables. phys_page2 is
1384 (-1) to indicate that only one page contains the TB. */
Blue Swirl8b9c99d2012-10-28 11:04:51 +00001385static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
1386 tb_page_addr_t phys_page2)
bellardd4e81642003-05-25 16:46:15 +00001387{
bellard9fa3e852004-01-04 18:06:42 +00001388 unsigned int h;
1389 TranslationBlock **ptb;
1390
pbrookc8a706f2008-06-02 16:16:42 +00001391 /* Grab the mmap lock to stop another thread invalidating this TB
1392 before we are done. */
1393 mmap_lock();
bellard9fa3e852004-01-04 18:06:42 +00001394 /* add in the physical hash table */
1395 h = tb_phys_hash_func(phys_pc);
1396 ptb = &tb_phys_hash[h];
1397 tb->phys_hash_next = *ptb;
1398 *ptb = tb;
bellardfd6ce8f2003-05-14 19:00:11 +00001399
1400 /* add in the page list */
bellard9fa3e852004-01-04 18:06:42 +00001401 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
Blue Swirl44209fc2012-12-02 17:25:06 +00001402 if (phys_page2 != -1) {
bellard9fa3e852004-01-04 18:06:42 +00001403 tb_alloc_page(tb, 1, phys_page2);
Blue Swirl44209fc2012-12-02 17:25:06 +00001404 } else {
bellard9fa3e852004-01-04 18:06:42 +00001405 tb->page_addr[1] = -1;
Blue Swirl44209fc2012-12-02 17:25:06 +00001406 }
bellard9fa3e852004-01-04 18:06:42 +00001407
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001408 tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
bellardd4e81642003-05-25 16:46:15 +00001409 tb->jmp_next[0] = NULL;
1410 tb->jmp_next[1] = NULL;
1411
1412 /* init original jump addresses */
Blue Swirl44209fc2012-12-02 17:25:06 +00001413 if (tb->tb_next_offset[0] != 0xffff) {
bellardd4e81642003-05-25 16:46:15 +00001414 tb_reset_jump(tb, 0);
Blue Swirl44209fc2012-12-02 17:25:06 +00001415 }
1416 if (tb->tb_next_offset[1] != 0xffff) {
bellardd4e81642003-05-25 16:46:15 +00001417 tb_reset_jump(tb, 1);
Blue Swirl44209fc2012-12-02 17:25:06 +00001418 }
bellard8a40a182005-11-20 10:35:40 +00001419
1420#ifdef DEBUG_TB_CHECK
1421 tb_page_check();
1422#endif
pbrookc8a706f2008-06-02 16:16:42 +00001423 mmap_unlock();
bellardfd6ce8f2003-05-14 19:00:11 +00001424}
1425
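/* Typical call pattern, as a hedged sketch: after code generation the
   translator computes the physical page(s) the TB covers and links it.
   get_page_addr_code() is the helper used elsewhere in QEMU for the
   virtual-to-code-page translation; its use here, like 'env' and 'tb',
   is a placeholder for illustration only. */
#if 0
    tb_page_addr_t phys_pc = get_page_addr_code(env, tb->pc);
    tb_page_addr_t phys_page2 = -1;

    if ((tb->pc & TARGET_PAGE_MASK) !=
        ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK)) {
        /* the translated code crosses a target page boundary */
        phys_page2 = get_page_addr_code(env,
                                        (tb->pc + tb->size - 1)
                                        & TARGET_PAGE_MASK);
    }
    tb_link_page(tb, phys_pc, phys_page2);
#endif
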
Yeongkyoon Leefdbb84d2012-10-31 16:04:24 +09001426#if defined(CONFIG_QEMU_LDST_OPTIMIZATION) && defined(CONFIG_SOFTMMU)
1427/* check whether the given addr is in TCG generated code buffer or not */
1428bool is_tcg_gen_code(uintptr_t tc_ptr)
1429{
1430 /* This can be called during code generation; code_gen_buffer_max_size
1431 is used instead of code_gen_ptr for the upper boundary check */
1432 return (tc_ptr >= (uintptr_t)code_gen_buffer &&
1433 tc_ptr < (uintptr_t)(code_gen_buffer + code_gen_buffer_max_size));
1434}
1435#endif
1436
bellarda513fe12003-05-27 23:29:48 +00001437/* find the TB 'tb' whose generated code contains tc_ptr, i.e. such
1438 that tb->tc_ptr <= tc_ptr < the next TB's tc_ptr. Return NULL if not found */
Stefan Weil6375e092012-04-06 22:26:15 +02001439TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
bellarda513fe12003-05-27 23:29:48 +00001440{
1441 int m_min, m_max, m;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001442 uintptr_t v;
bellarda513fe12003-05-27 23:29:48 +00001443 TranslationBlock *tb;
1444
Blue Swirl44209fc2012-12-02 17:25:06 +00001445 if (nb_tbs <= 0) {
bellarda513fe12003-05-27 23:29:48 +00001446 return NULL;
Blue Swirl44209fc2012-12-02 17:25:06 +00001447 }
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001448 if (tc_ptr < (uintptr_t)code_gen_buffer ||
1449 tc_ptr >= (uintptr_t)code_gen_ptr) {
bellarda513fe12003-05-27 23:29:48 +00001450 return NULL;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001451 }
bellarda513fe12003-05-27 23:29:48 +00001452 /* binary search (cf Knuth) */
1453 m_min = 0;
1454 m_max = nb_tbs - 1;
1455 while (m_min <= m_max) {
1456 m = (m_min + m_max) >> 1;
1457 tb = &tbs[m];
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001458 v = (uintptr_t)tb->tc_ptr;
Blue Swirl44209fc2012-12-02 17:25:06 +00001459 if (v == tc_ptr) {
bellarda513fe12003-05-27 23:29:48 +00001460 return tb;
Blue Swirl44209fc2012-12-02 17:25:06 +00001461 } else if (tc_ptr < v) {
bellarda513fe12003-05-27 23:29:48 +00001462 m_max = m - 1;
1463 } else {
1464 m_min = m + 1;
1465 }
ths5fafdf22007-09-16 21:08:06 +00001466 }
bellarda513fe12003-05-27 23:29:48 +00001467 return &tbs[m_max];
1468}
bellard75012672003-06-21 13:11:07 +00001469
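/* The binary search is valid because translated code is allocated
   linearly from code_gen_buffer, so tbs[] is ordered by tc_ptr.  A
   hedged usage sketch -- 'fault_pc' stands for a host PC taken from a
   signal context and is a placeholder: */
#if 0
    TranslationBlock *tb = tb_find_pc((uintptr_t)fault_pc);
    if (tb) {
        /* roll the CPU state back to the guest instruction that was
           executing at fault_pc */
        cpu_restore_state(tb, env, (uintptr_t)fault_pc);
    }
#endif
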
bellardea041c02003-06-25 16:16:50 +00001470static void tb_reset_jump_recursive(TranslationBlock *tb);
1471
1472static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1473{
1474 TranslationBlock *tb1, *tb_next, **ptb;
1475 unsigned int n1;
1476
1477 tb1 = tb->jmp_next[n];
1478 if (tb1 != NULL) {
1479 /* find head of list */
Blue Swirl44209fc2012-12-02 17:25:06 +00001480 for (;;) {
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001481 n1 = (uintptr_t)tb1 & 3;
1482 tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
Blue Swirl44209fc2012-12-02 17:25:06 +00001483 if (n1 == 2) {
bellardea041c02003-06-25 16:16:50 +00001484 break;
Blue Swirl44209fc2012-12-02 17:25:06 +00001485 }
bellardea041c02003-06-25 16:16:50 +00001486 tb1 = tb1->jmp_next[n1];
1487 }
1488 /* we are now sure that tb jumps to tb1 */
1489 tb_next = tb1;
1490
1491 /* remove tb from the jmp_first list */
1492 ptb = &tb_next->jmp_first;
Blue Swirl44209fc2012-12-02 17:25:06 +00001493 for (;;) {
bellardea041c02003-06-25 16:16:50 +00001494 tb1 = *ptb;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001495 n1 = (uintptr_t)tb1 & 3;
1496 tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
Blue Swirl44209fc2012-12-02 17:25:06 +00001497 if (n1 == n && tb1 == tb) {
bellardea041c02003-06-25 16:16:50 +00001498 break;
Blue Swirl44209fc2012-12-02 17:25:06 +00001499 }
bellardea041c02003-06-25 16:16:50 +00001500 ptb = &tb1->jmp_next[n1];
1501 }
1502 *ptb = tb->jmp_next[n];
1503 tb->jmp_next[n] = NULL;
ths3b46e622007-09-17 08:09:54 +00001504
bellardea041c02003-06-25 16:16:50 +00001505 /* suppress the jump to next tb in generated code */
1506 tb_reset_jump(tb, n);
1507
bellard01243112004-01-04 15:48:17 +00001508 /* suppress jumps in the tb on which we could have jumped */
bellardea041c02003-06-25 16:16:50 +00001509 tb_reset_jump_recursive(tb_next);
1510 }
1511}
1512
1513static void tb_reset_jump_recursive(TranslationBlock *tb)
1514{
1515 tb_reset_jump_recursive2(tb, 0);
1516 tb_reset_jump_recursive2(tb, 1);
1517}
1518
bellard1fddef42005-04-17 19:16:13 +00001519#if defined(TARGET_HAS_ICE)
Paul Brook94df27f2010-02-28 23:47:45 +00001520#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +01001521static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
Paul Brook94df27f2010-02-28 23:47:45 +00001522{
1523 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1524}
1525#else
Avi Kivitya8170e52012-10-23 12:30:10 +02001526void tb_invalidate_phys_addr(hwaddr addr)
bellardd720b932004-04-25 17:57:43 +00001527{
Anthony Liguoric227f092009-10-01 16:12:16 -05001528 ram_addr_t ram_addr;
Avi Kivityf3705d52012-03-08 16:16:34 +02001529 MemoryRegionSection *section;
bellardd720b932004-04-25 17:57:43 +00001530
Blue Swirl44209fc2012-12-02 17:25:06 +00001531 section = phys_page_find(address_space_memory.dispatch,
1532 addr >> TARGET_PAGE_BITS);
Avi Kivityf3705d52012-03-08 16:16:34 +02001533 if (!(memory_region_is_ram(section->mr)
1534 || (section->mr->rom_device && section->mr->readable))) {
Avi Kivity06ef3522012-02-13 16:11:22 +02001535 return;
1536 }
Avi Kivityf3705d52012-03-08 16:16:34 +02001537 ram_addr = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00001538 + memory_region_section_addr(section, addr);
pbrook706cd4b2006-04-08 17:36:21 +00001539 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
bellardd720b932004-04-25 17:57:43 +00001540}
Max Filippov1e7855a2012-04-10 02:48:17 +04001541
1542static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
1543{
Max Filippov9d70c4b2012-05-27 20:21:08 +04001544 tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
1545 (pc & ~TARGET_PAGE_MASK));
Max Filippov1e7855a2012-04-10 02:48:17 +04001546}
bellardc27004e2005-01-03 23:35:10 +00001547#endif
Paul Brook94df27f2010-02-28 23:47:45 +00001548#endif /* TARGET_HAS_ICE */
bellardd720b932004-04-25 17:57:43 +00001549
Paul Brookc527ee82010-03-01 03:31:14 +00001550#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +01001551void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
Paul Brookc527ee82010-03-01 03:31:14 +00001552
1553{
1554}
1555
Andreas Färber9349b4f2012-03-14 01:38:32 +01001556int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
Paul Brookc527ee82010-03-01 03:31:14 +00001557 int flags, CPUWatchpoint **watchpoint)
1558{
1559 return -ENOSYS;
1560}
1561#else
pbrook6658ffb2007-03-16 23:58:11 +00001562/* Add a watchpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001563int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
aliguoria1d1bb32008-11-18 20:07:32 +00001564 int flags, CPUWatchpoint **watchpoint)
pbrook6658ffb2007-03-16 23:58:11 +00001565{
aliguorib4051332008-11-18 20:14:20 +00001566 target_ulong len_mask = ~(len - 1);
aliguoric0ce9982008-11-25 22:13:57 +00001567 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001568
aliguorib4051332008-11-18 20:14:20 +00001569 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
Max Filippov0dc23822012-01-29 03:15:23 +04001570 if ((len & (len - 1)) || (addr & ~len_mask) ||
1571 len == 0 || len > TARGET_PAGE_SIZE) {
aliguorib4051332008-11-18 20:14:20 +00001572 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1573 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1574 return -EINVAL;
1575 }
Anthony Liguori7267c092011-08-20 22:09:37 -05001576 wp = g_malloc(sizeof(*wp));
pbrook6658ffb2007-03-16 23:58:11 +00001577
aliguoria1d1bb32008-11-18 20:07:32 +00001578 wp->vaddr = addr;
aliguorib4051332008-11-18 20:14:20 +00001579 wp->len_mask = len_mask;
aliguoria1d1bb32008-11-18 20:07:32 +00001580 wp->flags = flags;
1581
aliguori2dc9f412008-11-18 20:56:59 +00001582 /* keep all GDB-injected watchpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001583 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001584 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001585 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001586 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001587
pbrook6658ffb2007-03-16 23:58:11 +00001588 tlb_flush_page(env, addr);
aliguoria1d1bb32008-11-18 20:07:32 +00001589
1590 if (watchpoint)
1591 *watchpoint = wp;
1592 return 0;
pbrook6658ffb2007-03-16 23:58:11 +00001593}
1594
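/* Usage sketch: the length must be a power of two no larger than a
   page and the address aligned to it.  BP_MEM_WRITE is the usual QEMU
   flag for write watchpoints; 'env' and 'guest_addr' are placeholders. */
#if 0
    CPUWatchpoint *wp;

    if (cpu_watchpoint_insert(env, guest_addr, 8,
                              BP_MEM_WRITE | BP_GDB, &wp) < 0) {
        /* rejected: zero, oversized or non-power-of-2 length, or an
           address that is not aligned to the length */
    }
#endif
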
aliguoria1d1bb32008-11-18 20:07:32 +00001595/* Remove a specific watchpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001596int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
aliguoria1d1bb32008-11-18 20:07:32 +00001597 int flags)
pbrook6658ffb2007-03-16 23:58:11 +00001598{
aliguorib4051332008-11-18 20:14:20 +00001599 target_ulong len_mask = ~(len - 1);
aliguoria1d1bb32008-11-18 20:07:32 +00001600 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001601
Blue Swirl72cf2d42009-09-12 07:36:22 +00001602 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001603 if (addr == wp->vaddr && len_mask == wp->len_mask
aliguori6e140f22008-11-18 20:37:55 +00001604 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
aliguoria1d1bb32008-11-18 20:07:32 +00001605 cpu_watchpoint_remove_by_ref(env, wp);
pbrook6658ffb2007-03-16 23:58:11 +00001606 return 0;
1607 }
1608 }
aliguoria1d1bb32008-11-18 20:07:32 +00001609 return -ENOENT;
pbrook6658ffb2007-03-16 23:58:11 +00001610}
1611
aliguoria1d1bb32008-11-18 20:07:32 +00001612/* Remove a specific watchpoint by reference. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001613void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
aliguoria1d1bb32008-11-18 20:07:32 +00001614{
Blue Swirl72cf2d42009-09-12 07:36:22 +00001615 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
edgar_igl7d03f822008-05-17 18:58:29 +00001616
aliguoria1d1bb32008-11-18 20:07:32 +00001617 tlb_flush_page(env, watchpoint->vaddr);
1618
Anthony Liguori7267c092011-08-20 22:09:37 -05001619 g_free(watchpoint);
edgar_igl7d03f822008-05-17 18:58:29 +00001620}
1621
aliguoria1d1bb32008-11-18 20:07:32 +00001622/* Remove all matching watchpoints. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001623void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
aliguoria1d1bb32008-11-18 20:07:32 +00001624{
aliguoric0ce9982008-11-25 22:13:57 +00001625 CPUWatchpoint *wp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001626
Blue Swirl72cf2d42009-09-12 07:36:22 +00001627 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001628 if (wp->flags & mask)
1629 cpu_watchpoint_remove_by_ref(env, wp);
aliguoric0ce9982008-11-25 22:13:57 +00001630 }
aliguoria1d1bb32008-11-18 20:07:32 +00001631}
Paul Brookc527ee82010-03-01 03:31:14 +00001632#endif
aliguoria1d1bb32008-11-18 20:07:32 +00001633
1634/* Add a breakpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001635int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
aliguoria1d1bb32008-11-18 20:07:32 +00001636 CPUBreakpoint **breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001637{
bellard1fddef42005-04-17 19:16:13 +00001638#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001639 CPUBreakpoint *bp;
ths3b46e622007-09-17 08:09:54 +00001640
Anthony Liguori7267c092011-08-20 22:09:37 -05001641 bp = g_malloc(sizeof(*bp));
aliguoria1d1bb32008-11-18 20:07:32 +00001642
1643 bp->pc = pc;
1644 bp->flags = flags;
1645
aliguori2dc9f412008-11-18 20:56:59 +00001646 /* keep all GDB-injected breakpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001647 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001648 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001649 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001650 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001651
1652 breakpoint_invalidate(env, pc);
1653
1654 if (breakpoint)
1655 *breakpoint = bp;
1656 return 0;
1657#else
1658 return -ENOSYS;
1659#endif
1660}
1661
1662/* Remove a specific breakpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001663int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
aliguoria1d1bb32008-11-18 20:07:32 +00001664{
1665#if defined(TARGET_HAS_ICE)
1666 CPUBreakpoint *bp;
1667
Blue Swirl72cf2d42009-09-12 07:36:22 +00001668 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00001669 if (bp->pc == pc && bp->flags == flags) {
1670 cpu_breakpoint_remove_by_ref(env, bp);
bellard4c3a88a2003-07-26 12:06:08 +00001671 return 0;
aliguoria1d1bb32008-11-18 20:07:32 +00001672 }
bellard4c3a88a2003-07-26 12:06:08 +00001673 }
aliguoria1d1bb32008-11-18 20:07:32 +00001674 return -ENOENT;
bellard4c3a88a2003-07-26 12:06:08 +00001675#else
aliguoria1d1bb32008-11-18 20:07:32 +00001676 return -ENOSYS;
bellard4c3a88a2003-07-26 12:06:08 +00001677#endif
1678}
1679
aliguoria1d1bb32008-11-18 20:07:32 +00001680/* Remove a specific breakpoint by reference. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001681void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001682{
bellard1fddef42005-04-17 19:16:13 +00001683#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001684 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
bellardd720b932004-04-25 17:57:43 +00001685
aliguoria1d1bb32008-11-18 20:07:32 +00001686 breakpoint_invalidate(env, breakpoint->pc);
1687
Anthony Liguori7267c092011-08-20 22:09:37 -05001688 g_free(breakpoint);
aliguoria1d1bb32008-11-18 20:07:32 +00001689#endif
1690}
1691
1692/* Remove all matching breakpoints. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001693void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
aliguoria1d1bb32008-11-18 20:07:32 +00001694{
1695#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001696 CPUBreakpoint *bp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001697
Blue Swirl72cf2d42009-09-12 07:36:22 +00001698 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001699 if (bp->flags & mask)
1700 cpu_breakpoint_remove_by_ref(env, bp);
aliguoric0ce9982008-11-25 22:13:57 +00001701 }
bellard4c3a88a2003-07-26 12:06:08 +00001702#endif
1703}
1704
bellardc33a3462003-07-29 20:50:33 +00001705/* enable or disable single step mode. EXCP_DEBUG is returned by the
1706 CPU loop after each instruction */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001707void cpu_single_step(CPUArchState *env, int enabled)
bellardc33a3462003-07-29 20:50:33 +00001708{
bellard1fddef42005-04-17 19:16:13 +00001709#if defined(TARGET_HAS_ICE)
bellardc33a3462003-07-29 20:50:33 +00001710 if (env->singlestep_enabled != enabled) {
1711 env->singlestep_enabled = enabled;
aliguorie22a25c2009-03-12 20:12:48 +00001712 if (kvm_enabled())
1713 kvm_update_guest_debug(env, 0);
1714 else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01001715 /* must flush all the translated code to avoid inconsistencies */
aliguorie22a25c2009-03-12 20:12:48 +00001716 /* XXX: only flush what is necessary */
1717 tb_flush(env);
1718 }
bellardc33a3462003-07-29 20:50:33 +00001719 }
1720#endif
1721}
1722
Andreas Färber9349b4f2012-03-14 01:38:32 +01001723static void cpu_unlink_tb(CPUArchState *env)
bellardea041c02003-06-25 16:16:50 +00001724{
pbrookd5975362008-06-07 20:50:51 +00001725 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1726 problem and hope the cpu will stop of its own accord. For userspace
1727 emulation this often isn't actually as bad as it sounds. Often
1728 signals are used primarily to interrupt blocking syscalls. */
aurel323098dba2009-03-07 21:28:24 +00001729 TranslationBlock *tb;
Anthony Liguoric227f092009-10-01 16:12:16 -05001730 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
aurel323098dba2009-03-07 21:28:24 +00001731
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001732 spin_lock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001733 tb = env->current_tb;
1734 /* if the cpu is currently executing code, we must unlink it and
1735 all the potentially executing TBs */
Riku Voipiof76cfe52009-12-04 15:16:30 +02001736 if (tb) {
aurel323098dba2009-03-07 21:28:24 +00001737 env->current_tb = NULL;
1738 tb_reset_jump_recursive(tb);
aurel323098dba2009-03-07 21:28:24 +00001739 }
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001740 spin_unlock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001741}
1742
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001743#ifndef CONFIG_USER_ONLY
aurel323098dba2009-03-07 21:28:24 +00001744/* mask must never be zero, except for A20 change call */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001745static void tcg_handle_interrupt(CPUArchState *env, int mask)
aurel323098dba2009-03-07 21:28:24 +00001746{
Andreas Färber60e82572012-05-02 22:23:49 +02001747 CPUState *cpu = ENV_GET_CPU(env);
aurel323098dba2009-03-07 21:28:24 +00001748 int old_mask;
1749
1750 old_mask = env->interrupt_request;
1751 env->interrupt_request |= mask;
1752
aliguori8edac962009-04-24 18:03:45 +00001753 /*
1754 * If called from iothread context, wake the target cpu in
1755 * case it is halted.
1756 */
Andreas Färber60e82572012-05-02 22:23:49 +02001757 if (!qemu_cpu_is_self(cpu)) {
Andreas Färberc08d7422012-05-03 04:34:15 +02001758 qemu_cpu_kick(cpu);
aliguori8edac962009-04-24 18:03:45 +00001759 return;
1760 }
aliguori8edac962009-04-24 18:03:45 +00001761
pbrook2e70f6e2008-06-29 01:03:05 +00001762 if (use_icount) {
pbrook266910c2008-07-09 15:31:50 +00001763 env->icount_decr.u16.high = 0xffff;
pbrook2e70f6e2008-06-29 01:03:05 +00001764 if (!can_do_io(env)
aurel32be214e62009-03-06 21:48:00 +00001765 && (mask & ~old_mask) != 0) {
pbrook2e70f6e2008-06-29 01:03:05 +00001766 cpu_abort(env, "Raised interrupt while not in I/O function");
1767 }
pbrook2e70f6e2008-06-29 01:03:05 +00001768 } else {
aurel323098dba2009-03-07 21:28:24 +00001769 cpu_unlink_tb(env);
bellardea041c02003-06-25 16:16:50 +00001770 }
1771}
1772
Jan Kiszkaec6959d2011-04-13 01:32:56 +02001773CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1774
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001775#else /* CONFIG_USER_ONLY */
1776
Andreas Färber9349b4f2012-03-14 01:38:32 +01001777void cpu_interrupt(CPUArchState *env, int mask)
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001778{
1779 env->interrupt_request |= mask;
1780 cpu_unlink_tb(env);
1781}
1782#endif /* CONFIG_USER_ONLY */
1783
Andreas Färber9349b4f2012-03-14 01:38:32 +01001784void cpu_reset_interrupt(CPUArchState *env, int mask)
bellardb54ad042004-05-20 13:42:52 +00001785{
1786 env->interrupt_request &= ~mask;
1787}
1788
Andreas Färber9349b4f2012-03-14 01:38:32 +01001789void cpu_exit(CPUArchState *env)
aurel323098dba2009-03-07 21:28:24 +00001790{
1791 env->exit_request = 1;
1792 cpu_unlink_tb(env);
1793}
1794
Andreas Färber9349b4f2012-03-14 01:38:32 +01001795void cpu_abort(CPUArchState *env, const char *fmt, ...)
bellard75012672003-06-21 13:11:07 +00001796{
1797 va_list ap;
pbrook493ae1f2007-11-23 16:53:59 +00001798 va_list ap2;
bellard75012672003-06-21 13:11:07 +00001799
1800 va_start(ap, fmt);
pbrook493ae1f2007-11-23 16:53:59 +00001801 va_copy(ap2, ap);
bellard75012672003-06-21 13:11:07 +00001802 fprintf(stderr, "qemu: fatal: ");
1803 vfprintf(stderr, fmt, ap);
1804 fprintf(stderr, "\n");
Peter Maydell6fd2a022012-10-05 15:04:43 +01001805 cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
aliguori93fcfe32009-01-15 22:34:14 +00001806 if (qemu_log_enabled()) {
1807 qemu_log("qemu: fatal: ");
1808 qemu_log_vprintf(fmt, ap2);
1809 qemu_log("\n");
Peter Maydell6fd2a022012-10-05 15:04:43 +01001810 log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
aliguori31b1a7b2009-01-15 22:35:09 +00001811 qemu_log_flush();
aliguori93fcfe32009-01-15 22:34:14 +00001812 qemu_log_close();
balrog924edca2007-06-10 14:07:13 +00001813 }
pbrook493ae1f2007-11-23 16:53:59 +00001814 va_end(ap2);
j_mayerf9373292007-09-29 12:18:20 +00001815 va_end(ap);
Riku Voipiofd052bf2010-01-25 14:30:49 +02001816#if defined(CONFIG_USER_ONLY)
1817 {
1818 struct sigaction act;
1819 sigfillset(&act.sa_mask);
1820 act.sa_handler = SIG_DFL;
1821 sigaction(SIGABRT, &act, NULL);
1822 }
1823#endif
bellard75012672003-06-21 13:11:07 +00001824 abort();
1825}
1826
Andreas Färber9349b4f2012-03-14 01:38:32 +01001827CPUArchState *cpu_copy(CPUArchState *env)
thsc5be9f02007-02-28 20:20:53 +00001828{
Andreas Färber9349b4f2012-03-14 01:38:32 +01001829 CPUArchState *new_env = cpu_init(env->cpu_model_str);
1830 CPUArchState *next_cpu = new_env->next_cpu;
thsc5be9f02007-02-28 20:20:53 +00001831 int cpu_index = new_env->cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001832#if defined(TARGET_HAS_ICE)
1833 CPUBreakpoint *bp;
1834 CPUWatchpoint *wp;
1835#endif
1836
Andreas Färber9349b4f2012-03-14 01:38:32 +01001837 memcpy(new_env, env, sizeof(CPUArchState));
aliguori5a38f082009-01-15 20:16:51 +00001838
1839 /* Preserve chaining and index. */
thsc5be9f02007-02-28 20:20:53 +00001840 new_env->next_cpu = next_cpu;
1841 new_env->cpu_index = cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001842
1843 /* Clone all break/watchpoints.
1844 Note: Once we support ptrace with hw-debug register access, make sure
1845 BP_CPU break/watchpoints are handled correctly on clone. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00001846 QTAILQ_INIT(&env->breakpoints);
1847 QTAILQ_INIT(&env->watchpoints);
aliguori5a38f082009-01-15 20:16:51 +00001848#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001849 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001850 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1851 }
Blue Swirl72cf2d42009-09-12 07:36:22 +00001852 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001853 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1854 wp->flags, NULL);
1855 }
1856#endif
1857
thsc5be9f02007-02-28 20:20:53 +00001858 return new_env;
1859}
1860
bellard01243112004-01-04 15:48:17 +00001861#if !defined(CONFIG_USER_ONLY)
Blue Swirl0cac1b62012-04-09 16:50:52 +00001862void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
edgar_igl5c751e92008-05-06 08:44:21 +00001863{
1864 unsigned int i;
1865
1866 /* Discard jump cache entries for any tb that might
1867 overlap the flushed page. */
1868 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1869 memset(&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001870 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001871
1872 i = tb_jmp_cache_hash_page(addr);
1873 memset(&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001874 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001875}
1876
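/* Note: two hash lookups are needed because entries are hashed by the
   page a TB *starts* on; a TB beginning on the preceding page may
   extend into the flushed one, so both pages' cache lines are cleared. */
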
Juan Quintelad24981d2012-05-22 00:42:40 +02001877static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
1878 uintptr_t length)
bellard1ccde1c2004-02-06 19:46:14 +00001879{
Juan Quintelad24981d2012-05-22 00:42:40 +02001880 uintptr_t start1;
bellardf23db162005-08-21 19:12:28 +00001881
bellard1ccde1c2004-02-06 19:46:14 +00001882 /* we modify the TLB cache so that the dirty bit will be set again
1883 when accessing the range */
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001884 start1 = (uintptr_t)qemu_safe_ram_ptr(start);
Stefan Weila57d23e2011-04-30 22:49:26 +02001885 /* Check that we don't span multiple blocks; doing so would break the
pbrook5579c7f2009-04-11 14:47:08 +00001886 address comparisons below. */
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001887 if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
pbrook5579c7f2009-04-11 14:47:08 +00001888 != (end - 1) - start) {
1889 abort();
1890 }
Blue Swirle5548612012-04-21 13:08:33 +00001891 cpu_tlb_reset_dirty_all(start1, length);
Juan Quintelad24981d2012-05-22 00:42:40 +02001892}
1894
1895/* Note: start and end must be within the same ram block. */
1896void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1897 int dirty_flags)
1898{
1899 uintptr_t length;
1900
1901 start &= TARGET_PAGE_MASK;
1902 end = TARGET_PAGE_ALIGN(end);
1903
1904 length = end - start;
1905 if (length == 0)
1906 return;
1907 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
1908
1909 if (tcg_enabled()) {
1910 tlb_reset_dirty_range_all(start, end, length);
1911 }
bellard1ccde1c2004-02-06 19:46:14 +00001912}
1913
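/* Hedged usage sketch: display code clearing the VGA dirty bits for a
   framebuffer range once it has redrawn it.  'fb_start' and 'fb_size'
   are placeholders; VGA_DIRTY_FLAG is the dirty-bit flag QEMU's display
   path conventionally uses. */
#if 0
    cpu_physical_memory_reset_dirty(fb_start, fb_start + fb_size,
                                    VGA_DIRTY_FLAG);
#endif
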
Blue Swirl8b9c99d2012-10-28 11:04:51 +00001914static int cpu_physical_memory_set_dirty_tracking(int enable)
aliguori74576192008-10-06 14:02:03 +00001915{
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001916 int ret = 0;
aliguori74576192008-10-06 14:02:03 +00001917 in_migration = enable;
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001918 return ret;
aliguori74576192008-10-06 14:02:03 +00001919}
1920
Avi Kivitya8170e52012-10-23 12:30:10 +02001921hwaddr memory_region_section_get_iotlb(CPUArchState *env,
Blue Swirle5548612012-04-21 13:08:33 +00001922 MemoryRegionSection *section,
1923 target_ulong vaddr,
Avi Kivitya8170e52012-10-23 12:30:10 +02001924 hwaddr paddr,
Blue Swirle5548612012-04-21 13:08:33 +00001925 int prot,
1926 target_ulong *address)
1927{
Avi Kivitya8170e52012-10-23 12:30:10 +02001928 hwaddr iotlb;
Blue Swirle5548612012-04-21 13:08:33 +00001929 CPUWatchpoint *wp;
1930
Blue Swirlcc5bea62012-04-14 14:56:48 +00001931 if (memory_region_is_ram(section->mr)) {
Blue Swirle5548612012-04-21 13:08:33 +00001932 /* Normal RAM. */
1933 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00001934 + memory_region_section_addr(section, paddr);
Blue Swirle5548612012-04-21 13:08:33 +00001935 if (!section->readonly) {
1936 iotlb |= phys_section_notdirty;
1937 } else {
1938 iotlb |= phys_section_rom;
1939 }
1940 } else {
1941 /* IO handlers are currently passed a physical address.
1942 It would be nice to pass an offset from the base address
1943 of that region. This would avoid having to special case RAM,
1944 and avoid full address decoding in every device.
1945 We can't use the high bits of pd for this because
1946 IO_MEM_ROMD uses these as a ram address. */
1947 iotlb = section - phys_sections;
Blue Swirlcc5bea62012-04-14 14:56:48 +00001948 iotlb += memory_region_section_addr(section, paddr);
Blue Swirle5548612012-04-21 13:08:33 +00001949 }
1950
1951 /* Make accesses to pages with watchpoints go via the
1952 watchpoint trap routines. */
1953 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1954 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
1955 /* Avoid trapping reads of pages with a write breakpoint. */
1956 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
1957 iotlb = phys_section_watch + paddr;
1958 *address |= TLB_MMIO;
1959 break;
1960 }
1961 }
1962 }
1963
1964 return iotlb;
1965}
1966
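/* The returned iotlb value is deliberately overloaded: for RAM it is
   the page-aligned ram_addr with the notdirty/rom section index OR'ed
   into the low bits, while for MMIO it is the section's index within
   phys_sections plus the offset of the access within the page.  The
   TLB fill/IO dispatch code decodes it accordingly. */
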
bellard01243112004-01-04 15:48:17 +00001967#else
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03001968/*
1969 * Walks guest process memory "regions" one by one
1970 * and calls callback function 'fn' for each region.
1971 */
Blue Swirl44209fc2012-12-02 17:25:06 +00001972struct walk_memory_regions_data {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001973 walk_memory_regions_fn fn;
1974 void *priv;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001975 uintptr_t start;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001976 int prot;
1977};
bellard9fa3e852004-01-04 18:06:42 +00001978
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001979static int walk_memory_regions_end(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00001980 abi_ulong end, int new_prot)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001981{
1982 if (data->start != -1ul) {
1983 int rc = data->fn(data->priv, data->start, end, data->prot);
1984 if (rc != 0) {
1985 return rc;
bellard9fa3e852004-01-04 18:06:42 +00001986 }
bellard33417e72003-08-10 21:47:01 +00001987 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001988
1989 data->start = (new_prot ? end : -1ul);
1990 data->prot = new_prot;
1991
1992 return 0;
1993}
1994
1995static int walk_memory_regions_1(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00001996 abi_ulong base, int level, void **lp)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001997{
Paul Brookb480d9b2010-03-12 23:23:29 +00001998 abi_ulong pa;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001999 int i, rc;
2000
2001 if (*lp == NULL) {
2002 return walk_memory_regions_end(data, base, 0);
2003 }
2004
2005 if (level == 0) {
2006 PageDesc *pd = *lp;
Blue Swirl44209fc2012-12-02 17:25:06 +00002007
Paul Brook7296aba2010-03-14 14:58:46 +00002008 for (i = 0; i < L2_SIZE; ++i) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002009 int prot = pd[i].flags;
2010
2011 pa = base | (i << TARGET_PAGE_BITS);
2012 if (prot != data->prot) {
2013 rc = walk_memory_regions_end(data, pa, prot);
2014 if (rc != 0) {
2015 return rc;
2016 }
2017 }
2018 }
2019 } else {
2020 void **pp = *lp;
Blue Swirl44209fc2012-12-02 17:25:06 +00002021
Paul Brook7296aba2010-03-14 14:58:46 +00002022 for (i = 0; i < L2_SIZE; ++i) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002023 pa = base | ((abi_ulong)i <<
2024 (TARGET_PAGE_BITS + L2_BITS * level));
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002025 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2026 if (rc != 0) {
2027 return rc;
2028 }
2029 }
2030 }
2031
2032 return 0;
2033}
2034
2035int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2036{
2037 struct walk_memory_regions_data data;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02002038 uintptr_t i;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002039
2040 data.fn = fn;
2041 data.priv = priv;
2042 data.start = -1ul;
2043 data.prot = 0;
2044
2045 for (i = 0; i < V_L1_SIZE; i++) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002046 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002047 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
Blue Swirl44209fc2012-12-02 17:25:06 +00002048
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002049 if (rc != 0) {
2050 return rc;
2051 }
2052 }
2053
2054 return walk_memory_regions_end(&data, 0, 0);
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002055}
2056
Paul Brookb480d9b2010-03-12 23:23:29 +00002057static int dump_region(void *priv, abi_ulong start,
2058 abi_ulong end, unsigned long prot)
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002059{
2060 FILE *f = (FILE *)priv;
2061
Paul Brookb480d9b2010-03-12 23:23:29 +00002062 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2063 " "TARGET_ABI_FMT_lx" %c%c%c\n",
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002064 start, end, end - start,
2065 ((prot & PAGE_READ) ? 'r' : '-'),
2066 ((prot & PAGE_WRITE) ? 'w' : '-'),
2067 ((prot & PAGE_EXEC) ? 'x' : '-'));
2068
Blue Swirl44209fc2012-12-02 17:25:06 +00002069 return 0;
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002070}
2071
2072/* dump memory mappings */
2073void page_dump(FILE *f)
2074{
2075 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2076 "start", "end", "size", "prot");
2077 walk_memory_regions(f, dump_region);
bellard33417e72003-08-10 21:47:01 +00002078}
2079
pbrook53a59602006-03-25 19:31:22 +00002080int page_get_flags(target_ulong address)
bellard33417e72003-08-10 21:47:01 +00002081{
bellard9fa3e852004-01-04 18:06:42 +00002082 PageDesc *p;
2083
2084 p = page_find(address >> TARGET_PAGE_BITS);
Blue Swirl44209fc2012-12-02 17:25:06 +00002085 if (!p) {
bellard9fa3e852004-01-04 18:06:42 +00002086 return 0;
Blue Swirl44209fc2012-12-02 17:25:06 +00002087 }
bellard9fa3e852004-01-04 18:06:42 +00002088 return p->flags;
bellard33417e72003-08-10 21:47:01 +00002089}
2090
Richard Henderson376a7902010-03-10 15:57:04 -08002091/* Modify the flags of a page and invalidate the code if necessary.
2092 The flag PAGE_WRITE_ORG is positioned automatically depending
2093 on PAGE_WRITE. The mmap_lock should already be held. */
pbrook53a59602006-03-25 19:31:22 +00002094void page_set_flags(target_ulong start, target_ulong end, int flags)
bellard9fa3e852004-01-04 18:06:42 +00002095{
Richard Henderson376a7902010-03-10 15:57:04 -08002096 target_ulong addr, len;
bellard9fa3e852004-01-04 18:06:42 +00002097
Richard Henderson376a7902010-03-10 15:57:04 -08002098 /* This function should never be called with addresses outside the
2099 guest address space. If this assert fires, it probably indicates
2100 a missing call to h2g_valid. */
Paul Brookb480d9b2010-03-12 23:23:29 +00002101#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2102 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002103#endif
2104 assert(start < end);
2105
bellard9fa3e852004-01-04 18:06:42 +00002106 start = start & TARGET_PAGE_MASK;
2107 end = TARGET_PAGE_ALIGN(end);
Richard Henderson376a7902010-03-10 15:57:04 -08002108
2109 if (flags & PAGE_WRITE) {
bellard9fa3e852004-01-04 18:06:42 +00002110 flags |= PAGE_WRITE_ORG;
Richard Henderson376a7902010-03-10 15:57:04 -08002111 }
2112
2113 for (addr = start, len = end - start;
2114 len != 0;
2115 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2116 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2117
2118 /* If a write-protected page is being made writable, invalidate
2119 the translated code it contains. */
ths5fafdf22007-09-16 21:08:06 +00002120 if (!(p->flags & PAGE_WRITE) &&
bellard9fa3e852004-01-04 18:06:42 +00002121 (flags & PAGE_WRITE) &&
2122 p->first_tb) {
bellardd720b932004-04-25 17:57:43 +00002123 tb_invalidate_phys_page(addr, 0, NULL);
bellard9fa3e852004-01-04 18:06:42 +00002124 }
2125 p->flags = flags;
2126 }
bellard9fa3e852004-01-04 18:06:42 +00002127}
2128
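/* Hedged usage sketch: what a user-mode mmap path would do after a
   successful host mmap -- mark the guest range valid, giving
   page_check_range()/page_unprotect() above something to work with.
   'start' and 'len' are placeholders. */
#if 0
    page_set_flags(start, start + len,
                   PAGE_VALID | PAGE_READ | PAGE_WRITE | PAGE_EXEC);
#endif
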
ths3d97b402007-11-02 19:02:07 +00002129int page_check_range(target_ulong start, target_ulong len, int flags)
2130{
2131 PageDesc *p;
2132 target_ulong end;
2133 target_ulong addr;
2134
Richard Henderson376a7902010-03-10 15:57:04 -08002135 /* This function should never be called with addresses outside the
2136 guest address space. If this assert fires, it probably indicates
2137 a missing call to h2g_valid. */
Blue Swirl338e9e62010-03-13 09:48:08 +00002138#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2139 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002140#endif
2141
Richard Henderson3e0650a2010-03-29 10:54:42 -07002142 if (len == 0) {
2143 return 0;
2144 }
Richard Henderson376a7902010-03-10 15:57:04 -08002145 if (start + len - 1 < start) {
2146 /* We've wrapped around. */
balrog55f280c2008-10-28 10:24:11 +00002147 return -1;
Richard Henderson376a7902010-03-10 15:57:04 -08002148 }
balrog55f280c2008-10-28 10:24:11 +00002149
Blue Swirl44209fc2012-12-02 17:25:06 +00002150 /* must do this before we lose bits in the next step */
2151 end = TARGET_PAGE_ALIGN(start + len);
ths3d97b402007-11-02 19:02:07 +00002152 start = start & TARGET_PAGE_MASK;
2153
Richard Henderson376a7902010-03-10 15:57:04 -08002154 for (addr = start, len = end - start;
2155 len != 0;
2156 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
ths3d97b402007-11-02 19:02:07 +00002157 p = page_find(addr >> TARGET_PAGE_BITS);
Blue Swirl44209fc2012-12-02 17:25:06 +00002158 if (!p) {
ths3d97b402007-11-02 19:02:07 +00002159 return -1;
Blue Swirl44209fc2012-12-02 17:25:06 +00002160 }
2161 if (!(p->flags & PAGE_VALID)) {
ths3d97b402007-11-02 19:02:07 +00002162 return -1;
Blue Swirl44209fc2012-12-02 17:25:06 +00002163 }
ths3d97b402007-11-02 19:02:07 +00002164
Blue Swirl44209fc2012-12-02 17:25:06 +00002165 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
ths3d97b402007-11-02 19:02:07 +00002166 return -1;
Blue Swirl44209fc2012-12-02 17:25:06 +00002167 }
bellarddae32702007-11-14 10:51:00 +00002168 if (flags & PAGE_WRITE) {
Blue Swirl44209fc2012-12-02 17:25:06 +00002169 if (!(p->flags & PAGE_WRITE_ORG)) {
bellarddae32702007-11-14 10:51:00 +00002170 return -1;
Blue Swirl44209fc2012-12-02 17:25:06 +00002171 }
bellarddae32702007-11-14 10:51:00 +00002172 /* unprotect the page if it was made read-only because it
2173 contains translated code */
2174 if (!(p->flags & PAGE_WRITE)) {
Blue Swirl44209fc2012-12-02 17:25:06 +00002175 if (!page_unprotect(addr, 0, NULL)) {
bellarddae32702007-11-14 10:51:00 +00002176 return -1;
Blue Swirl44209fc2012-12-02 17:25:06 +00002177 }
bellarddae32702007-11-14 10:51:00 +00002178 }
2179 return 0;
2180 }
ths3d97b402007-11-02 19:02:07 +00002181 }
2182 return 0;
2183}
2184
bellard9fa3e852004-01-04 18:06:42 +00002185/* called from signal handler: invalidate the code and unprotect the
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002186 page. Return TRUE if the fault was successfully handled. */
Stefan Weil6375e092012-04-06 22:26:15 +02002187int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00002188{
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002189 unsigned int prot;
2190 PageDesc *p;
pbrook53a59602006-03-25 19:31:22 +00002191 target_ulong host_start, host_end, addr;
bellard9fa3e852004-01-04 18:06:42 +00002192
pbrookc8a706f2008-06-02 16:16:42 +00002193 /* Technically this isn't safe inside a signal handler. However we
2194 know this only ever happens in a synchronous SEGV handler, so in
2195 practice it seems to be ok. */
2196 mmap_lock();
2197
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002198 p = page_find(address >> TARGET_PAGE_BITS);
2199 if (!p) {
pbrookc8a706f2008-06-02 16:16:42 +00002200 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002201 return 0;
pbrookc8a706f2008-06-02 16:16:42 +00002202 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002203
bellard9fa3e852004-01-04 18:06:42 +00002204 /* if the page was really writable, then we change its
2205 protection back to writable */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002206 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2207 host_start = address & qemu_host_page_mask;
2208 host_end = host_start + qemu_host_page_size;
2209
2210 prot = 0;
2211 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2212 p = page_find(addr >> TARGET_PAGE_BITS);
2213 p->flags |= PAGE_WRITE;
2214 prot |= p->flags;
2215
bellard9fa3e852004-01-04 18:06:42 +00002216 /* and since the content will be modified, we must invalidate
2217 the corresponding translated code. */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002218 tb_invalidate_phys_page(addr, pc, puc);
bellard9fa3e852004-01-04 18:06:42 +00002219#ifdef DEBUG_TB_CHECK
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002220 tb_invalidate_check(addr);
bellard9fa3e852004-01-04 18:06:42 +00002221#endif
bellard9fa3e852004-01-04 18:06:42 +00002222 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002223 mprotect((void *)g2h(host_start), qemu_host_page_size,
2224 prot & PAGE_BITS);
2225
2226 mmap_unlock();
2227 return 1;
bellard9fa3e852004-01-04 18:06:42 +00002228 }
pbrookc8a706f2008-06-02 16:16:42 +00002229 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002230 return 0;
2231}
bellard9fa3e852004-01-04 18:06:42 +00002232#endif /* defined(CONFIG_USER_ONLY) */
2233
pbrooke2eef172008-06-08 01:09:01 +00002234#if !defined(CONFIG_USER_ONLY)
pbrook8da3ff12008-12-01 18:59:50 +00002235
Paul Brookc04b2b72010-03-01 03:31:14 +00002236#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2237typedef struct subpage_t {
Avi Kivity70c68e42012-01-02 12:32:48 +02002238 MemoryRegion iomem;
Avi Kivitya8170e52012-10-23 12:30:10 +02002239 hwaddr base;
Avi Kivity5312bd82012-02-12 18:32:55 +02002240 uint16_t sub_section[TARGET_PAGE_SIZE];
Paul Brookc04b2b72010-03-01 03:31:14 +00002241} subpage_t;
2242
Anthony Liguoric227f092009-10-01 16:12:16 -05002243static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002244 uint16_t section);
Avi Kivitya8170e52012-10-23 12:30:10 +02002245static subpage_t *subpage_init(hwaddr base);

Avi Kivity5312bd82012-02-12 18:32:55 +02002246static void destroy_page_desc(uint16_t section_index)
Avi Kivity54688b12012-02-09 17:34:32 +02002247{
Avi Kivity5312bd82012-02-12 18:32:55 +02002248 MemoryRegionSection *section = &phys_sections[section_index];
2249 MemoryRegion *mr = section->mr;
Avi Kivity54688b12012-02-09 17:34:32 +02002250
2251 if (mr->subpage) {
2252 subpage_t *subpage = container_of(mr, subpage_t, iomem);
2253 memory_region_destroy(&subpage->iomem);
2254 g_free(subpage);
2255 }
2256}
2257
Avi Kivity4346ae32012-02-10 17:00:01 +02002258static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
Avi Kivity54688b12012-02-09 17:34:32 +02002259{
2260 unsigned i;
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002261 PhysPageEntry *p;
Avi Kivity54688b12012-02-09 17:34:32 +02002262
Avi Kivityc19e8802012-02-13 20:25:31 +02002263 if (lp->ptr == PHYS_MAP_NODE_NIL) {
Avi Kivity54688b12012-02-09 17:34:32 +02002264 return;
2265 }
2266
Avi Kivityc19e8802012-02-13 20:25:31 +02002267 p = phys_map_nodes[lp->ptr];
Avi Kivity4346ae32012-02-10 17:00:01 +02002268 for (i = 0; i < L2_SIZE; ++i) {
Avi Kivity07f07b32012-02-13 20:45:32 +02002269 if (!p[i].is_leaf) {
Avi Kivity54688b12012-02-09 17:34:32 +02002270 destroy_l2_mapping(&p[i], level - 1);
Avi Kivity4346ae32012-02-10 17:00:01 +02002271 } else {
Avi Kivityc19e8802012-02-13 20:25:31 +02002272 destroy_page_desc(p[i].ptr);
Avi Kivity54688b12012-02-09 17:34:32 +02002273 }
Avi Kivity54688b12012-02-09 17:34:32 +02002274 }
Avi Kivity07f07b32012-02-13 20:45:32 +02002275 lp->is_leaf = 0;
Avi Kivityc19e8802012-02-13 20:25:31 +02002276 lp->ptr = PHYS_MAP_NODE_NIL;
Avi Kivity54688b12012-02-09 17:34:32 +02002277}
2278
Avi Kivityac1970f2012-10-03 16:22:53 +02002279static void destroy_all_mappings(AddressSpaceDispatch *d)
Avi Kivity54688b12012-02-09 17:34:32 +02002280{
Avi Kivityac1970f2012-10-03 16:22:53 +02002281 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002282 phys_map_nodes_reset();
Avi Kivity54688b12012-02-09 17:34:32 +02002283}
2284
Avi Kivity5312bd82012-02-12 18:32:55 +02002285static uint16_t phys_section_add(MemoryRegionSection *section)
2286{
2287 if (phys_sections_nb == phys_sections_nb_alloc) {
2288 phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
2289 phys_sections = g_renew(MemoryRegionSection, phys_sections,
2290 phys_sections_nb_alloc);
2291 }
2292 phys_sections[phys_sections_nb] = *section;
2293 return phys_sections_nb++;
2294}
2295
2296static void phys_sections_clear(void)
2297{
2298 phys_sections_nb = 0;
2299}
2300
Avi Kivityac1970f2012-10-03 16:22:53 +02002301static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02002302{
2303 subpage_t *subpage;
Avi Kivitya8170e52012-10-23 12:30:10 +02002304 hwaddr base = section->offset_within_address_space
Avi Kivity0f0cb162012-02-13 17:14:32 +02002305 & TARGET_PAGE_MASK;
Avi Kivityac1970f2012-10-03 16:22:53 +02002306 MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002307 MemoryRegionSection subsection = {
2308 .offset_within_address_space = base,
2309 .size = TARGET_PAGE_SIZE,
2310 };
Avi Kivitya8170e52012-10-23 12:30:10 +02002311 hwaddr start, end;
Avi Kivity0f0cb162012-02-13 17:14:32 +02002312
Avi Kivityf3705d52012-03-08 16:16:34 +02002313 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002314
Avi Kivityf3705d52012-03-08 16:16:34 +02002315 if (!(existing->mr->subpage)) {
Avi Kivity0f0cb162012-02-13 17:14:32 +02002316 subpage = subpage_init(base);
2317 subsection.mr = &subpage->iomem;
Avi Kivityac1970f2012-10-03 16:22:53 +02002318 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
Avi Kivity29990972012-02-13 20:21:20 +02002319 phys_section_add(&subsection));
Avi Kivity0f0cb162012-02-13 17:14:32 +02002320 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02002321 subpage = container_of(existing->mr, subpage_t, iomem);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002322 }
2323 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
Tyler Halladb2a9b2012-07-25 18:45:03 -04002324 end = start + section->size - 1;
Avi Kivity0f0cb162012-02-13 17:14:32 +02002325 subpage_register(subpage, start, end, phys_section_add(section));
2326}
2327
Avi Kivityac1970f2012-10-03 16:22:53 +02002329static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
bellard33417e72003-08-10 21:47:01 +00002330{
Avi Kivitya8170e52012-10-23 12:30:10 +02002331 hwaddr start_addr = section->offset_within_address_space;
Avi Kivitydd811242012-01-02 12:17:03 +02002332 ram_addr_t size = section->size;
Avi Kivitya8170e52012-10-23 12:30:10 +02002333 hwaddr addr;
Avi Kivity5312bd82012-02-12 18:32:55 +02002334 uint16_t section_index = phys_section_add(section);
Avi Kivitydd811242012-01-02 12:17:03 +02002335
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002336 assert(size);
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002337
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002338 addr = start_addr;
Avi Kivityac1970f2012-10-03 16:22:53 +02002339 phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
Avi Kivity29990972012-02-13 20:21:20 +02002340 section_index);
bellard33417e72003-08-10 21:47:01 +00002341}
2342
Avi Kivityac1970f2012-10-03 16:22:53 +02002343static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02002344{
Avi Kivityac1970f2012-10-03 16:22:53 +02002345 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002346 MemoryRegionSection now = *section, remain = *section;
2347
2348 if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
2349 || (now.size < TARGET_PAGE_SIZE)) {
2350 now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
2351 - now.offset_within_address_space,
2352 now.size);
Avi Kivityac1970f2012-10-03 16:22:53 +02002353 register_subpage(d, &now);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002354 remain.size -= now.size;
2355 remain.offset_within_address_space += now.size;
2356 remain.offset_within_region += now.size;
2357 }
Tyler Hall69b67642012-07-25 18:45:04 -04002358 while (remain.size >= TARGET_PAGE_SIZE) {
2359 now = remain;
2360 if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
2361 now.size = TARGET_PAGE_SIZE;
Avi Kivityac1970f2012-10-03 16:22:53 +02002362 register_subpage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04002363 } else {
2364 now.size &= TARGET_PAGE_MASK;
Avi Kivityac1970f2012-10-03 16:22:53 +02002365 register_multipage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04002366 }
Avi Kivity0f0cb162012-02-13 17:14:32 +02002367 remain.size -= now.size;
2368 remain.offset_within_address_space += now.size;
2369 remain.offset_within_region += now.size;
2370 }
2371 now = remain;
2372 if (now.size) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002373 register_subpage(d, &now);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002374 }
2375}
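
/*
 * Worked example (added for clarity, not in the original file): with 4 KiB
 * pages and a page-aligned offset_within_region after the head, a section
 * covering [0x1800, 0x4400) is split by mem_add() into a subpage head
 * [0x1800, 0x2000), a multipage middle [0x2000, 0x4000), and a subpage
 * tail [0x4000, 0x4400).
 */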

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC 0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}
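
/*
 * Illustrative sketch (not part of the original file): the helper reports
 * the filesystem block size, which on hugetlbfs equals the hugepage size.
 * "/dev/hugepages" is a hypothetical mount point.
 */
#if 0
{
    long hpagesize = gethugepagesize("/dev/hugepages");
    if (hpagesize) {
        printf("hugepage size: %ld bytes\n", hpagesize);   /* e.g. 2097152 */
    }
}
#endif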

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    unlink(filename);
    free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs on older hosts, so don't
     * bother bailing out on errors.  If anything goes wrong with it
     * under other filesystems, mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively allocate all physical pages when
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    block->fd = fd;
    return area;
}
#endif
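
/*
 * Illustrative sketch (not part of the original file): how a caller might
 * back a RAM block with hugetlbfs via file_ram_alloc(), mirroring the real
 * -mem-path path below.  The function name and mount point are made up.
 * Note the size is rounded up to a hugepage multiple inside the helper,
 * e.g. 5 MB with 2 MB hugepages becomes 6 MB.
 */
#if 0
static void *example_hugepage_backing(RAMBlock *block, ram_addr_t size)
{
    /* "/dev/hugepages" is a hypothetical hugetlbfs mount point */
    void *host = file_ram_alloc(block, size, "/dev/hugepages");
    if (!host) {
        host = qemu_vmalloc(size);   /* fall back to anonymous memory */
    }
    return host;
}
#endif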

static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    if (QLIST_EMPTY(&ram_list.blocks))
        return 0;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QLIST_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}
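
/*
 * Worked example (added for clarity, not in the original file): with blocks
 * occupying [0x0, 0x1000) and [0x3000, 0x5000), a request for size 0x1000
 * considers the gaps [0x1000, 0x3000) and [0x5000, RAM_ADDR_MAX); the first
 * is the smallest gap that fits, so find_ram_offset() returns 0x1000.
 */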

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QLIST_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;
    QemuOpts *machine_opts;

    /* Use MADV_DONTDUMP if the user doesn't want guest memory in the core */
    machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (machine_opts &&
        !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                            "but dump_guest_core=off specified\n");
        }
    }
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
}
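
/*
 * Illustrative sketch (not part of the original file): a board model
 * typically names a freshly allocated block right after allocation so that
 * migration can match blocks by idstr.  ram_size and mr are made-up names.
 */
#if 0
{
    /* mr: the backing MemoryRegion, ram_size: the block size (both made up) */
    ram_addr_t offset = qemu_ram_alloc(ram_size, mr);
    qemu_ram_set_idstr(offset, "pc.ram", NULL);   /* idstr becomes "pc.ram" */
}
#endif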

static int memory_try_enable_merging(void *addr, size_t len)
{
    QemuOpts *opts;

    opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                memory_try_enable_merging(new_block->host, size);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_vmalloc(size);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}
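
/*
 * Illustrative sketch (not part of the original file): the from_ptr variant
 * is the path for devices that already own suitable host memory, e.g. a
 * shared-memory mapping.  fd, size and mr are made-up names.
 */
#if 0
{
    /* fd: a shared-memory file descriptor, size: its length,
     * mr: the backing MemoryRegion (all hypothetical) */
    void *shm = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    ram_addr_t offset = qemu_ram_alloc_from_ptr(size, shm, mr);
    /* the block is flagged RAM_PREALLOC_MASK, so qemu_ram_free() will not
     * attempt to release memory it does not own */
}
#endif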

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            g_free(block);
            return;
        }
    }
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            g_free(block);
            return;
        }
    }
}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            /* Move this entry to the start of the list.  */
            if (block != QLIST_FIRST(&ram_list.blocks)) {
                QLIST_REMOVE(block, next);
                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
            }
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}
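
/*
 * Illustrative sketch (not part of the original file): a display device
 * that allocated its VRAM block can translate the ram_addr_t into a host
 * pointer for local access.  vram_offset and vram_size are made-up names.
 */
#if 0
{
    uint8_t *vram = qemu_get_ram_ptr(vram_offset);
    memset(vram, 0, vram_size);   /* safe: the device owns this block */
}
#endif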

/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
 */
static void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QLIST_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);
}

int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset. */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}
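
/*
 * Illustrative round trip (added for clarity, not in the original file):
 * translating a host pointer back to its ram_addr_t recovers the offset
 * handed out by qemu_ram_alloc().  offset is a made-up name; under these
 * assumptions the assertion holds.
 */
#if 0
{
    void *host = qemu_get_ram_ptr(offset);   /* offset from qemu_ram_alloc() */
    assert(qemu_ram_addr_from_host_nofail(host) == offset);
}
#endif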

static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
#endif
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
#endif
}

static const MemoryRegionOps unassigned_mem_ops = {
    .read = unassigned_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t error_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    abort();
}

static void error_mem_write(void *opaque, hwaddr addr,
                            uint64_t value, unsigned size)
{
    abort();
}

static const MemoryRegionOps error_mem_ops = {
    .read = error_mem_read,
    .write = error_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const MemoryRegionOps rom_mem_ops = {
    .read = error_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static const MemoryRegionOps notdirty_mem_ops = {
    .read = error_mem_read,
    .write = notdirty_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUArchState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(env);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, hwaddr addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(addr, val);
        break;
    case 2:
        stw_phys(addr, val);
        break;
    case 4:
        stl_phys(addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_read(void *opaque, hwaddr addr,
                             unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    return io_mem_read(section->mr, addr, len);
}

static void subpage_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx
           " idx %d value %"PRIx64"\n",
           __func__, mmio, len, addr, idx, value);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    io_mem_write(section->mr, addr, value, len);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_ram_read(void *opaque, hwaddr addr,
                                 unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return ldub_p(ptr);
    case 2: return lduw_p(ptr);
    case 4: return ldl_p(ptr);
    default: abort();
    }
}

static void subpage_ram_write(void *opaque, hwaddr addr,
                              uint64_t value, unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return stb_p(ptr, value);
    case 2: return stw_p(ptr, value);
    case 4: return stl_p(ptr, value);
    default: abort();
    }
}

static const MemoryRegionOps subpage_ram_ops = {
    .read = subpage_ram_read,
    .write = subpage_ram_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    if (memory_region_is_ram(phys_sections[section].mr)) {
        MemoryRegionSection new_section = phys_sections[section];
        new_section.mr = &io_mem_subpage_ram;
        section = phys_section_add(&new_section);
    }
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}
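
/*
 * Worked example (added for clarity, not in the original file): registering
 * a 0x100-byte range starting at page offset 0x40 maps sub_section indices
 * SUBPAGE_IDX(0x40) through SUBPAGE_IDX(0x13f) to the given section, so a
 * later access at page offset 0x80 is dispatched to that section's
 * MemoryRegion by subpage_read()/subpage_write() above.
 */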

static subpage_t *subpage_init(hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->base = base;
    memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);

    return mmio;
}

static uint16_t dummy_section(MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = UINT64_MAX,
    };

    return phys_section_add(&section);
}

MemoryRegion *iotlb_to_region(hwaddr index)
{
    return phys_sections[index & ~TARGET_PAGE_MASK].mr;
}

static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
    memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
                          "subpage-ram", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}

static void mem_begin(MemoryListener *listener)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);

    destroy_all_mappings(d);
    d->phys_map.ptr = PHYS_MAP_NODE_NIL;
}

static void core_begin(MemoryListener *listener)
{
    phys_sections_clear();
    phys_section_unassigned = dummy_section(&io_mem_unassigned);
    phys_section_notdirty = dummy_section(&io_mem_notdirty);
    phys_section_rom = dummy_section(&io_mem_rom);
    phys_section_watch = dummy_section(&io_mem_watch);
}

static void tcg_commit(MemoryListener *listener)
{
    CPUArchState *env;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}

static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(1);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(0);
}

static void io_region_add(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);

    mrio->mr = section->mr;
    mrio->offset = section->offset_within_region;
    iorange_init(&mrio->iorange, &memory_region_iorange_ops,
                 section->offset_within_address_space, section->size);
    ioport_register(&mrio->iorange);
}

static void io_region_del(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    isa_unassign_ioport(section->offset_within_address_space, section->size);
}

static MemoryListener core_memory_listener = {
    .begin = core_begin,
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
    .priority = 1,
};

static MemoryListener io_memory_listener = {
    .region_add = io_region_add,
    .region_del = io_region_del,
    .priority = 0,
};

static MemoryListener tcg_memory_listener = {
    .commit = tcg_commit,
};

void address_space_init_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
    d->listener = (MemoryListener) {
        .begin = mem_begin,
        .region_add = mem_add,
        .region_nop = mem_add,
        .priority = 0,
    };
    as->dispatch = d;
    memory_listener_register(&d->listener, as);
}
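
/*
 * Illustrative sketch (not part of the original file): a new address space
 * gets its dispatch structure wired up before use; memory_map_init() below
 * does this for the system memory and I/O spaces via address_space_init().
 * The names as and root_mr are made up.
 */
#if 0
{
    static AddressSpace as;            /* hypothetical additional space */
    address_space_init(&as, root_mr);  /* root_mr: some MemoryRegion, made up */
    /* the dispatch machinery above is now live for &as */
}
#endif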

void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    memory_listener_unregister(&d->listener);
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    g_free(d);
    as->dispatch = NULL;
}

static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, "system", INT64_MAX);
    address_space_init(&address_space_memory, system_memory);
    address_space_memory.name = "memory";

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init(system_io, "io", 65536);
    address_space_init(&address_space_io, system_io);
    address_space_io.name = "I/O";

    memory_listener_register(&core_memory_listener, &address_space_memory);
    memory_listener_register(&io_memory_listener, &address_space_io);
    memory_listener_register(&tcg_memory_listener, &address_space_memory);

    dma_context_init(&dma_context_memory, &address_space_memory,
                     NULL, NULL, NULL);
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else

static void invalidate_and_set_dirty(hwaddr addr,
                                     hwaddr length)
{
    if (!cpu_physical_memory_is_dirty(addr)) {
        /* invalidate code */
        tb_invalidate_phys_page_range(addr, addr + length, 0);
        /* set dirty bit */
        cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
    }
    xen_modified_memory(addr, length);
}

void address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
                      int len, bool is_write)
{
    AddressSpaceDispatch *d = as->dispatch;
    int l;
    uint8_t *ptr;
    uint32_t val;
    hwaddr page;
    MemoryRegionSection *section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(d, page >> TARGET_PAGE_BITS);

        if (is_write) {
            if (!memory_region_is_ram(section->mr)) {
                hwaddr addr1;
                addr1 = memory_region_section_addr(section, addr);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write(section->mr, addr1, val, 4);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write(section->mr, addr1, val, 2);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write(section->mr, addr1, val, 1);
                    l = 1;
                }
            } else if (!section->readonly) {
                ram_addr_t addr1;
                addr1 = memory_region_get_ram_addr(section->mr)
                    + memory_region_section_addr(section, addr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
                qemu_put_ram_ptr(ptr);
            }
        } else {
            if (!(memory_region_is_ram(section->mr) ||
                  memory_region_is_romd(section->mr))) {
                hwaddr addr1;
                /* I/O case */
                addr1 = memory_region_section_addr(section, addr);
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read(section->mr, addr1, 4);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read(section->mr, addr1, 2);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read(section->mr, addr1, 1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(section->mr->ram_addr
                                       + memory_region_section_addr(section,
                                                                    addr));
                memcpy(buf, ptr, l);
                qemu_put_ram_ptr(ptr);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
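
/*
 * Illustrative sketch (not part of the original file): reading a 32-bit
 * value from a guest-physical address through the system memory address
 * space.  GUEST_PADDR is a made-up constant.
 */
#if 0
{
    uint8_t buf[4];
    address_space_rw(&address_space_memory, GUEST_PADDR, buf, 4, false);
    uint32_t val = ldl_p(buf);   /* decode in the guest's byte order */
}
#endif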

void address_space_write(AddressSpace *as, hwaddr addr,
                         const uint8_t *buf, int len)
{
    address_space_rw(as, addr, (uint8_t *)buf, len, true);
}

/**
 * address_space_read: read from an address space.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @buf: buffer with the data transferred
 * @len: length of the data transferred
 */
void address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
{
    address_space_rw(as, addr, buf, len, false);
}

void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, buf, len, is_write);
}

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(hwaddr addr,
                                   const uint8_t *buf, int len)
{
    AddressSpaceDispatch *d = address_space_memory.dispatch;
    int l;
    uint8_t *ptr;
    hwaddr page;
    MemoryRegionSection *section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(d, page >> TARGET_PAGE_BITS);

        if (!(memory_region_is_ram(section->mr) ||
              memory_region_is_romd(section->mr))) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = memory_region_get_ram_addr(section->mr)
                + memory_region_section_addr(section, addr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(addr1, l);
            qemu_put_ram_ptr(ptr);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

typedef struct {
    void *buffer;
    hwaddr addr;
    hwaddr len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

static void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    AddressSpaceDispatch *d = as->dispatch;
    hwaddr len = *plen;
    hwaddr todo = 0;
    int l;
    hwaddr page;
    MemoryRegionSection *section;
    ram_addr_t raddr = RAM_ADDR_MAX;
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(d, page >> TARGET_PAGE_BITS);

        if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                address_space_read(as, addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            raddr = memory_region_get_ram_addr(section->mr)
                + memory_region_section_addr(section, addr);
        }

        len -= l;
        addr += l;
        todo += l;
    }
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}

/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                invalidate_and_set_dirty(addr1, l);
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}

void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len,
                               is_write, access_len);
}
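
/* Usage sketch (illustrative only, not part of the build): the usual
 * zero-copy access pattern built on the two wrappers above. The function
 * name and arguments are hypothetical.
 *
 *   static void my_device_fill(hwaddr addr, hwaddr size)
 *   {
 *       hwaddr plen = size;
 *       void *host = cpu_physical_memory_map(addr, &plen, 1);
 *       if (!host) {
 *           return;  // resources exhausted; see cpu_register_map_client()
 *       }
 *       memset(host, 0, plen);                  // plen may be < size
 *       cpu_physical_memory_unmap(host, plen, 1, plen);
 *   }
 */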

/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint32_t val;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
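
/* Worked example (hypothetical values): for a word stored in guest RAM as
 * the byte sequence 0x12 0x34 0x56 0x78, ldl_be_phys() returns 0x12345678
 * and ldl_le_phys() returns 0x78563412, independent of host byte order;
 * ldl_phys() returns whichever of the two matches the target's native
 * endianness.
 */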

/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);

        /* XXX This is broken when device endian != cpu endian.
           Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = io_mem_read(section->mr, addr, 4) << 32;
        val |= io_mem_read(section->mr, addr + 4, 4);
#else
        val = io_mem_read(section->mr, addr, 4);
        val |= io_mem_read(section->mr, addr + 4, 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(hwaddr addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(hwaddr addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(hwaddr addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
                               & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}

void stq_phys_notdirty(hwaddr addr, uint64_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write(section->mr, addr, val >> 32, 4);
        io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
#else
        io_mem_write(section->mr, addr, (uint32_t)val, 4);
        io_mem_write(section->mr, addr + 4, val >> 32, 4);
#endif
    } else {
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        stq_p(ptr, val);
    }
}

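/* Usage sketch (illustrative only): a softmmu page-table walker updating
 * the accessed bit in a guest PTE wants exactly these semantics -- the
 * store must not set the dirty bit or invalidate translated code, because
 * the dirty bitmap may itself be tracking PTE modifications. The constant
 * below is hypothetical.
 *
 *   uint32_t pte = ldl_phys(pte_addr);
 *   stl_phys_notdirty(pte_addr, pte | PG_ACCESSED_MASK);
 */
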
/* warning: addr must be aligned */
static inline void stl_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

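/* Usage sketch (illustrative only): device code should prefer the
 * fixed-endian variants for bus-defined layouts. PCI, for instance, is
 * little-endian by specification, so delivering an MSI message might look
 * like this (the address and data values are hypothetical):
 *
 *   stl_le_phys(msi_address, msi_data);
 */
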
/* XXX: optimize */
void stb_phys(hwaddr addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 2);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
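
/* Usage sketch (illustrative only): this is the routine the gdbstub goes
 * through to service memory read/write packets; it walks the guest page
 * tables one page at a time via cpu_get_phys_page_debug().
 *
 *   uint8_t buf[16];
 *   if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0) {
 *       // vaddr not mapped in the guest page tables
 *   }
 */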
#endif

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc(retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred. */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn. */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB. If this is not
       the first instruction in a TB then re-execute the preceding
       branch. */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen. */
    if (n > CF_COUNT_MASK) {
        cpu_abort(env, "TB too big during recompile");
    }

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception. In practice
       we have already translated the block once so it's probably ok. */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB. */
    cpu_resume_from_signal(env, NULL);
}

#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size) {
            max_target_code_size = tb->size;
        }
        if (tb->page_addr[1] != -1) {
            cross_page++;
        }
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%zd\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer)
                / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

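/* Usage note: dump_exec_info() is what backs the monitor's "info jit"
 * command; any frontend with a FILE-style sink can also call it directly,
 * e.g. (illustrative only):
 *
 *   dump_exec_info(stderr, fprintf);
 */
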
/*
 * A helper function for the _utterly broken_ virtio device model to find
 * out if it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#endif

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             phys_addr >> TARGET_PAGE_BITS);

    return !(memory_region_is_ram(section->mr) ||
             memory_region_is_romd(section->mr));
}
#endif
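
/* Usage sketch (illustrative only): callers such as the guest-memory dump
 * code use this predicate to decide whether a physical page can be read
 * back as RAM/ROM contents:
 *
 *   if (!cpu_physical_memory_is_io(paddr)) {
 *       // safe to copy the page out as memory
 *   }
 */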