/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#include "cputlb.h"

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump.  ARM and Sparc64
   have limited branch ranges (possibly also PPC), so place it in a
   section close to the code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32) && !defined(_WIN64)
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *, cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self-modifying code, we count the number
       of lookups we do on a given page before switching to a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

#define P_L2_LEVELS \
    (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)

uintptr_t qemu_real_host_page_size;
uintptr_t qemu_host_page_size;
uintptr_t qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];
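
/*
 * Worked example of the geometry (illustrative only; the real values
 * depend on the host/target build): with L1_MAP_ADDR_SPACE_BITS = 32
 * and TARGET_PAGE_BITS = 12, 20 page-index bits remain.  20 % 10 == 0,
 * so V_L1_BITS_REM = 0 (< 4), V_L1_BITS = 10 and V_L1_SHIFT = 10: a
 * 1024-entry l1_map over one 1024-entry bottom level of PageDescs.
 * With a 47-bit user-mode address space instead, 35 % 10 == 5 (>= 4)
 * gives a 32-entry l1_map (V_L1_BITS = 5) over three 10-bit levels
 * (V_L1_SHIFT = 30).
 */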

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageEntry PhysPageEntry;

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
    /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};
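
/* The whole physical map therefore lives in 16-bit entries: one bit says
   whether 'ptr' is a leaf, and the remaining 15 bits index either
   phys_sections (for leaves) or phys_map_nodes (for interior nodes).
   That bounds the map at roughly 32K nodes/sections, which is why the
   all-ones 15-bit value is reserved as PHYS_MAP_NODE_NIL below.  */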

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to MemoryRegionSections.  */
static PhysPageEntry phys_map = { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };

static void io_mem_init(void);
static void memory_map_init(void);

static MemoryRegion io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}
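
/*
 * Illustrative walk, assuming the two-level (32-bit, 4K page) geometry
 * described above: for page index 0xABCDE, the top V_L1_BITS select
 * l1_map[0xABCDE >> 10] == l1_map[0x2AF], the interior loop runs zero
 * times, and the low L2_BITS select entry 0xDE of the bottom-level
 * PageDesc array.  With alloc == 0 the walk returns NULL at the first
 * missing level, so plain lookups never allocate.
 */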

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}

static void phys_page_set_level(PhysPageEntry *lp, target_phys_addr_t *index,
                                target_phys_addr_t *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    target_phys_addr_t step = (target_phys_addr_t)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}
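
/* Note that 'step' above is the number of target pages covered by one
   entry at this level.  A run of pages that is step-aligned and at least
   step pages long is recorded as a single leaf in the interior node
   rather than expanded downward, so registering a large RAM region with
   phys_page_set() costs a handful of nodes instead of one entry per
   page.  */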

static void phys_page_set(target_phys_addr_t index, target_phys_addr_t nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

MemoryRegionSection *phys_page_find(target_phys_addr_t index)
{
    PhysPageEntry lp = phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}
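
/* A lookup is thus bounded by P_L2_LEVELS pointer chases, and an address
   with no registered section resolves to the shared
   phys_section_unassigned entry rather than NULL, so callers never need
   a null check on the result.  */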

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

target_phys_addr_t section_addr(MemoryRegionSection *section,
                                target_phys_addr_t addr)
{
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    return addr;
}

#define mmap_lock() do { } while (0)
#define mmap_unlock() do { } while (0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode.  This will change when a dedicated libc is used.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Keep the buffer no bigger than 16MB to branch between blocks */
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
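
/* The per-host placement tricks above all serve the same goal: TCG
   output jumps directly between blocks and into code_gen_prologue, so
   the entire buffer must stay within the host's direct-branch reach
   (presumably +/- 2 GB rel32 displacements behind MAP_32BIT on x86-64,
   and the 16 MB cap on ARM).  The generic g_malloc() fallback instead
   relies on map_exec() to flip the pages executable after the fact.  */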

/* Must be called before using the QEMU cpus.  'tb_size' is the size
   (in bytes) allocated to the translation buffer.  Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    tcg_register_jit(code_gen_buffer, code_gen_buffer_size);
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUArchState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUArchState),
        VMSTATE_UINT32(interrupt_request, CPUArchState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUArchState *qemu_get_cpu(int cpu)
{
    CPUArchState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUArchState *env)
{
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block.  Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUArchState *env1)
{
    CPUArchState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));
    }

    memset(tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof(void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for (;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
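
/* The '& 3' unmasking works because TranslationBlock pointers are
   assumed to be at least 4-byte aligned: the two low bits of each
   page_next link encode which of the TB's (up to two) pages the link
   belongs to, so a single tagged pointer carries both the next TB and
   the index to follow inside it.  */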

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for (;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
}
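
/* The jump lists reuse the same low-bit tagging: a tag of 0 or 1 names
   the outgoing jump slot to follow in the pointed-to TB, while a tag of
   2 marks the head entry (tb->jmp_first) of the circular list, which is
   how the traversals here and in tb_phys_invalidate() detect the end.  */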

void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUArchState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from each CPU's tb_jmp_cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for (;;) {
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
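
/* Worked example: set_bits(tab, 3, 8) computes end = 11.  The leading
   byte gets mask 0xff << 3 = 0xf8 (bits 3..7), the byte-aligned middle
   loop has nothing to do, and the trailing byte gets
   ~(0xff << (11 & 7)) = 0x07 (bits 8..10): eight bits in total,
   straddling the byte boundary.  */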

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

TranslationBlock *tb_gen_code(CPUArchState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((uintptr_t)code_gen_ptr + code_gen_size +
                             CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
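
/* A TB whose guest code straddles a page boundary is linked into both
   pages' PageDesc lists by tb_link_page(), so a write to either page
   finds and invalidates it; phys_page2 stays -1 in the common
   single-page case.  */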

/* invalidate all TBs which intersect with the target physical page
   starting in range [start, end[.  NOTE: start and end must refer to
   the same physical page.  'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUArchState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip +
                 (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
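
/* Fast-path rationale: once a page's code_bitmap exists, an aligned
   write of len <= 8 bytes only has to test the bits covering
   [start, start + len).  For example, a 4-byte write at page offset
   0x10 tests (code_bitmap[2] >> 0) & 0xf and can skip the full TB walk
   when no translated code overlaps the write.  */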

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    uintptr_t pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUArchState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001284
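/* Illustrative sketch (not part of the build): the tagged-pointer scheme
   decoded by the "(uintptr_t)tb & 3" lines above.  The scheme relies on
   TB pointers being at least 4-byte aligned, so the low two bits of a
   list pointer are free to carry the page slot (0 or 1) the link belongs
   to, or 2 for a list head.  The Node type below is a stand-in, not the
   real TranslationBlock. */
#if 0
#include <stdio.h>
#include <stdint.h>

typedef struct Node { struct Node *next[2]; } Node;

static Node *tag(Node *n, unsigned slot)  /* pack slot into low bits */
{
    return (Node *)((uintptr_t)n | slot);
}

int main(void)
{
    static Node a, b;
    a.next[1] = tag(&b, 0);               /* link via slot 1, target slot 0 */
    Node *p = a.next[1];
    unsigned slot = (uintptr_t)p & 3;     /* recover the slot tag */
    Node *target = (Node *)((uintptr_t)p & ~(uintptr_t)3);
    printf("slot=%u target_is_b=%d\n", slot, target == &b);
    return 0;
}
#endif
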
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page to be non-writable (writes will trigger
           a page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
             addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

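/* Illustrative sketch (not part of the build): the user-mode protection
   trick used by tb_alloc_page() above, applied to a plain anonymous
   mapping.  After mprotect(PROT_READ), any store to the page raises
   SIGSEGV, which QEMU catches to trigger page_unprotect().  Assumes a
   POSIX host with MAP_ANONYMOUS (Linux/BSD). */
#if 0
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    size_t pagesize = (size_t)sysconf(_SC_PAGESIZE);
    char *page = mmap(NULL, pagesize, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (page == MAP_FAILED) {
        perror("mmap");
        return 1;
    }
    strcpy(page, "translated code lives here");     /* writable for now */
    if (mprotect(page, pagesize, PROT_READ) != 0) { /* drop PAGE_WRITE */
        perror("mprotect");
        return 1;
    }
    /* a store such as page[0] = 'x' would now deliver SIGSEGV */
    printf("%s\n", page);                           /* reads still work */
    munmap(page, pagesize);
    return 0;
}
#endif
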
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done. */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
{
    int m_min, m_max, m;
    uintptr_t v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (uintptr_t)code_gen_buffer ||
        tc_ptr >= (uintptr_t)code_gen_ptr) {
        return NULL;
    }
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (uintptr_t)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

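/* Illustrative sketch (not part of the build): the same "greatest entry
   with key <= needle" binary search that tb_find_pc() performs over the
   sorted tbs[] array, shown on a plain array of integers.  The caller is
   expected to have range-checked the needle first, as tb_find_pc() does
   against code_gen_buffer/code_gen_ptr. */
#if 0
#include <stdio.h>

static int floor_index(const int *a, int n, int key)
{
    int lo = 0, hi = n - 1, mid;
    while (lo <= hi) {
        mid = (lo + hi) >> 1;
        if (a[mid] == key)
            return mid;
        else if (key < a[mid])
            hi = mid - 1;
        else
            lo = mid + 1;
    }
    return hi; /* greatest index with a[hi] <= key */
}

int main(void)
{
    int starts[] = { 0, 100, 250, 400 };  /* e.g. tc_ptr of each block */
    printf("%d\n", floor_index(starts, 4, 260)); /* 2: block starting at 250 */
    return 0;
}
#endif
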
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for (;;) {
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for (;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
void tb_invalidate_phys_addr(target_phys_addr_t addr)
{
    ram_addr_t ram_addr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!(memory_region_is_ram(section->mr)
          || (section->mr->rom_device && section->mr->readable))) {
        return;
    }
    ram_addr = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
        + section_addr(section, addr);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}

static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint. */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
        len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint. */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
            && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference. */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints. */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

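/* Illustrative sketch (not part of the build): the argument checks that
   cpu_watchpoint_insert() applies above.  A watchpoint range must have a
   power-of-2 length no larger than a page, and its address must be
   aligned to that length, so that a single (addr, len_mask) pair can
   describe it.  EXAMPLE_PAGE_SIZE is a stand-in for TARGET_PAGE_SIZE. */
#if 0
#include <stdio.h>
#include <stdint.h>

#define EXAMPLE_PAGE_SIZE 4096

static int watchpoint_args_valid(uint64_t addr, uint64_t len)
{
    uint64_t len_mask = ~(len - 1);
    return !((len & (len - 1)) ||   /* length is a power of 2 */
             (addr & ~len_mask) ||  /* address aligned to length */
             len == 0 || len > EXAMPLE_PAGE_SIZE);
}

int main(void)
{
    printf("%d\n", watchpoint_args_valid(0x1000, 4)); /* 1: ok */
    printf("%d\n", watchpoint_args_valid(0x1002, 4)); /* 0: misaligned */
    printf("%d\n", watchpoint_args_valid(0x1000, 3)); /* 0: not power of 2 */
    return 0;
}
#endif
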
/* Add a breakpoint. */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint. */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference. */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

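/* Illustrative sketch (not part of the build): the QTAILQ macros used by
   the breakpoint/watchpoint code mirror the BSD <sys/queue.h> TAILQ API,
   which this standalone program uses directly (available on Linux/BSD;
   only the widely implemented macros are used).  GDB-style "insert at
   head" vs. "insert at tail" is the same distinction made above. */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

typedef struct Breakpoint {
    unsigned long pc;
    TAILQ_ENTRY(Breakpoint) entry;  /* intrusive links, like QTAILQ_ENTRY */
} Breakpoint;

int main(void)
{
    TAILQ_HEAD(, Breakpoint) head;
    Breakpoint *bp;

    TAILQ_INIT(&head);
    bp = malloc(sizeof(*bp));
    bp->pc = 0x400080;
    TAILQ_INSERT_TAIL(&head, bp, entry);  /* ordinary breakpoint */
    bp = malloc(sizeof(*bp));
    bp->pc = 0x400000;
    TAILQ_INSERT_HEAD(&head, bp, entry);  /* "GDB" one goes in front */

    while (!TAILQ_EMPTY(&head)) {         /* drain, head first */
        bp = TAILQ_FIRST(&head);
        printf("pc=%#lx\n", bp->pc);
        TAILQ_REMOVE(&head, bp, entry);
        free(bp);
    }
    return 0;
}
#endif
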
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

/* enable or disable low-level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif defined(_WIN32)
        /* Win32 doesn't support line-buffering, so use unbuffered output. */
        setvbuf(logfile, NULL, _IONBF, 0);
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

static void cpu_unlink_tb(CPUArchState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls. */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TBs */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}

#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUArchState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
        return;
    }

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu_unlink_tb(env);
    }
}

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUArchState *env, int mask)
{
    env->interrupt_request |= mask;
    cpu_unlink_tb(env);
}
#endif /* CONFIG_USER_ONLY */

void cpu_reset_interrupt(CPUArchState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUArchState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}

const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for (;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for (item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for (item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

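/* Illustrative sketch (not part of the build): the parsing loop of
   cpu_str_to_log_mask() above, reduced to a standalone program with a
   made-up flag table.  Each comma-separated token either matches a table
   entry or makes the whole parse fail with 0, just as above. */
#if 0
#include <stdio.h>
#include <string.h>

static const struct { int mask; const char *name; } items[] = {
    { 1, "in_asm" }, { 2, "op" }, { 4, "int" }, { 0, NULL },
};

static int str_to_mask(const char *str)
{
    const char *p = str, *p1;
    int i, mask = 0;

    for (;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        for (i = 0; items[i].mask != 0; i++) {
            if (strlen(items[i].name) == (size_t)(p1 - p) &&
                memcmp(p, items[i].name, p1 - p) == 0)
                break;
        }
        if (items[i].mask == 0)
            return 0;             /* unknown token: reject the string */
        mask |= items[i].mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

int main(void)
{
    printf("%d\n", str_to_mask("in_asm,int")); /* 5 */
    printf("%d\n", str_to_mask("bogus"));      /* 0 */
    return 0;
}
#endif
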
void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&new_env->breakpoints);
    QTAILQ_INIT(&new_env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)
void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page. */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

/* Note: start and end must be within the same ram block. */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length, start1;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below. */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
        != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);
}

int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

target_phys_addr_t memory_region_section_get_iotlb(CPUArchState *env,
                                                   MemoryRegionSection *section,
                                                   target_ulong vaddr,
                                                   target_phys_addr_t paddr,
                                                   int prot,
                                                   target_ulong *address)
{
    target_phys_addr_t iotlb;
    CPUWatchpoint *wp;

    if (is_ram_rom(section)) {
        /* Normal RAM. */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + section_addr(section, paddr);
        if (!section->readonly) {
            iotlb |= phys_section_notdirty;
        } else {
            iotlb |= phys_section_rom;
        }
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address. */
        iotlb = section - phys_sections;
        iotlb += section_addr(section, paddr);
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines. */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}

#else
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */

struct walk_memory_regions_data
{
    walk_memory_regions_fn fn;
    void *priv;
    uintptr_t start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   abi_ulong end, int new_prot)
{
    if (data->start != -1ul) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1ul);
    data->prot = new_prot;

    return 0;
}

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                         (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}

static int dump_region(void *priv, abi_ulong start,
                       abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
                   " "TARGET_ABI_FMT_lx" %c%c%c\n",
                   start, end, end - start,
                   ((prot & PAGE_READ) ? 'r' : '-'),
                   ((prot & PAGE_WRITE) ? 'w' : '-'),
                   ((prot & PAGE_EXEC) ? 'x' : '-'));

    return (0);
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
                   "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}

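/* Illustrative sketch (not part of the build): the run-length coalescing
   that walk_memory_regions_end() performs above — adjacent pages with
   identical protection are reported as one region — shown on a flat
   per-page array instead of the real multi-level l1_map. */
#if 0
#include <stdio.h>

static void walk_flat(const int *prot, int npages, int pagebits)
{
    int i, start = -1, cur = 0;

    for (i = 0; i <= npages; i++) {
        int p = (i < npages) ? prot[i] : 0; /* trailing 0 flushes the run */
        if (p != cur) {
            if (start != -1)
                printf("%#x-%#x prot=%d\n",
                       (unsigned)(start << pagebits),
                       (unsigned)(i << pagebits), cur);
            start = p ? i : -1;
            cur = p;
        }
    }
}

int main(void)
{
    int prot[] = { 3, 3, 3, 0, 5, 5 }; /* per-page protection bits */
    walk_flat(prot, 6, 12);            /* prints two regions on 4k pages */
    return 0;
}
#endif
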
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held. */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid. */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside. */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid. */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around. */
        return -1;
    }

    end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p)
            return -1;
        if (!(p->flags & PAGE_VALID))
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            return 0;
        }
    }
    return 0;
}

/* called from signal handler: invalidate the code and unprotect the
   page.  Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok. */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
#endif /* defined(CONFIG_USER_ONLY) */

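/* Illustrative sketch (not part of the build): the fault-driven flow that
   page_unprotect() serves above, as a standalone POSIX program.  A page
   is made read-only; the SIGSEGV handler (where QEMU would call
   page_unprotect()) restores PROT_WRITE and the faulting store is
   transparently retried.  As the comment above notes, calling mprotect()
   in a handler is only reasonable for a synchronous SEGV like this one. */
#if 0
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

static char *page;
static size_t pagesize;

static void on_segv(int sig, siginfo_t *si, void *ctx)
{
    char *a = (char *)si->si_addr;
    (void)sig; (void)ctx;
    if (a < page || a >= page + pagesize)
        _exit(1);                      /* not our page: give up */
    mprotect(page, pagesize, PROT_READ | PROT_WRITE); /* "unprotect" */
}

int main(void)
{
    struct sigaction act = { 0 };

    pagesize = (size_t)sysconf(_SC_PAGESIZE);
    page = mmap(NULL, pagesize, PROT_READ | PROT_WRITE,
                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    act.sa_sigaction = on_segv;
    act.sa_flags = SA_SIGINFO;
    sigaction(SIGSEGV, &act, NULL);

    mprotect(page, pagesize, PROT_READ); /* protect like tb_alloc_page() */
    page[0] = 'x';                       /* faults once, then succeeds */
    printf("wrote %c after unprotect\n", page[0]);
    return 0;
}
#endif
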
#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    target_phys_addr_t base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(target_phys_addr_t base);

static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
        if (!p[i].is_leaf) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].ptr);
        }
    }
    lp->is_leaf = 0;
    lp->ptr = PHYS_MAP_NODE_NIL;
}

static void destroy_all_mappings(void)
{
    destroy_l2_mapping(&phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}

static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}

/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ. */
static void register_subpage(MemoryRegionSection *section)
{
    subpage_t *subpage;
    target_phys_addr_t base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    };
    target_phys_addr_t start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(base);
        subsection.mr = &subpage->iomem;
        phys_page_set(base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + section->size;
    subpage_register(subpage, start, end, phys_section_add(section));
}


static void register_multipage(MemoryRegionSection *section)
{
    target_phys_addr_t start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    target_phys_addr_t addr;
    uint16_t section_index = phys_section_add(section);

    assert(size);

    addr = start_addr;
    phys_page_set(addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
                  section_index);
}

void cpu_register_physical_memory_log(MemoryRegionSection *section,
                                      bool readonly)
{
    MemoryRegionSection now = *section, remain = *section;

    if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
        || (now.size < TARGET_PAGE_SIZE)) {
        now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space,
                       now.size);
        register_subpage(&now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    now = remain;
    now.size &= TARGET_PAGE_MASK;
    if (now.size) {
        register_multipage(&now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    now = remain;
    if (now.size) {
        register_subpage(&now);
    }
}

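/* Illustrative sketch (not part of the build): the head/middle/tail split
   performed by cpu_register_physical_memory_log() above.  An arbitrary
   [start, start+size) range becomes an unaligned head subpage, a run of
   whole pages, and an unaligned tail subpage.  PAGE_SIZE is a stand-in
   for TARGET_PAGE_SIZE. */
#if 0
#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096u
#define PAGE_MASK (~(uint64_t)(PAGE_SIZE - 1))

static void split(uint64_t start, uint64_t size)
{
    uint64_t addr = start, remain = size, chunk;

    if ((addr & ~PAGE_MASK) || remain < PAGE_SIZE) {  /* unaligned head */
        chunk = ((addr + PAGE_SIZE) & PAGE_MASK) - addr;
        if (chunk > remain)
            chunk = remain;
        printf("subpage   %#llx+%#llx\n", (unsigned long long)addr,
               (unsigned long long)chunk);
        addr += chunk;
        remain -= chunk;
    }
    chunk = remain & PAGE_MASK;                       /* whole pages */
    if (chunk) {
        printf("multipage %#llx+%#llx\n", (unsigned long long)addr,
               (unsigned long long)chunk);
        addr += chunk;
        remain -= chunk;
    }
    if (remain)                                       /* unaligned tail */
        printf("subpage   %#llx+%#llx\n", (unsigned long long)addr,
               (unsigned long long)remain);
}

int main(void)
{
    split(0x1f00, 0x3200); /* head 0x100, middle 0x3000, tail 0x100 */
    return 0;
}
#endif
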
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

Marcelo Tosattic9027602010-03-01 20:25:08 -03002440#if defined(__linux__) && !defined(TARGET_S390X)
2441
2442#include <sys/vfs.h>
2443
2444#define HUGETLBFS_MAGIC 0x958458f6
2445
2446static long gethugepagesize(const char *path)
2447{
2448 struct statfs fs;
2449 int ret;
2450
2451 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002452 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002453 } while (ret != 0 && errno == EINTR);
2454
2455 if (ret != 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002456 perror(path);
2457 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002458 }
2459
2460 if (fs.f_type != HUGETLBFS_MAGIC)
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002461 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002462
2463 return fs.f_bsize;
2464}
2465
Alex Williamson04b16652010-07-02 11:13:17 -06002466static void *file_ram_alloc(RAMBlock *block,
2467 ram_addr_t memory,
2468 const char *path)
Marcelo Tosattic9027602010-03-01 20:25:08 -03002469{
2470 char *filename;
2471 void *area;
2472 int fd;
2473#ifdef MAP_POPULATE
2474 int flags;
2475#endif
2476 unsigned long hpagesize;
2477
2478 hpagesize = gethugepagesize(path);
2479 if (!hpagesize) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002480 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002481 }
2482
2483 if (memory < hpagesize) {
2484 return NULL;
2485 }
2486
2487 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2488 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2489 return NULL;
2490 }
2491
2492 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002493 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002494 }
2495
2496 fd = mkstemp(filename);
2497 if (fd < 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002498 perror("unable to create backing store for hugepages");
2499 free(filename);
2500 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002501 }
2502 unlink(filename);
2503 free(filename);
2504
2505 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2506
2507 /*
2508 * ftruncate is not supported by hugetlbfs in older
2509 * hosts, so don't bother bailing out on errors.
2510 * If anything goes wrong with it under other filesystems,
2511 * mmap will fail.
2512 */
2513 if (ftruncate(fd, memory))
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002514 perror("ftruncate");
Marcelo Tosattic9027602010-03-01 20:25:08 -03002515
2516#ifdef MAP_POPULATE
2517 /* NB: MAP_POPULATE won't exhaustively allocate all physical pages when
2518 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2519 * to sidestep this quirk.
2520 */
2521 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2522 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2523#else
2524 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2525#endif
2526 if (area == MAP_FAILED) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002527 perror("file_ram_alloc: can't mmap RAM pages");
2528 close(fd);
2529 return (NULL);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002530 }
Alex Williamson04b16652010-07-02 11:13:17 -06002531 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002532 return area;
2533}
2534#endif
2535
Alex Williamsond17b5282010-06-25 11:08:38 -06002536static ram_addr_t find_ram_offset(ram_addr_t size)
2537{
Alex Williamson04b16652010-07-02 11:13:17 -06002538 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06002539 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002540
2541 if (QLIST_EMPTY(&ram_list.blocks))
2542 return 0;
2543
2544 QLIST_FOREACH(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002545 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002546
2547 end = block->offset + block->length;
2548
2549 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2550 if (next_block->offset >= end) {
2551 next = MIN(next, next_block->offset);
2552 }
2553 }
2554 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06002555 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06002556 mingap = next - end;
2557 }
2558 }
Alex Williamson3e837b22011-10-31 08:54:09 -06002559
2560 if (offset == RAM_ADDR_MAX) {
2561 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2562 (uint64_t)size);
2563 abort();
2564 }
2565
Alex Williamson04b16652010-07-02 11:13:17 -06002566 return offset;
2567}
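/* Worked example (illustrative): with existing blocks [0x0000,0x1000) and
 * [0x3000,0x4000), a request for size 0x1000 sees candidate gaps
 * [0x1000,0x3000) and [0x4000,...). Both fit, but the mingap test prefers
 * the smaller 0x2000-byte gap, so find_ram_offset() returns 0x1000. */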
2568
2569static ram_addr_t last_ram_offset(void)
2570{
Alex Williamsond17b5282010-06-25 11:08:38 -06002571 RAMBlock *block;
2572 ram_addr_t last = 0;
2573
2574 QLIST_FOREACH(block, &ram_list.blocks, next)
2575 last = MAX(last, block->offset + block->length);
2576
2577 return last;
2578}
2579
Avi Kivityc5705a72011-12-20 15:59:12 +02002580void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
Cam Macdonell84b89d72010-07-26 18:10:57 -06002581{
2582 RAMBlock *new_block, *block;
2583
Avi Kivityc5705a72011-12-20 15:59:12 +02002584 new_block = NULL;
2585 QLIST_FOREACH(block, &ram_list.blocks, next) {
2586 if (block->offset == addr) {
2587 new_block = block;
2588 break;
2589 }
2590 }
2591 assert(new_block);
2592 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002593
2594 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2595 char *id = dev->parent_bus->info->get_dev_path(dev);
2596 if (id) {
2597 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05002598 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002599 }
2600 }
2601 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2602
2603 QLIST_FOREACH(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02002604 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06002605 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2606 new_block->idstr);
2607 abort();
2608 }
2609 }
Avi Kivityc5705a72011-12-20 15:59:12 +02002610}
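/* Example (illustrative; the device path format depends on the bus): for a
 * PCI device whose bus reports the dev path "0000:00:02.0" and a region
 * named "vga.vram", the resulting idstr is "0000:00:02.0/vga.vram"; with no
 * device, it is just "vga.vram". */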
2611
2612ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2613 MemoryRegion *mr)
2614{
2615 RAMBlock *new_block;
2616
2617 size = TARGET_PAGE_ALIGN(size);
2618 new_block = g_malloc0(sizeof(*new_block));
Cam Macdonell84b89d72010-07-26 18:10:57 -06002619
Avi Kivity7c637362011-12-21 13:09:49 +02002620 new_block->mr = mr;
Jun Nakajima432d2682010-08-31 16:41:25 +01002621 new_block->offset = find_ram_offset(size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002622 if (host) {
2623 new_block->host = host;
Huang Yingcd19cfa2011-03-02 08:56:19 +01002624 new_block->flags |= RAM_PREALLOC_MASK;
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002625 } else {
2626 if (mem_path) {
2627#if defined (__linux__) && !defined(TARGET_S390X)
2628 new_block->host = file_ram_alloc(new_block, size, mem_path);
2629 if (!new_block->host) {
2630 new_block->host = qemu_vmalloc(size);
Andreas Färbere78815a2010-09-25 11:26:05 +00002631 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002632 }
2633#else
2634 fprintf(stderr, "-mem-path option unsupported\n");
2635 exit(1);
2636#endif
2637 } else {
2638#if defined(TARGET_S390X) && defined(CONFIG_KVM)
Christian Borntraegerff836782011-05-10 14:49:10 +02002639 /* S390 KVM requires the topmost vma of the RAM to be smaller than
2640 a system-defined value, which is at least 256GB. Larger systems
2641 have larger values. We put the guest between the end of the data
2642 segment (system break) and this value. We use 32GB as a base to
2643 have enough room for the system break to grow. */
2644 new_block->host = mmap((void*)0x800000000, size,
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002645 PROT_EXEC|PROT_READ|PROT_WRITE,
Christian Borntraegerff836782011-05-10 14:49:10 +02002646 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
Alexander Graffb8b2732011-05-20 17:33:28 +02002647 if (new_block->host == MAP_FAILED) {
2648 fprintf(stderr, "Allocating RAM failed\n");
2649 abort();
2650 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002651#else
Jan Kiszka868bb332011-06-21 22:59:09 +02002652 if (xen_enabled()) {
Avi Kivityfce537d2011-12-18 15:48:55 +02002653 xen_ram_alloc(new_block->offset, size, mr);
Jun Nakajima432d2682010-08-31 16:41:25 +01002654 } else {
2655 new_block->host = qemu_vmalloc(size);
2656 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002657#endif
Andreas Färbere78815a2010-09-25 11:26:05 +00002658 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002659 }
2660 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06002661 new_block->length = size;
2662
2663 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2664
Anthony Liguori7267c092011-08-20 22:09:37 -05002665 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
Cam Macdonell84b89d72010-07-26 18:10:57 -06002666 last_ram_offset() >> TARGET_PAGE_BITS);
2667 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2668 0xff, size >> TARGET_PAGE_BITS);
2669
2670 if (kvm_enabled())
2671 kvm_setup_guest_memory(new_block->host, size);
2672
2673 return new_block->offset;
2674}
2675
Avi Kivityc5705a72011-12-20 15:59:12 +02002676ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
pbrook94a6b542009-04-11 17:15:54 +00002677{
Avi Kivityc5705a72011-12-20 15:59:12 +02002678 return qemu_ram_alloc_from_ptr(size, NULL, mr);
pbrook94a6b542009-04-11 17:15:54 +00002679}
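/* Minimal usage sketch, not from the original file: in this tree guest RAM
 * is normally allocated through memory_region_init_ram(), which ends up
 * here. The MemoryRegion *mr is assumed to be initialized by the caller. */
static ram_addr_t example_alloc_guest_ram(MemoryRegion *mr, ram_addr_t size)
{
    ram_addr_t offset = qemu_ram_alloc(size, mr);
    /* qemu_ram_alloc_from_ptr() above marks every page of the new block
     * dirty, so live migration will transfer the whole block. */
    return offset;
}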
bellarde9a1ab12007-02-08 23:08:38 +00002680
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002681void qemu_ram_free_from_ptr(ram_addr_t addr)
2682{
2683 RAMBlock *block;
2684
2685 QLIST_FOREACH(block, &ram_list.blocks, next) {
2686 if (addr == block->offset) {
2687 QLIST_REMOVE(block, next);
Anthony Liguori7267c092011-08-20 22:09:37 -05002688 g_free(block);
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002689 return;
2690 }
2691 }
2692}
2693
Anthony Liguoric227f092009-10-01 16:12:16 -05002694void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00002695{
Alex Williamson04b16652010-07-02 11:13:17 -06002696 RAMBlock *block;
2697
2698 QLIST_FOREACH(block, &ram_list.blocks, next) {
2699 if (addr == block->offset) {
2700 QLIST_REMOVE(block, next);
Huang Yingcd19cfa2011-03-02 08:56:19 +01002701 if (block->flags & RAM_PREALLOC_MASK) {
2702 ;
2703 } else if (mem_path) {
Alex Williamson04b16652010-07-02 11:13:17 -06002704#if defined (__linux__) && !defined(TARGET_S390X)
2705 if (block->fd) {
2706 munmap(block->host, block->length);
2707 close(block->fd);
2708 } else {
2709 qemu_vfree(block->host);
2710 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01002711#else
2712 abort();
Alex Williamson04b16652010-07-02 11:13:17 -06002713#endif
2714 } else {
2715#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2716 munmap(block->host, block->length);
2717#else
Jan Kiszka868bb332011-06-21 22:59:09 +02002718 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002719 xen_invalidate_map_cache_entry(block->host);
Jun Nakajima432d2682010-08-31 16:41:25 +01002720 } else {
2721 qemu_vfree(block->host);
2722 }
Alex Williamson04b16652010-07-02 11:13:17 -06002723#endif
2724 }
Anthony Liguori7267c092011-08-20 22:09:37 -05002725 g_free(block);
Alex Williamson04b16652010-07-02 11:13:17 -06002726 return;
2727 }
2728 }
2729
bellarde9a1ab12007-02-08 23:08:38 +00002730}
2731
Huang Yingcd19cfa2011-03-02 08:56:19 +01002732#ifndef _WIN32
2733void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2734{
2735 RAMBlock *block;
2736 ram_addr_t offset;
2737 int flags;
2738 void *area, *vaddr;
2739
2740 QLIST_FOREACH(block, &ram_list.blocks, next) {
2741 offset = addr - block->offset;
2742 if (offset < block->length) {
2743 vaddr = block->host + offset;
2744 if (block->flags & RAM_PREALLOC_MASK) {
2745 ;
2746 } else {
2747 flags = MAP_FIXED;
2748 munmap(vaddr, length);
2749 if (mem_path) {
2750#if defined(__linux__) && !defined(TARGET_S390X)
2751 if (block->fd) {
2752#ifdef MAP_POPULATE
2753 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
2754 MAP_PRIVATE;
2755#else
2756 flags |= MAP_PRIVATE;
2757#endif
2758 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2759 flags, block->fd, offset);
2760 } else {
2761 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2762 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2763 flags, -1, 0);
2764 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01002765#else
2766 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01002767#endif
2768 } else {
2769#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2770 flags |= MAP_SHARED | MAP_ANONYMOUS;
2771 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
2772 flags, -1, 0);
2773#else
2774 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2775 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2776 flags, -1, 0);
2777#endif
2778 }
2779 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002780 fprintf(stderr, "Could not remap addr: "
2781 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01002782 length, addr);
2783 exit(1);
2784 }
2785 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
2786 }
2787 return;
2788 }
2789 }
2790}
2791#endif /* !_WIN32 */
2792
pbrookdc828ca2009-04-09 22:21:07 +00002793/* Return a host pointer to ram allocated with qemu_ram_alloc.
pbrook5579c7f2009-04-11 14:47:08 +00002794 With the exception of the softmmu code in this file, this should
2795 only be used for local memory (e.g. video ram) that the device owns,
2796 and knows it isn't going to access beyond the end of the block.
2797
2798 It should not be used for general purpose DMA.
2799 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2800 */
Anthony Liguoric227f092009-10-01 16:12:16 -05002801void *qemu_get_ram_ptr(ram_addr_t addr)
pbrookdc828ca2009-04-09 22:21:07 +00002802{
pbrook94a6b542009-04-11 17:15:54 +00002803 RAMBlock *block;
2804
Alex Williamsonf471a172010-06-11 11:11:42 -06002805 QLIST_FOREACH(block, &ram_list.blocks, next) {
2806 if (addr - block->offset < block->length) {
Vincent Palatin7d82af32011-03-10 15:47:46 -05002807 /* Move this entry to the start of the list. */
2808 if (block != QLIST_FIRST(&ram_list.blocks)) {
2809 QLIST_REMOVE(block, next);
2810 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
2811 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002812 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01002813 /* We need to check whether the requested address is in RAM,
2814 * because we don't want to map all of guest memory in QEMU;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01002815 * in that case, just map until the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01002816 */
2817 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002818 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01002819 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002820 block->host =
2821 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01002822 }
2823 }
Alex Williamsonf471a172010-06-11 11:11:42 -06002824 return block->host + (addr - block->offset);
2825 }
pbrook94a6b542009-04-11 17:15:54 +00002826 }
Alex Williamsonf471a172010-06-11 11:11:42 -06002827
2828 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2829 abort();
2830
2831 return NULL;
pbrookdc828ca2009-04-09 22:21:07 +00002832}
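/* Usage sketch (illustrative): per the comment above, qemu_get_ram_ptr() is
 * meant for device-owned memory such as video RAM. The offset is assumed to
 * come from a prior qemu_ram_alloc(). */
static void example_fill_vram(ram_addr_t vram_offset, uint8_t pattern,
                              ram_addr_t len)
{
    uint8_t *p = qemu_get_ram_ptr(vram_offset);

    memset(p, pattern, len);
    qemu_put_ram_ptr(p); /* currently only a tracing hook, see below */
}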
2833
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02002834/* Return a host pointer to ram allocated with qemu_ram_alloc.
2835 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
2836 */
2837void *qemu_safe_ram_ptr(ram_addr_t addr)
2838{
2839 RAMBlock *block;
2840
2841 QLIST_FOREACH(block, &ram_list.blocks, next) {
2842 if (addr - block->offset < block->length) {
Jan Kiszka868bb332011-06-21 22:59:09 +02002843 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01002844 /* We need to check whether the requested address is in RAM,
2845 * because we don't want to map all of guest memory in QEMU;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01002846 * in that case, just map until the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01002847 */
2848 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002849 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01002850 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002851 block->host =
2852 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01002853 }
2854 }
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02002855 return block->host + (addr - block->offset);
2856 }
2857 }
2858
2859 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2860 abort();
2861
2862 return NULL;
2863}
2864
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002865/* Return a host pointer to guest RAM. Similar to qemu_get_ram_ptr,
2866 * but takes a size argument. */
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002867void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002868{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002869 if (*size == 0) {
2870 return NULL;
2871 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002872 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002873 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02002874 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002875 RAMBlock *block;
2876
2877 QLIST_FOREACH(block, &ram_list.blocks, next) {
2878 if (addr - block->offset < block->length) {
2879 if (addr - block->offset + *size > block->length)
2880 *size = block->length - addr + block->offset;
2881 return block->host + (addr - block->offset);
2882 }
2883 }
2884
2885 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2886 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002887 }
2888}
2889
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002890void qemu_put_ram_ptr(void *addr)
2891{
2892 trace_qemu_put_ram_ptr(addr);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002893}
2894
Marcelo Tosattie8902612010-10-11 15:31:19 -03002895int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00002896{
pbrook94a6b542009-04-11 17:15:54 +00002897 RAMBlock *block;
2898 uint8_t *host = ptr;
2899
Jan Kiszka868bb332011-06-21 22:59:09 +02002900 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002901 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Stefano Stabellini712c2b42011-05-19 18:35:46 +01002902 return 0;
2903 }
2904
Alex Williamsonf471a172010-06-11 11:11:42 -06002905 QLIST_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01002906 /* This case happens when the block is not mapped. */
2907 if (block->host == NULL) {
2908 continue;
2909 }
Alex Williamsonf471a172010-06-11 11:11:42 -06002910 if (host - block->host < block->length) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03002911 *ram_addr = block->offset + (host - block->host);
2912 return 0;
Alex Williamsonf471a172010-06-11 11:11:42 -06002913 }
pbrook94a6b542009-04-11 17:15:54 +00002914 }
Jun Nakajima432d2682010-08-31 16:41:25 +01002915
Marcelo Tosattie8902612010-10-11 15:31:19 -03002916 return -1;
2917}
Alex Williamsonf471a172010-06-11 11:11:42 -06002918
Marcelo Tosattie8902612010-10-11 15:31:19 -03002919/* Some of the softmmu routines need to translate from a host pointer
2920 (typically a TLB entry) back to a ram offset. */
2921ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
2922{
2923 ram_addr_t ram_addr;
Alex Williamsonf471a172010-06-11 11:11:42 -06002924
Marcelo Tosattie8902612010-10-11 15:31:19 -03002925 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
2926 fprintf(stderr, "Bad ram pointer %p\n", ptr);
2927 abort();
2928 }
2929 return ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00002930}
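/* Sketch (illustrative): translating a host pointer back to a ram offset,
 * as the softmmu code does for TLB entries. qemu_ram_addr_from_host()
 * returns 0 on success; the _nofail variant above aborts instead. */
static bool example_host_ptr_to_ram_addr(void *host, ram_addr_t *offset)
{
    return qemu_ram_addr_from_host(host, offset) == 0;
}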
2931
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002932static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
2933 unsigned size)
bellard33417e72003-08-10 21:47:01 +00002934{
pbrook67d3b952006-12-18 05:03:52 +00002935#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00002936 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
pbrook67d3b952006-12-18 05:03:52 +00002937#endif
Richard Henderson5b450402011-04-18 16:13:12 -07002938#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002939 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00002940#endif
2941 return 0;
2942}
2943
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002944static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
2945 uint64_t val, unsigned size)
blueswir1e18231a2008-10-06 18:46:28 +00002946{
2947#ifdef DEBUG_UNASSIGNED
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002948 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
blueswir1e18231a2008-10-06 18:46:28 +00002949#endif
Richard Henderson5b450402011-04-18 16:13:12 -07002950#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002951 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00002952#endif
2953}
2954
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002955static const MemoryRegionOps unassigned_mem_ops = {
2956 .read = unassigned_mem_read,
2957 .write = unassigned_mem_write,
2958 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00002959};
2960
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002961static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
2962 unsigned size)
2963{
2964 abort();
2965}
2966
2967static void error_mem_write(void *opaque, target_phys_addr_t addr,
2968 uint64_t value, unsigned size)
2969{
2970 abort();
2971}
2972
2973static const MemoryRegionOps error_mem_ops = {
2974 .read = error_mem_read,
2975 .write = error_mem_write,
2976 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00002977};
2978
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002979static const MemoryRegionOps rom_mem_ops = {
2980 .read = error_mem_read,
2981 .write = unassigned_mem_write,
2982 .endianness = DEVICE_NATIVE_ENDIAN,
2983};
2984
2985static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
2986 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00002987{
bellard3a7d9292005-08-21 09:26:42 +00002988 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002989 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00002990 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2991#if !defined(CONFIG_USER_ONLY)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002992 tb_invalidate_phys_page_fast(ram_addr, size);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002993 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00002994#endif
2995 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002996 switch (size) {
2997 case 1:
2998 stb_p(qemu_get_ram_ptr(ram_addr), val);
2999 break;
3000 case 2:
3001 stw_p(qemu_get_ram_ptr(ram_addr), val);
3002 break;
3003 case 4:
3004 stl_p(qemu_get_ram_ptr(ram_addr), val);
3005 break;
3006 default:
3007 abort();
3008 }
bellardf23db162005-08-21 19:12:28 +00003009 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003010 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00003011 /* we remove the notdirty callback only if the code has been
3012 flushed */
3013 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00003014 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00003015}
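/* Flow sketch (illustrative): tb_invalidate_phys_page_fast() above ends up
 * setting CODE_DIRTY_FLAG once the page no longer contains translated code,
 * so dirty_flags can reach 0xff only then; at that point the slow notdirty
 * path is unnecessary and tlb_set_dirty() switches the TLB entry back to a
 * plain RAM mapping. */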
3016
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003017static const MemoryRegionOps notdirty_mem_ops = {
3018 .read = error_mem_read,
3019 .write = notdirty_mem_write,
3020 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00003021};
3022
pbrook0f459d12008-06-09 00:20:13 +00003023/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00003024static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00003025{
Andreas Färber9349b4f2012-03-14 01:38:32 +01003026 CPUArchState *env = cpu_single_env;
aliguori06d55cc2008-11-18 20:24:06 +00003027 target_ulong pc, cs_base;
3028 TranslationBlock *tb;
pbrook0f459d12008-06-09 00:20:13 +00003029 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00003030 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00003031 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00003032
aliguori06d55cc2008-11-18 20:24:06 +00003033 if (env->watchpoint_hit) {
3034 /* We re-entered the check after replacing the TB. Now raise
3035 * the debug interrupt so that it will trigger after the
3036 * current instruction. */
3037 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3038 return;
3039 }
pbrook2e70f6e2008-06-29 01:03:05 +00003040 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003041 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00003042 if ((vaddr == (wp->vaddr & len_mask) ||
3043 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00003044 wp->flags |= BP_WATCHPOINT_HIT;
3045 if (!env->watchpoint_hit) {
3046 env->watchpoint_hit = wp;
3047 tb = tb_find_pc(env->mem_io_pc);
3048 if (!tb) {
3049 cpu_abort(env, "check_watchpoint: could not find TB for "
3050 "pc=%p", (void *)env->mem_io_pc);
3051 }
Stefan Weil618ba8e2011-04-18 06:39:53 +00003052 cpu_restore_state(tb, env, env->mem_io_pc);
aliguori6e140f22008-11-18 20:37:55 +00003053 tb_phys_invalidate(tb, -1);
3054 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3055 env->exception_index = EXCP_DEBUG;
Max Filippov488d6572012-01-29 02:24:39 +04003056 cpu_loop_exit(env);
aliguori6e140f22008-11-18 20:37:55 +00003057 } else {
3058 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3059 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
Max Filippov488d6572012-01-29 02:24:39 +04003060 cpu_resume_from_signal(env, NULL);
aliguori6e140f22008-11-18 20:37:55 +00003061 }
aliguori06d55cc2008-11-18 20:24:06 +00003062 }
aliguori6e140f22008-11-18 20:37:55 +00003063 } else {
3064 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00003065 }
3066 }
3067}
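/* Usage sketch (illustrative; the cpu_watchpoint_insert() signature is
 * assumed from this tree): a debug stub arms a write watchpoint, and
 * check_watchpoint() above fires when a slow-path access overlaps it. */
static int example_arm_write_watchpoint(CPUArchState *env, target_ulong addr,
                                        target_ulong len)
{
    return cpu_watchpoint_insert(env, addr, len, BP_MEM_WRITE, NULL);
}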
3068
pbrook6658ffb2007-03-16 23:58:11 +00003069/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3070 so these check for a hit then pass through to the normal out-of-line
3071 phys routines. */
Avi Kivity1ec9b902012-01-02 12:47:48 +02003072static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
3073 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00003074{
Avi Kivity1ec9b902012-01-02 12:47:48 +02003075 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
3076 switch (size) {
3077 case 1: return ldub_phys(addr);
3078 case 2: return lduw_phys(addr);
3079 case 4: return ldl_phys(addr);
3080 default: abort();
3081 }
pbrook6658ffb2007-03-16 23:58:11 +00003082}
3083
Avi Kivity1ec9b902012-01-02 12:47:48 +02003084static void watch_mem_write(void *opaque, target_phys_addr_t addr,
3085 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00003086{
Avi Kivity1ec9b902012-01-02 12:47:48 +02003087 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
3088 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04003089 case 1:
3090 stb_phys(addr, val);
3091 break;
3092 case 2:
3093 stw_phys(addr, val);
3094 break;
3095 case 4:
3096 stl_phys(addr, val);
3097 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02003098 default: abort();
3099 }
pbrook6658ffb2007-03-16 23:58:11 +00003100}
3101
Avi Kivity1ec9b902012-01-02 12:47:48 +02003102static const MemoryRegionOps watch_mem_ops = {
3103 .read = watch_mem_read,
3104 .write = watch_mem_write,
3105 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00003106};
pbrook6658ffb2007-03-16 23:58:11 +00003107
Avi Kivity70c68e42012-01-02 12:32:48 +02003108static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
3109 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003110{
Avi Kivity70c68e42012-01-02 12:32:48 +02003111 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003112 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02003113 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00003114#if defined(DEBUG_SUBPAGE)
3115 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3116 mmio, len, addr, idx);
3117#endif
blueswir1db7b5422007-05-26 17:36:03 +00003118
Avi Kivity5312bd82012-02-12 18:32:55 +02003119 section = &phys_sections[mmio->sub_section[idx]];
3120 addr += mmio->base;
3121 addr -= section->offset_within_address_space;
3122 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02003123 return io_mem_read(section->mr, addr, len);
blueswir1db7b5422007-05-26 17:36:03 +00003124}
3125
Avi Kivity70c68e42012-01-02 12:32:48 +02003126static void subpage_write(void *opaque, target_phys_addr_t addr,
3127 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003128{
Avi Kivity70c68e42012-01-02 12:32:48 +02003129 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003130 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02003131 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00003132#if defined(DEBUG_SUBPAGE)
Avi Kivity70c68e42012-01-02 12:32:48 +02003133 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
3134 " idx %d value %"PRIx64"\n",
Richard Hendersonf6405242010-04-22 16:47:31 -07003135 __func__, mmio, len, addr, idx, value);
blueswir1db7b5422007-05-26 17:36:03 +00003136#endif
Richard Hendersonf6405242010-04-22 16:47:31 -07003137
Avi Kivity5312bd82012-02-12 18:32:55 +02003138 section = &phys_sections[mmio->sub_section[idx]];
3139 addr += mmio->base;
3140 addr -= section->offset_within_address_space;
3141 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02003142 io_mem_write(section->mr, addr, value, len);
blueswir1db7b5422007-05-26 17:36:03 +00003143}
3144
Avi Kivity70c68e42012-01-02 12:32:48 +02003145static const MemoryRegionOps subpage_ops = {
3146 .read = subpage_read,
3147 .write = subpage_write,
3148 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00003149};
3150
Avi Kivityde712f92012-01-02 12:41:07 +02003151static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
3152 unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01003153{
3154 ram_addr_t raddr = addr;
3155 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02003156 switch (size) {
3157 case 1: return ldub_p(ptr);
3158 case 2: return lduw_p(ptr);
3159 case 4: return ldl_p(ptr);
3160 default: abort();
3161 }
Andreas Färber56384e82011-11-30 16:26:21 +01003162}
3163
Avi Kivityde712f92012-01-02 12:41:07 +02003164static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
3165 uint64_t value, unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01003166{
3167 ram_addr_t raddr = addr;
3168 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02003169 switch (size) {
3170 case 1: return stb_p(ptr, value);
3171 case 2: return stw_p(ptr, value);
3172 case 4: return stl_p(ptr, value);
3173 default: abort();
3174 }
Andreas Färber56384e82011-11-30 16:26:21 +01003175}
3176
Avi Kivityde712f92012-01-02 12:41:07 +02003177static const MemoryRegionOps subpage_ram_ops = {
3178 .read = subpage_ram_read,
3179 .write = subpage_ram_write,
3180 .endianness = DEVICE_NATIVE_ENDIAN,
Andreas Färber56384e82011-11-30 16:26:21 +01003181};
3182
Anthony Liguoric227f092009-10-01 16:12:16 -05003183static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02003184 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00003185{
3186 int idx, eidx;
3187
3188 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3189 return -1;
3190 idx = SUBPAGE_IDX(start);
3191 eidx = SUBPAGE_IDX(end);
3192#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00003193 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", __func__,
blueswir1db7b5422007-05-26 17:36:03 +00003194 mmio, start, end, idx, eidx, section);
3195#endif
Avi Kivity5312bd82012-02-12 18:32:55 +02003196 if (memory_region_is_ram(phys_sections[section].mr)) {
3197 MemoryRegionSection new_section = phys_sections[section];
3198 new_section.mr = &io_mem_subpage_ram;
3199 section = phys_section_add(&new_section);
Andreas Färber56384e82011-11-30 16:26:21 +01003200 }
blueswir1db7b5422007-05-26 17:36:03 +00003201 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02003202 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00003203 }
3204
3205 return 0;
3206}
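/* Worked example (illustrative): registering [0x100, 0x1ff] stores the
 * section id in mmio->sub_section[0x100] through [0x1ff], so a later access
 * at page offset 0x180 resolves to that section in subpage_read() and
 * subpage_write(). */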
3207
Avi Kivity0f0cb162012-02-13 17:14:32 +02003208static subpage_t *subpage_init(target_phys_addr_t base)
blueswir1db7b5422007-05-26 17:36:03 +00003209{
Anthony Liguoric227f092009-10-01 16:12:16 -05003210 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00003211
Anthony Liguori7267c092011-08-20 22:09:37 -05003212 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00003213
3214 mmio->base = base;
Avi Kivity70c68e42012-01-02 12:32:48 +02003215 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
3216 "subpage", TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02003217 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00003218#if defined(DEBUG_SUBPAGE)
aliguori1eec6142009-02-05 22:06:18 +00003219 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
3220 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00003221#endif
Avi Kivity0f0cb162012-02-13 17:14:32 +02003222 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
blueswir1db7b5422007-05-26 17:36:03 +00003223
3224 return mmio;
3225}
3226
Avi Kivity5312bd82012-02-12 18:32:55 +02003227static uint16_t dummy_section(MemoryRegion *mr)
3228{
3229 MemoryRegionSection section = {
3230 .mr = mr,
3231 .offset_within_address_space = 0,
3232 .offset_within_region = 0,
3233 .size = UINT64_MAX,
3234 };
3235
3236 return phys_section_add(&section);
3237}
3238
Avi Kivity37ec01d2012-03-08 18:08:35 +02003239MemoryRegion *iotlb_to_region(target_phys_addr_t index)
Avi Kivityaa102232012-03-08 17:06:55 +02003240{
Avi Kivity37ec01d2012-03-08 18:08:35 +02003241 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02003242}
3243
Avi Kivitye9179ce2009-06-14 11:38:52 +03003244static void io_mem_init(void)
3245{
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003246 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003247 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
3248 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
3249 "unassigned", UINT64_MAX);
3250 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
3251 "notdirty", UINT64_MAX);
Avi Kivityde712f92012-01-02 12:41:07 +02003252 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
3253 "subpage-ram", UINT64_MAX);
Avi Kivity1ec9b902012-01-02 12:47:48 +02003254 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
3255 "watch", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003256}
3257
Avi Kivity50c1e142012-02-08 21:36:02 +02003258static void core_begin(MemoryListener *listener)
3259{
Avi Kivity54688b12012-02-09 17:34:32 +02003260 destroy_all_mappings();
Avi Kivity5312bd82012-02-12 18:32:55 +02003261 phys_sections_clear();
Avi Kivityc19e8802012-02-13 20:25:31 +02003262 phys_map.ptr = PHYS_MAP_NODE_NIL;
Avi Kivity5312bd82012-02-12 18:32:55 +02003263 phys_section_unassigned = dummy_section(&io_mem_unassigned);
Avi Kivityaa102232012-03-08 17:06:55 +02003264 phys_section_notdirty = dummy_section(&io_mem_notdirty);
3265 phys_section_rom = dummy_section(&io_mem_rom);
3266 phys_section_watch = dummy_section(&io_mem_watch);
Avi Kivity50c1e142012-02-08 21:36:02 +02003267}
3268
3269static void core_commit(MemoryListener *listener)
3270{
Andreas Färber9349b4f2012-03-14 01:38:32 +01003271 CPUArchState *env;
Avi Kivity117712c2012-02-12 21:23:17 +02003272
3273 /* since each CPU stores ram addresses in its TLB cache, we must
3274 reset the modified entries */
3275 /* XXX: slow ! */
3276 for(env = first_cpu; env != NULL; env = env->next_cpu) {
3277 tlb_flush(env, 1);
3278 }
Avi Kivity50c1e142012-02-08 21:36:02 +02003279}
3280
Avi Kivity93632742012-02-08 16:54:16 +02003281static void core_region_add(MemoryListener *listener,
3282 MemoryRegionSection *section)
3283{
Avi Kivity4855d412012-02-08 21:16:05 +02003284 cpu_register_physical_memory_log(section, section->readonly);
Avi Kivity93632742012-02-08 16:54:16 +02003285}
3286
3287static void core_region_del(MemoryListener *listener,
3288 MemoryRegionSection *section)
3289{
Avi Kivity93632742012-02-08 16:54:16 +02003290}
3291
Avi Kivity50c1e142012-02-08 21:36:02 +02003292static void core_region_nop(MemoryListener *listener,
3293 MemoryRegionSection *section)
3294{
Avi Kivity54688b12012-02-09 17:34:32 +02003295 cpu_register_physical_memory_log(section, section->readonly);
Avi Kivity50c1e142012-02-08 21:36:02 +02003296}
3297
Avi Kivity93632742012-02-08 16:54:16 +02003298static void core_log_start(MemoryListener *listener,
3299 MemoryRegionSection *section)
3300{
3301}
3302
3303static void core_log_stop(MemoryListener *listener,
3304 MemoryRegionSection *section)
3305{
3306}
3307
3308static void core_log_sync(MemoryListener *listener,
3309 MemoryRegionSection *section)
3310{
3311}
3312
3313static void core_log_global_start(MemoryListener *listener)
3314{
3315 cpu_physical_memory_set_dirty_tracking(1);
3316}
3317
3318static void core_log_global_stop(MemoryListener *listener)
3319{
3320 cpu_physical_memory_set_dirty_tracking(0);
3321}
3322
3323static void core_eventfd_add(MemoryListener *listener,
3324 MemoryRegionSection *section,
3325 bool match_data, uint64_t data, int fd)
3326{
3327}
3328
3329static void core_eventfd_del(MemoryListener *listener,
3330 MemoryRegionSection *section,
3331 bool match_data, uint64_t data, int fd)
3332{
3333}
3334
Avi Kivity50c1e142012-02-08 21:36:02 +02003335static void io_begin(MemoryListener *listener)
3336{
3337}
3338
3339static void io_commit(MemoryListener *listener)
3340{
3341}
3342
Avi Kivity4855d412012-02-08 21:16:05 +02003343static void io_region_add(MemoryListener *listener,
3344 MemoryRegionSection *section)
3345{
Avi Kivitya2d33522012-03-05 17:40:12 +02003346 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
3347
3348 mrio->mr = section->mr;
3349 mrio->offset = section->offset_within_region;
3350 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
Avi Kivity4855d412012-02-08 21:16:05 +02003351 section->offset_within_address_space, section->size);
Avi Kivitya2d33522012-03-05 17:40:12 +02003352 ioport_register(&mrio->iorange);
Avi Kivity4855d412012-02-08 21:16:05 +02003353}
3354
3355static void io_region_del(MemoryListener *listener,
3356 MemoryRegionSection *section)
3357{
3358 isa_unassign_ioport(section->offset_within_address_space, section->size);
3359}
3360
Avi Kivity50c1e142012-02-08 21:36:02 +02003361static void io_region_nop(MemoryListener *listener,
3362 MemoryRegionSection *section)
3363{
3364}
3365
Avi Kivity4855d412012-02-08 21:16:05 +02003366static void io_log_start(MemoryListener *listener,
3367 MemoryRegionSection *section)
3368{
3369}
3370
3371static void io_log_stop(MemoryListener *listener,
3372 MemoryRegionSection *section)
3373{
3374}
3375
3376static void io_log_sync(MemoryListener *listener,
3377 MemoryRegionSection *section)
3378{
3379}
3380
3381static void io_log_global_start(MemoryListener *listener)
3382{
3383}
3384
3385static void io_log_global_stop(MemoryListener *listener)
3386{
3387}
3388
3389static void io_eventfd_add(MemoryListener *listener,
3390 MemoryRegionSection *section,
3391 bool match_data, uint64_t data, int fd)
3392{
3393}
3394
3395static void io_eventfd_del(MemoryListener *listener,
3396 MemoryRegionSection *section,
3397 bool match_data, uint64_t data, int fd)
3398{
3399}
3400
Avi Kivity93632742012-02-08 16:54:16 +02003401static MemoryListener core_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02003402 .begin = core_begin,
3403 .commit = core_commit,
Avi Kivity93632742012-02-08 16:54:16 +02003404 .region_add = core_region_add,
3405 .region_del = core_region_del,
Avi Kivity50c1e142012-02-08 21:36:02 +02003406 .region_nop = core_region_nop,
Avi Kivity93632742012-02-08 16:54:16 +02003407 .log_start = core_log_start,
3408 .log_stop = core_log_stop,
3409 .log_sync = core_log_sync,
3410 .log_global_start = core_log_global_start,
3411 .log_global_stop = core_log_global_stop,
3412 .eventfd_add = core_eventfd_add,
3413 .eventfd_del = core_eventfd_del,
3414 .priority = 0,
3415};
3416
Avi Kivity4855d412012-02-08 21:16:05 +02003417static MemoryListener io_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02003418 .begin = io_begin,
3419 .commit = io_commit,
Avi Kivity4855d412012-02-08 21:16:05 +02003420 .region_add = io_region_add,
3421 .region_del = io_region_del,
Avi Kivity50c1e142012-02-08 21:36:02 +02003422 .region_nop = io_region_nop,
Avi Kivity4855d412012-02-08 21:16:05 +02003423 .log_start = io_log_start,
3424 .log_stop = io_log_stop,
3425 .log_sync = io_log_sync,
3426 .log_global_start = io_log_global_start,
3427 .log_global_stop = io_log_global_stop,
3428 .eventfd_add = io_eventfd_add,
3429 .eventfd_del = io_eventfd_del,
3430 .priority = 0,
3431};
3432
Avi Kivity62152b82011-07-26 14:26:14 +03003433static void memory_map_init(void)
3434{
Anthony Liguori7267c092011-08-20 22:09:37 -05003435 system_memory = g_malloc(sizeof(*system_memory));
Avi Kivity8417ceb2011-08-03 11:56:14 +03003436 memory_region_init(system_memory, "system", INT64_MAX);
Avi Kivity62152b82011-07-26 14:26:14 +03003437 set_system_memory_map(system_memory);
Avi Kivity309cb472011-08-08 16:09:03 +03003438
Anthony Liguori7267c092011-08-20 22:09:37 -05003439 system_io = g_malloc(sizeof(*system_io));
Avi Kivity309cb472011-08-08 16:09:03 +03003440 memory_region_init(system_io, "io", 65536);
3441 set_system_io_map(system_io);
Avi Kivity93632742012-02-08 16:54:16 +02003442
Avi Kivity4855d412012-02-08 21:16:05 +02003443 memory_listener_register(&core_memory_listener, system_memory);
3444 memory_listener_register(&io_memory_listener, system_io);
Avi Kivity62152b82011-07-26 14:26:14 +03003445}
3446
3447MemoryRegion *get_system_memory(void)
3448{
3449 return system_memory;
3450}
3451
Avi Kivity309cb472011-08-08 16:09:03 +03003452MemoryRegion *get_system_io(void)
3453{
3454 return system_io;
3455}
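/* Board wiring sketch, not part of the original file (the
 * memory_region_init_ram() and memory_region_add_subregion() signatures are
 * assumed from this tree's memory.h): guest RAM is attached as a subregion
 * of the region returned by get_system_memory() above. */
static void example_attach_main_ram(ram_addr_t ram_size)
{
    MemoryRegion *ram = g_malloc(sizeof(*ram));

    memory_region_init_ram(ram, "example.ram", ram_size);
    memory_region_add_subregion(get_system_memory(), 0, ram);
}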
3456
pbrooke2eef172008-06-08 01:09:01 +00003457#endif /* !defined(CONFIG_USER_ONLY) */
3458
bellard13eb76e2004-01-24 15:23:36 +00003459/* physical memory access (slow version, mainly for debug) */
3460#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +01003461int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00003462 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003463{
3464 int l, flags;
3465 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00003466 void * p;
bellard13eb76e2004-01-24 15:23:36 +00003467
3468 while (len > 0) {
3469 page = addr & TARGET_PAGE_MASK;
3470 l = (page + TARGET_PAGE_SIZE) - addr;
3471 if (l > len)
3472 l = len;
3473 flags = page_get_flags(page);
3474 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00003475 return -1;
bellard13eb76e2004-01-24 15:23:36 +00003476 if (is_write) {
3477 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00003478 return -1;
bellard579a97f2007-11-11 14:26:47 +00003479 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003480 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00003481 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003482 memcpy(p, buf, l);
3483 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00003484 } else {
3485 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00003486 return -1;
bellard579a97f2007-11-11 14:26:47 +00003487 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003488 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00003489 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003490 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00003491 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00003492 }
3493 len -= l;
3494 buf += l;
3495 addr += l;
3496 }
Paul Brooka68fe892010-03-01 00:08:59 +00003497 return 0;
bellard13eb76e2004-01-24 15:23:36 +00003498}
bellard8df1cd02005-01-28 22:37:22 +00003499
bellard13eb76e2004-01-24 15:23:36 +00003500#else
Anthony Liguoric227f092009-10-01 16:12:16 -05003501void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
bellard13eb76e2004-01-24 15:23:36 +00003502 int len, int is_write)
3503{
Avi Kivity37ec01d2012-03-08 18:08:35 +02003504 int l;
bellard13eb76e2004-01-24 15:23:36 +00003505 uint8_t *ptr;
3506 uint32_t val;
Anthony Liguoric227f092009-10-01 16:12:16 -05003507 target_phys_addr_t page;
Avi Kivityf3705d52012-03-08 16:16:34 +02003508 MemoryRegionSection *section;
ths3b46e622007-09-17 08:09:54 +00003509
bellard13eb76e2004-01-24 15:23:36 +00003510 while (len > 0) {
3511 page = addr & TARGET_PAGE_MASK;
3512 l = (page + TARGET_PAGE_SIZE) - addr;
3513 if (l > len)
3514 l = len;
Avi Kivity06ef3522012-02-13 16:11:22 +02003515 section = phys_page_find(page >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003516
bellard13eb76e2004-01-24 15:23:36 +00003517 if (is_write) {
Avi Kivityf3705d52012-03-08 16:16:34 +02003518 if (!memory_region_is_ram(section->mr)) {
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003519 target_phys_addr_t addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02003520 addr1 = section_addr(section, addr);
bellard6a00d602005-11-21 23:25:50 +00003521 /* XXX: could force cpu_single_env to NULL to avoid
3522 potential bugs */
aurel326c2934d2009-02-18 21:37:17 +00003523 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003524 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003525 val = ldl_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003526 io_mem_write(section->mr, addr1, val, 4);
bellard13eb76e2004-01-24 15:23:36 +00003527 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003528 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003529 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003530 val = lduw_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003531 io_mem_write(section->mr, addr1, val, 2);
bellard13eb76e2004-01-24 15:23:36 +00003532 l = 2;
3533 } else {
bellard1c213d12005-09-03 10:49:04 +00003534 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003535 val = ldub_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003536 io_mem_write(section->mr, addr1, val, 1);
bellard13eb76e2004-01-24 15:23:36 +00003537 l = 1;
3538 }
Avi Kivityf3705d52012-03-08 16:16:34 +02003539 } else if (!section->readonly) {
Anthony PERARD8ca56922011-07-15 04:32:53 +00003540 ram_addr_t addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02003541 addr1 = memory_region_get_ram_addr(section->mr)
3542 + section_addr(section, addr);
bellard13eb76e2004-01-24 15:23:36 +00003543 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003544 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00003545 memcpy(ptr, buf, l);
bellard3a7d9292005-08-21 09:26:42 +00003546 if (!cpu_physical_memory_is_dirty(addr1)) {
3547 /* invalidate code */
3548 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3549 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003550 cpu_physical_memory_set_dirty_flags(
3551 addr1, (0xff & ~CODE_DIRTY_FLAG));
bellard3a7d9292005-08-21 09:26:42 +00003552 }
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003553 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00003554 }
3555 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02003556 if (!is_ram_rom_romd(section)) {
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003557 target_phys_addr_t addr1;
bellard13eb76e2004-01-24 15:23:36 +00003558 /* I/O case */
Avi Kivityf3705d52012-03-08 16:16:34 +02003559 addr1 = section_addr(section, addr);
aurel326c2934d2009-02-18 21:37:17 +00003560 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003561 /* 32 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02003562 val = io_mem_read(section->mr, addr1, 4);
bellardc27004e2005-01-03 23:35:10 +00003563 stl_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003564 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003565 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003566 /* 16 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02003567 val = io_mem_read(section->mr, addr1, 2);
bellardc27004e2005-01-03 23:35:10 +00003568 stw_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003569 l = 2;
3570 } else {
bellard1c213d12005-09-03 10:49:04 +00003571 /* 8 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02003572 val = io_mem_read(section->mr, addr1, 1);
bellardc27004e2005-01-03 23:35:10 +00003573 stb_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003574 l = 1;
3575 }
3576 } else {
3577 /* RAM case */
Anthony PERARD0a1b3572012-03-19 15:54:34 +00003578 ptr = qemu_get_ram_ptr(section->mr->ram_addr
3579 + section_addr(section, addr));
Avi Kivityf3705d52012-03-08 16:16:34 +02003580 memcpy(buf, ptr, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003581 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00003582 }
3583 }
3584 len -= l;
3585 buf += l;
3586 addr += l;
3587 }
3588}
bellard8df1cd02005-01-28 22:37:22 +00003589
bellardd0ecd2a2006-04-23 17:14:48 +00003590/* used for ROM loading : can write in RAM and ROM */
Anthony Liguoric227f092009-10-01 16:12:16 -05003591void cpu_physical_memory_write_rom(target_phys_addr_t addr,
bellardd0ecd2a2006-04-23 17:14:48 +00003592 const uint8_t *buf, int len)
3593{
3594 int l;
3595 uint8_t *ptr;
Anthony Liguoric227f092009-10-01 16:12:16 -05003596 target_phys_addr_t page;
Avi Kivityf3705d52012-03-08 16:16:34 +02003597 MemoryRegionSection *section;
ths3b46e622007-09-17 08:09:54 +00003598
bellardd0ecd2a2006-04-23 17:14:48 +00003599 while (len > 0) {
3600 page = addr & TARGET_PAGE_MASK;
3601 l = (page + TARGET_PAGE_SIZE) - addr;
3602 if (l > len)
3603 l = len;
Avi Kivity06ef3522012-02-13 16:11:22 +02003604 section = phys_page_find(page >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003605
Avi Kivityf3705d52012-03-08 16:16:34 +02003606 if (!is_ram_rom_romd(section)) {
bellardd0ecd2a2006-04-23 17:14:48 +00003607 /* do nothing */
3608 } else {
3609 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02003610 addr1 = memory_region_get_ram_addr(section->mr)
3611 + section_addr(section, addr);
bellardd0ecd2a2006-04-23 17:14:48 +00003612 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003613 ptr = qemu_get_ram_ptr(addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00003614 memcpy(ptr, buf, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003615 qemu_put_ram_ptr(ptr);
bellardd0ecd2a2006-04-23 17:14:48 +00003616 }
3617 len -= l;
3618 buf += l;
3619 addr += l;
3620 }
3621}
3622
aliguori6d16c2f2009-01-22 16:59:11 +00003623typedef struct {
3624 void *buffer;
Anthony Liguoric227f092009-10-01 16:12:16 -05003625 target_phys_addr_t addr;
3626 target_phys_addr_t len;
aliguori6d16c2f2009-01-22 16:59:11 +00003627} BounceBuffer;
3628
3629static BounceBuffer bounce;
3630
aliguoriba223c22009-01-22 16:59:16 +00003631typedef struct MapClient {
3632 void *opaque;
3633 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00003634 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00003635} MapClient;
3636
Blue Swirl72cf2d42009-09-12 07:36:22 +00003637static QLIST_HEAD(map_client_list, MapClient) map_client_list
3638 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003639
3640void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3641{
Anthony Liguori7267c092011-08-20 22:09:37 -05003642 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00003643
3644 client->opaque = opaque;
3645 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003646 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00003647 return client;
3648}
3649
3650void cpu_unregister_map_client(void *_client)
3651{
3652 MapClient *client = (MapClient *)_client;
3653
Blue Swirl72cf2d42009-09-12 07:36:22 +00003654 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05003655 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00003656}
3657
3658static void cpu_notify_map_clients(void)
3659{
3660 MapClient *client;
3661
Blue Swirl72cf2d42009-09-12 07:36:22 +00003662 while (!QLIST_EMPTY(&map_client_list)) {
3663 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003664 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09003665 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00003666 }
3667}
3668
aliguori6d16c2f2009-01-22 16:59:11 +00003669/* Map a physical memory region into a host virtual address.
3670 * May map a subset of the requested range, given by and returned in *plen.
3671 * May return NULL if resources needed to perform the mapping are exhausted.
3672 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00003673 * Use cpu_register_map_client() to know when retrying the map operation is
3674 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00003675 */
Anthony Liguoric227f092009-10-01 16:12:16 -05003676void *cpu_physical_memory_map(target_phys_addr_t addr,
3677 target_phys_addr_t *plen,
aliguori6d16c2f2009-01-22 16:59:11 +00003678 int is_write)
3679{
Anthony Liguoric227f092009-10-01 16:12:16 -05003680 target_phys_addr_t len = *plen;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003681 target_phys_addr_t todo = 0;
aliguori6d16c2f2009-01-22 16:59:11 +00003682 int l;
Anthony Liguoric227f092009-10-01 16:12:16 -05003683 target_phys_addr_t page;
Avi Kivityf3705d52012-03-08 16:16:34 +02003684 MemoryRegionSection *section;
Anthony PERARDf15fbc42011-07-20 08:17:42 +00003685 ram_addr_t raddr = RAM_ADDR_MAX;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003686 ram_addr_t rlen;
3687 void *ret;
aliguori6d16c2f2009-01-22 16:59:11 +00003688
3689 while (len > 0) {
3690 page = addr & TARGET_PAGE_MASK;
3691 l = (page + TARGET_PAGE_SIZE) - addr;
3692 if (l > len)
3693 l = len;
Avi Kivity06ef3522012-02-13 16:11:22 +02003694 section = phys_page_find(page >> TARGET_PAGE_BITS);
aliguori6d16c2f2009-01-22 16:59:11 +00003695
Avi Kivityf3705d52012-03-08 16:16:34 +02003696 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003697 if (todo || bounce.buffer) {
aliguori6d16c2f2009-01-22 16:59:11 +00003698 break;
3699 }
3700 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3701 bounce.addr = addr;
3702 bounce.len = l;
3703 if (!is_write) {
Stefan Weil54f7b4a2011-04-10 18:23:39 +02003704 cpu_physical_memory_read(addr, bounce.buffer, l);
aliguori6d16c2f2009-01-22 16:59:11 +00003705 }
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003706
3707 *plen = l;
3708 return bounce.buffer;
aliguori6d16c2f2009-01-22 16:59:11 +00003709 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003710 if (!todo) {
Avi Kivityf3705d52012-03-08 16:16:34 +02003711 raddr = memory_region_get_ram_addr(section->mr)
3712 + section_addr(section, addr);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003713 }
aliguori6d16c2f2009-01-22 16:59:11 +00003714
3715 len -= l;
3716 addr += l;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003717 todo += l;
aliguori6d16c2f2009-01-22 16:59:11 +00003718 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003719 rlen = todo;
3720 ret = qemu_ram_ptr_length(raddr, &rlen);
3721 *plen = rlen;
3722 return ret;
aliguori6d16c2f2009-01-22 16:59:11 +00003723}
3724
3725/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3726 * Will also mark the memory as dirty if is_write == 1. access_len gives
3727 * the amount of memory that was actually read or written by the caller.
3728 */
Anthony Liguoric227f092009-10-01 16:12:16 -05003729void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3730 int is_write, target_phys_addr_t access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00003731{
3732 if (buffer != bounce.buffer) {
3733 if (is_write) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03003734 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00003735 while (access_len) {
3736 unsigned l;
3737 l = TARGET_PAGE_SIZE;
3738 if (l > access_len)
3739 l = access_len;
3740 if (!cpu_physical_memory_is_dirty(addr1)) {
3741 /* invalidate code */
3742 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3743 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003744 cpu_physical_memory_set_dirty_flags(
3745 addr1, (0xff & ~CODE_DIRTY_FLAG));
aliguori6d16c2f2009-01-22 16:59:11 +00003746 }
3747 addr1 += l;
3748 access_len -= l;
3749 }
3750 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003751 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003752 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003753 }
aliguori6d16c2f2009-01-22 16:59:11 +00003754 return;
3755 }
3756 if (is_write) {
3757 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3758 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00003759 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00003760 bounce.buffer = NULL;
aliguoriba223c22009-01-22 16:59:16 +00003761 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00003762}
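/* Usage sketch (illustrative): the map/use/unmap pattern for DMA. A single
 * call may map less than requested (e.g. only the bounce buffer), so a real
 * caller loops or registers a map client and retries; here any remainder
 * simply falls back to cpu_physical_memory_rw(). */
static void example_dma_write(target_phys_addr_t addr, const uint8_t *buf,
                              target_phys_addr_t len)
{
    target_phys_addr_t plen = len;
    void *p = cpu_physical_memory_map(addr, &plen, 1 /* is_write */);

    if (p) {
        memcpy(p, buf, plen);
        cpu_physical_memory_unmap(p, plen, 1, plen);
        addr += plen;
        buf += plen;
        len -= plen;
    }
    if (len) {
        cpu_physical_memory_rw(addr, (uint8_t *)buf, (int)len, 1);
    }
}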
bellardd0ecd2a2006-04-23 17:14:48 +00003763
bellard8df1cd02005-01-28 22:37:22 +00003764/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003765static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
3766 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003767{
bellard8df1cd02005-01-28 22:37:22 +00003768 uint8_t *ptr;
3769 uint32_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02003770 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00003771
Avi Kivity06ef3522012-02-13 16:11:22 +02003772 section = phys_page_find(addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003773
Avi Kivityf3705d52012-03-08 16:16:34 +02003774 if (!is_ram_rom_romd(section)) {
bellard8df1cd02005-01-28 22:37:22 +00003775 /* I/O case */
Avi Kivityf3705d52012-03-08 16:16:34 +02003776 addr = section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003777 val = io_mem_read(section->mr, addr, 4);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003778#if defined(TARGET_WORDS_BIGENDIAN)
3779 if (endian == DEVICE_LITTLE_ENDIAN) {
3780 val = bswap32(val);
3781 }
3782#else
3783 if (endian == DEVICE_BIG_ENDIAN) {
3784 val = bswap32(val);
3785 }
3786#endif
bellard8df1cd02005-01-28 22:37:22 +00003787 } else {
3788 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02003789 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003790 & TARGET_PAGE_MASK)
Avi Kivityf3705d52012-03-08 16:16:34 +02003791 + section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003792 switch (endian) {
3793 case DEVICE_LITTLE_ENDIAN:
3794 val = ldl_le_p(ptr);
3795 break;
3796 case DEVICE_BIG_ENDIAN:
3797 val = ldl_be_p(ptr);
3798 break;
3799 default:
3800 val = ldl_p(ptr);
3801 break;
3802 }
bellard8df1cd02005-01-28 22:37:22 +00003803 }
3804 return val;
3805}
3806
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003807uint32_t ldl_phys(target_phys_addr_t addr)
3808{
3809 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3810}
3811
3812uint32_t ldl_le_phys(target_phys_addr_t addr)
3813{
3814 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3815}
3816
3817uint32_t ldl_be_phys(target_phys_addr_t addr)
3818{
3819 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
3820}
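/* Usage note (hypothetical offsets): device models pick the accessor that
   matches the device's byte order rather than the target's, e.g.

       uint32_t features = ldl_le_phys(cfg_base + 0x10);

   ldl_phys() keeps the target's native order, while the _le/_be variants
   force a fixed order regardless of TARGET_WORDS_BIGENDIAN. The 16- and
   64-bit families below follow the same pattern. */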
3821
bellard84b7b8e2005-11-28 21:19:04 +00003822/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003823static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
3824 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00003825{
bellard84b7b8e2005-11-28 21:19:04 +00003826 uint8_t *ptr;
3827 uint64_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02003828 MemoryRegionSection *section;
bellard84b7b8e2005-11-28 21:19:04 +00003829
Avi Kivity06ef3522012-02-13 16:11:22 +02003830 section = phys_page_find(addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003831
Avi Kivityf3705d52012-03-08 16:16:34 +02003832 if (!is_ram_rom_romd(section)) {
bellard84b7b8e2005-11-28 21:19:04 +00003833 /* I/O case */
Avi Kivityf3705d52012-03-08 16:16:34 +02003834 addr = section_addr(section, addr);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003835
3836 /* XXX This is broken when device endian != cpu endian.
3837 Fix by honouring the "endian" argument; see the sketch below. */
bellard84b7b8e2005-11-28 21:19:04 +00003838#ifdef TARGET_WORDS_BIGENDIAN
Avi Kivity37ec01d2012-03-08 18:08:35 +02003839 val = io_mem_read(section->mr, addr, 4) << 32;
3840 val |= io_mem_read(section->mr, addr + 4, 4);
bellard84b7b8e2005-11-28 21:19:04 +00003841#else
Avi Kivity37ec01d2012-03-08 18:08:35 +02003842 val = io_mem_read(section->mr, addr, 4);
3843 val |= io_mem_read(section->mr, addr + 4, 4) << 32;
bellard84b7b8e2005-11-28 21:19:04 +00003844#endif
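        /* Hedged fix sketch for the XXX above (untested, not upstream
           code): once the value is composed in native order, swapping
           the whole 64-bit result handles a mismatched device, exactly
           as ldl_phys_internal does with bswap32():

           #if defined(TARGET_WORDS_BIGENDIAN)
               if (endian == DEVICE_LITTLE_ENDIAN) {
                   val = bswap64(val);
               }
           #else
               if (endian == DEVICE_BIG_ENDIAN) {
                   val = bswap64(val);
               }
           #endif
        */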
3845 } else {
3846 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02003847 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003848 & TARGET_PAGE_MASK)
Avi Kivityf3705d52012-03-08 16:16:34 +02003849 + section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003850 switch (endian) {
3851 case DEVICE_LITTLE_ENDIAN:
3852 val = ldq_le_p(ptr);
3853 break;
3854 case DEVICE_BIG_ENDIAN:
3855 val = ldq_be_p(ptr);
3856 break;
3857 default:
3858 val = ldq_p(ptr);
3859 break;
3860 }
bellard84b7b8e2005-11-28 21:19:04 +00003861 }
3862 return val;
3863}
3864
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003865uint64_t ldq_phys(target_phys_addr_t addr)
3866{
3867 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3868}
3869
3870uint64_t ldq_le_phys(target_phys_addr_t addr)
3871{
3872 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3873}
3874
3875uint64_t ldq_be_phys(target_phys_addr_t addr)
3876{
3877 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
3878}
3879
bellardaab33092005-10-30 20:48:42 +00003880/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05003881uint32_t ldub_phys(target_phys_addr_t addr)
bellardaab33092005-10-30 20:48:42 +00003882{
3883 uint8_t val;
3884 cpu_physical_memory_read(addr, &val, 1);
3885 return val;
3886}
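/* A possible answer to the "XXX: optimize" above (hedged sketch, untested,
   hypothetical name): mirror lduw_phys_internal for a single byte instead
   of going through the generic cpu_physical_memory_read() path. A one-byte
   access needs no endianness handling. */
static inline uint32_t ldub_phys_direct(target_phys_addr_t addr)
{
    MemoryRegionSection *section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!is_ram_rom_romd(section)) {
        /* I/O case */
        return io_mem_read(section->mr, section_addr(section, addr), 1);
    }
    /* RAM case */
    return *(uint8_t *)qemu_get_ram_ptr(
        (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
        + section_addr(section, addr));
}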
3887
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003888/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003889static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
3890 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003891{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003892 uint8_t *ptr;
3893 uint32_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02003894 MemoryRegionSection *section;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003895
Avi Kivity06ef3522012-02-13 16:11:22 +02003896 section = phys_page_find(addr >> TARGET_PAGE_BITS);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003897
Avi Kivityf3705d52012-03-08 16:16:34 +02003898 if (!is_ram_rom_romd(section)) {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003899 /* I/O case */
Avi Kivityf3705d52012-03-08 16:16:34 +02003900 addr = section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003901 val = io_mem_read(section->mr, addr, 2);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003902#if defined(TARGET_WORDS_BIGENDIAN)
3903 if (endian == DEVICE_LITTLE_ENDIAN) {
3904 val = bswap16(val);
3905 }
3906#else
3907 if (endian == DEVICE_BIG_ENDIAN) {
3908 val = bswap16(val);
3909 }
3910#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003911 } else {
3912 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02003913 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003914 & TARGET_PAGE_MASK)
Avi Kivityf3705d52012-03-08 16:16:34 +02003915 + section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003916 switch (endian) {
3917 case DEVICE_LITTLE_ENDIAN:
3918 val = lduw_le_p(ptr);
3919 break;
3920 case DEVICE_BIG_ENDIAN:
3921 val = lduw_be_p(ptr);
3922 break;
3923 default:
3924 val = lduw_p(ptr);
3925 break;
3926 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003927 }
3928 return val;
bellardaab33092005-10-30 20:48:42 +00003929}
3930
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003931uint32_t lduw_phys(target_phys_addr_t addr)
3932{
3933 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3934}
3935
3936uint32_t lduw_le_phys(target_phys_addr_t addr)
3937{
3938 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3939}
3940
3941uint32_t lduw_be_phys(target_phys_addr_t addr)
3942{
3943 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
3944}
3945
bellard8df1cd02005-01-28 22:37:22 +00003946/* warning: addr must be aligned. The ram page is not marked as dirty
3947 and the code inside is not invalidated. This is useful when the dirty
3948 bits are used to track modified PTEs */
Anthony Liguoric227f092009-10-01 16:12:16 -05003949void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
bellard8df1cd02005-01-28 22:37:22 +00003950{
bellard8df1cd02005-01-28 22:37:22 +00003951 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02003952 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00003953
Avi Kivity06ef3522012-02-13 16:11:22 +02003954 section = phys_page_find(addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003955
Avi Kivityf3705d52012-03-08 16:16:34 +02003956 if (!memory_region_is_ram(section->mr) || section->readonly) {
Avi Kivityf3705d52012-03-08 16:16:34 +02003957 addr = section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003958 if (memory_region_is_ram(section->mr)) {
3959 section = &phys_sections[phys_section_rom];
3960 }
3961 io_mem_write(section->mr, addr, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00003962 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02003963 unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003964 & TARGET_PAGE_MASK)
Avi Kivityf3705d52012-03-08 16:16:34 +02003965 + section_addr(section, addr);
pbrook5579c7f2009-04-11 14:47:08 +00003966 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00003967 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00003968
3969 if (unlikely(in_migration)) {
3970 if (!cpu_physical_memory_is_dirty(addr1)) {
3971 /* invalidate code */
3972 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3973 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003974 cpu_physical_memory_set_dirty_flags(
3975 addr1, (0xff & ~CODE_DIRTY_FLAG));
aliguori74576192008-10-06 14:02:03 +00003976 }
3977 }
bellard8df1cd02005-01-28 22:37:22 +00003978 }
3979}
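/* Usage note: this is the PTE-tracking case described above. Target MMU
   helpers (the x86 page-table walker follows this pattern) set accessed or
   dirty flags in a guest PTE without triggering the dirty-bit and
   TB-invalidation machinery for that page. Sketch, using the x86 names:

       uint32_t pte = ldl_phys(pte_addr);
       if (!(pte & PG_ACCESSED_MASK)) {
           stl_phys_notdirty(pte_addr, pte | PG_ACCESSED_MASK);
       }
*/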
3980
Anthony Liguoric227f092009-10-01 16:12:16 -05003981void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
j_mayerbc98a7e2007-04-04 07:55:12 +00003982{
j_mayerbc98a7e2007-04-04 07:55:12 +00003983 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02003984 MemoryRegionSection *section;
j_mayerbc98a7e2007-04-04 07:55:12 +00003985
Avi Kivity06ef3522012-02-13 16:11:22 +02003986 section = phys_page_find(addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003987
Avi Kivityf3705d52012-03-08 16:16:34 +02003988 if (!memory_region_is_ram(section->mr) || section->readonly) {
Avi Kivityf3705d52012-03-08 16:16:34 +02003989 addr = section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003990 if (memory_region_is_ram(section->mr)) {
3991 section = &phys_sections[phys_section_rom];
3992 }
j_mayerbc98a7e2007-04-04 07:55:12 +00003993#ifdef TARGET_WORDS_BIGENDIAN
Avi Kivity37ec01d2012-03-08 18:08:35 +02003994 io_mem_write(section->mr, addr, val >> 32, 4);
3995 io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
j_mayerbc98a7e2007-04-04 07:55:12 +00003996#else
Avi Kivity37ec01d2012-03-08 18:08:35 +02003997 io_mem_write(section->mr, addr, (uint32_t)val, 4);
3998 io_mem_write(section->mr, addr + 4, val >> 32, 4);
j_mayerbc98a7e2007-04-04 07:55:12 +00003999#endif
4000 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02004001 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02004002 & TARGET_PAGE_MASK)
Avi Kivityf3705d52012-03-08 16:16:34 +02004003 + section_addr(section, addr));
j_mayerbc98a7e2007-04-04 07:55:12 +00004004 stq_p(ptr, val);
4005 }
4006}
4007
bellard8df1cd02005-01-28 22:37:22 +00004008/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004009static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
4010 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00004011{
bellard8df1cd02005-01-28 22:37:22 +00004012 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02004013 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00004014
Avi Kivity06ef3522012-02-13 16:11:22 +02004015 section = phys_page_find(addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00004016
Avi Kivityf3705d52012-03-08 16:16:34 +02004017 if (!memory_region_is_ram(section->mr) || section->readonly) {
Avi Kivityf3705d52012-03-08 16:16:34 +02004018 addr = section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02004019 if (memory_region_is_ram(section->mr)) {
4020 section = &phys_sections[phys_section_rom];
4021 }
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004022#if defined(TARGET_WORDS_BIGENDIAN)
4023 if (endian == DEVICE_LITTLE_ENDIAN) {
4024 val = bswap32(val);
4025 }
4026#else
4027 if (endian == DEVICE_BIG_ENDIAN) {
4028 val = bswap32(val);
4029 }
4030#endif
Avi Kivity37ec01d2012-03-08 18:08:35 +02004031 io_mem_write(section->mr, addr, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00004032 } else {
4033 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02004034 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
4035 + section_addr(section, addr);
bellard8df1cd02005-01-28 22:37:22 +00004036 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00004037 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004038 switch (endian) {
4039 case DEVICE_LITTLE_ENDIAN:
4040 stl_le_p(ptr, val);
4041 break;
4042 case DEVICE_BIG_ENDIAN:
4043 stl_be_p(ptr, val);
4044 break;
4045 default:
4046 stl_p(ptr, val);
4047 break;
4048 }
bellard3a7d9292005-08-21 09:26:42 +00004049 if (!cpu_physical_memory_is_dirty(addr1)) {
4050 /* invalidate code */
4051 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4052 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09004053 cpu_physical_memory_set_dirty_flags(addr1,
4054 (0xff & ~CODE_DIRTY_FLAG));
bellard3a7d9292005-08-21 09:26:42 +00004055 }
bellard8df1cd02005-01-28 22:37:22 +00004056 }
4057}
4058
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004059void stl_phys(target_phys_addr_t addr, uint32_t val)
4060{
4061 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4062}
4063
4064void stl_le_phys(target_phys_addr_t addr, uint32_t val)
4065{
4066 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4067}
4068
4069void stl_be_phys(target_phys_addr_t addr, uint32_t val)
4070{
4071 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4072}
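/* Usage note (hypothetical device and layout): a device model completing a
   request writes its status word into a guest ring in the bus's byte order.
   Unlike stl_phys_notdirty() above, these stores mark the page dirty and
   invalidate any translated code derived from it:

       stl_le_phys(ring_base + slot * 16 + 12, status);
*/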
4073
bellardaab33092005-10-30 20:48:42 +00004074/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05004075void stb_phys(target_phys_addr_t addr, uint32_t val)
bellardaab33092005-10-30 20:48:42 +00004076{
4077 uint8_t v = val;
4078 cpu_physical_memory_write(addr, &v, 1);
4079}
4080
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004081/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004082static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
4083 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00004084{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004085 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02004086 MemoryRegionSection *section;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004087
Avi Kivity06ef3522012-02-13 16:11:22 +02004088 section = phys_page_find(addr >> TARGET_PAGE_BITS);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004089
Avi Kivityf3705d52012-03-08 16:16:34 +02004090 if (!memory_region_is_ram(section->mr) || section->readonly) {
Avi Kivityf3705d52012-03-08 16:16:34 +02004091 addr = section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02004092 if (memory_region_is_ram(section->mr)) {
4093 section = &phys_sections[phys_section_rom];
4094 }
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004095#if defined(TARGET_WORDS_BIGENDIAN)
4096 if (endian == DEVICE_LITTLE_ENDIAN) {
4097 val = bswap16(val);
4098 }
4099#else
4100 if (endian == DEVICE_BIG_ENDIAN) {
4101 val = bswap16(val);
4102 }
4103#endif
Avi Kivity37ec01d2012-03-08 18:08:35 +02004104 io_mem_write(section->mr, addr, val, 2);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004105 } else {
4106 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02004107 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
4108 + section_addr(section, addr);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004109 /* RAM case */
4110 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004111 switch (endian) {
4112 case DEVICE_LITTLE_ENDIAN:
4113 stw_le_p(ptr, val);
4114 break;
4115 case DEVICE_BIG_ENDIAN:
4116 stw_be_p(ptr, val);
4117 break;
4118 default:
4119 stw_p(ptr, val);
4120 break;
4121 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004122 if (!cpu_physical_memory_is_dirty(addr1)) {
4123 /* invalidate code */
4124 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4125 /* set dirty bit */
4126 cpu_physical_memory_set_dirty_flags(addr1,
4127 (0xff & ~CODE_DIRTY_FLAG));
4128 }
4129 }
bellardaab33092005-10-30 20:48:42 +00004130}
4131
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004132void stw_phys(target_phys_addr_t addr, uint32_t val)
4133{
4134 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4135}
4136
4137void stw_le_phys(target_phys_addr_t addr, uint32_t val)
4138{
4139 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4140}
4141
4142void stw_be_phys(target_phys_addr_t addr, uint32_t val)
4143{
4144 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4145}
4146
bellardaab33092005-10-30 20:48:42 +00004147/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05004148void stq_phys(target_phys_addr_t addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00004149{
4150 val = tswap64(val);
Stefan Weil71d2b722011-03-26 21:06:56 +01004151 cpu_physical_memory_write(addr, &val, 8);
bellardaab33092005-10-30 20:48:42 +00004152}
4153
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004154void stq_le_phys(target_phys_addr_t addr, uint64_t val)
4155{
4156 val = cpu_to_le64(val);
4157 cpu_physical_memory_write(addr, &val, 8);
4158}
4159
4160void stq_be_phys(target_phys_addr_t addr, uint64_t val)
4161{
4162 val = cpu_to_be64(val);
4163 cpu_physical_memory_write(addr, &val, 8);
4164}
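/* Usage note: the 64-bit store trio mirrors the 32-bit one: tswap64() gives
   target-native order, cpu_to_le64()/cpu_to_be64() force a fixed order. A
   descriptor address published little-endian (hypothetical names):

       stq_le_phys(desc_addr, buf_addr);
*/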
4165
aliguori5e2972f2009-03-28 17:51:36 +00004166/* virtual memory access for debug (includes writing to ROM) */
Andreas Färber9349b4f2012-03-14 01:38:32 +01004167int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00004168 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00004169{
4170 int l;
Anthony Liguoric227f092009-10-01 16:12:16 -05004171 target_phys_addr_t phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00004172 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00004173
4174 while (len > 0) {
4175 page = addr & TARGET_PAGE_MASK;
4176 phys_addr = cpu_get_phys_page_debug(env, page);
4177 /* if no physical page mapped, return an error */
4178 if (phys_addr == -1)
4179 return -1;
4180 l = (page + TARGET_PAGE_SIZE) - addr;
4181 if (l > len)
4182 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00004183 phys_addr += (addr & ~TARGET_PAGE_MASK);
aliguori5e2972f2009-03-28 17:51:36 +00004184 if (is_write)
4185 cpu_physical_memory_write_rom(phys_addr, buf, l);
4186 else
aliguori5e2972f2009-03-28 17:51:36 +00004187 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
bellard13eb76e2004-01-24 15:23:36 +00004188 len -= l;
4189 buf += l;
4190 addr += l;
4191 }
4192 return 0;
4193}
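/* Usage sketch (gdbstub-style, hypothetical values): read guest memory at a
   virtual address while the CPU is stopped. Translation happens page by page
   via cpu_get_phys_page_debug(), and writes go through
   cpu_physical_memory_write_rom() so breakpoints can be planted even in ROM:

       uint8_t buf[16];
       if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0) {
           return -1;
       }

   A negative return means some page in the range was unmapped. */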
Paul Brooka68fe892010-03-01 00:08:59 +00004194#endif
bellard13eb76e2004-01-24 15:23:36 +00004195
pbrook2e70f6e2008-06-29 01:03:05 +00004196/* In deterministic execution mode, instructions that perform device I/O
4197 must be the last in their TB */
Blue Swirl20503962012-04-09 14:20:20 +00004198void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
pbrook2e70f6e2008-06-29 01:03:05 +00004199{
4200 TranslationBlock *tb;
4201 uint32_t n, cflags;
4202 target_ulong pc, cs_base;
4203 uint64_t flags;
4204
Blue Swirl20503962012-04-09 14:20:20 +00004205 tb = tb_find_pc(retaddr);
pbrook2e70f6e2008-06-29 01:03:05 +00004206 if (!tb) {
4207 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
Blue Swirl20503962012-04-09 14:20:20 +00004208 (void *)retaddr);
pbrook2e70f6e2008-06-29 01:03:05 +00004209 }
4210 n = env->icount_decr.u16.low + tb->icount;
Blue Swirl20503962012-04-09 14:20:20 +00004211 cpu_restore_state(tb, env, retaddr);
pbrook2e70f6e2008-06-29 01:03:05 +00004212 /* Calculate how many instructions had been executed before the fault
thsbf20dc02008-06-30 17:22:19 +00004213 occurred. */
pbrook2e70f6e2008-06-29 01:03:05 +00004214 n = n - env->icount_decr.u16.low;
4215 /* Generate a new TB ending on the I/O insn. */
4216 n++;
4217 /* On MIPS and SH, delay slot instructions can only be restarted if
4218 they were already the first instruction in the TB. If this is not
thsbf20dc02008-06-30 17:22:19 +00004219 the first instruction in a TB then re-execute the preceding
pbrook2e70f6e2008-06-29 01:03:05 +00004220 branch. */
4221#if defined(TARGET_MIPS)
4222 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4223 env->active_tc.PC -= 4;
4224 env->icount_decr.u16.low++;
4225 env->hflags &= ~MIPS_HFLAG_BMASK;
4226 }
4227#elif defined(TARGET_SH4)
4228 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4229 && n > 1) {
4230 env->pc -= 2;
4231 env->icount_decr.u16.low++;
4232 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4233 }
4234#endif
4235 /* This should never happen. */
4236 if (n > CF_COUNT_MASK)
4237 cpu_abort(env, "TB too big during recompile");
4238
4239 cflags = n | CF_LAST_IO;
4240 pc = tb->pc;
4241 cs_base = tb->cs_base;
4242 flags = tb->flags;
4243 tb_phys_invalidate(tb, -1);
4244 /* FIXME: In theory this could raise an exception. In practice
4245 we have already translated the block once so it's probably ok. */
4246 tb_gen_code(env, pc, cs_base, flags, cflags);
thsbf20dc02008-06-30 17:22:19 +00004247 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
pbrook2e70f6e2008-06-29 01:03:05 +00004248 the first in the TB) then we end up generating a whole new TB and
4249 repeating the fault, which is horribly inefficient.
4250 Better would be to execute just this insn uncached, or generate a
4251 second new TB. */
4252 cpu_resume_from_signal(env, NULL);
4253}
4254
Paul Brookb3755a92010-03-12 16:54:58 +00004255#if !defined(CONFIG_USER_ONLY)
4256
Stefan Weil055403b2010-10-22 23:03:32 +02004257void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
bellarde3db7222005-01-26 22:00:47 +00004258{
4259 int i, target_code_size, max_target_code_size;
4260 int direct_jmp_count, direct_jmp2_count, cross_page;
4261 TranslationBlock *tb;
ths3b46e622007-09-17 08:09:54 +00004262
bellarde3db7222005-01-26 22:00:47 +00004263 target_code_size = 0;
4264 max_target_code_size = 0;
4265 cross_page = 0;
4266 direct_jmp_count = 0;
4267 direct_jmp2_count = 0;
4268 for(i = 0; i < nb_tbs; i++) {
4269 tb = &tbs[i];
4270 target_code_size += tb->size;
4271 if (tb->size > max_target_code_size)
4272 max_target_code_size = tb->size;
4273 if (tb->page_addr[1] != -1)
4274 cross_page++;
4275 if (tb->tb_next_offset[0] != 0xffff) {
4276 direct_jmp_count++;
4277 if (tb->tb_next_offset[1] != 0xffff) {
4278 direct_jmp2_count++;
4279 }
4280 }
4281 }
4282 /* XXX: avoid using doubles? */
bellard57fec1f2008-02-01 10:50:11 +00004283 cpu_fprintf(f, "Translation buffer state:\n");
Stefan Weil055403b2010-10-22 23:03:32 +02004284 cpu_fprintf(f, "gen code size %td/%ld\n",
bellard26a5f132008-05-28 12:30:31 +00004285 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4286 cpu_fprintf(f, "TB count %d/%d\n",
4287 nb_tbs, code_gen_max_blocks);
ths5fafdf22007-09-16 21:08:06 +00004288 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
bellarde3db7222005-01-26 22:00:47 +00004289 nb_tbs ? target_code_size / nb_tbs : 0,
4290 max_target_code_size);
Stefan Weil055403b2010-10-22 23:03:32 +02004291 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
bellarde3db7222005-01-26 22:00:47 +00004292 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4293 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
ths5fafdf22007-09-16 21:08:06 +00004294 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4295 cross_page,
bellarde3db7222005-01-26 22:00:47 +00004296 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4297 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
ths5fafdf22007-09-16 21:08:06 +00004298 direct_jmp_count,
bellarde3db7222005-01-26 22:00:47 +00004299 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4300 direct_jmp2_count,
4301 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
bellard57fec1f2008-02-01 10:50:11 +00004302 cpu_fprintf(f, "\nStatistics:\n");
bellarde3db7222005-01-26 22:00:47 +00004303 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4304 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4305 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
bellardb67d9a52008-05-23 09:57:34 +00004306 tcg_dump_info(f, cpu_fprintf);
bellarde3db7222005-01-26 22:00:47 +00004307}
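/* Reached from the monitor's "info jit" command; for ad-hoc debugging the
   same dump can be pointed at stderr (sketch):

       dump_exec_info(stderr, fprintf);
*/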
4308
Benjamin Herrenschmidt82afa582012-01-10 01:35:11 +00004309/*
4310 * A helper function for the _utterly broken_ virtio device model to find
4311 * out if it's running on a big-endian machine. Don't do this at home, kids!
4312 */
4313bool virtio_is_big_endian(void);
4314bool virtio_is_big_endian(void)
4315{
4316#if defined(TARGET_WORDS_BIGENDIAN)
4317 return true;
4318#else
4319 return false;
4320#endif
4321}
4322
bellard61382a52003-10-27 21:22:23 +00004323#endif