/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#include "cputlb.h"

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
   have limited branch ranges (possibly also PPC), so place it in a
   section close to the code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32) && !defined(_WIN64)
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *, cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* In order to optimize self-modifying code handling, we count the
       number of write accesses to a given page and switch to a bitmap
       once a threshold is reached. */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

#define P_L2_LEVELS \
    (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)

uintptr_t qemu_real_host_page_size;
uintptr_t qemu_host_page_size;
uintptr_t qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageEntry PhysPageEntry;

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
    /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
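
/* Each entry is 16 bits wide, so the 15-bit 'ptr' field can address at
   most 2^15 - 1 interior nodes or sections; the all-ones value 0x7fff is
   reserved as PHYS_MAP_NODE_NIL to mark "no node allocated yet". */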

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to MemoryRegionSections.  */
static PhysPageEntry phys_map = { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };

static void io_mem_init(void);
static void memory_map_init(void);

static MemoryRegion io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
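
/* Usage sketch (illustrative): callers that merely query pass alloc = 0
   and must handle a NULL result, e.g.

       PageDesc *p = page_find(addr >> TARGET_PAGE_BITS);
       if (!p) {
           return;    // no TB has ever touched this page
       }

   whereas tb_alloc_page() below uses page_find_alloc(..., 1) so that
   intermediate table levels are created on demand. */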

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}


static void phys_page_set_level(PhysPageEntry *lp, target_phys_addr_t *index,
                                target_phys_addr_t *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    target_phys_addr_t step = (target_phys_addr_t)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(target_phys_addr_t index, target_phys_addr_t nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
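
/* To illustrate the recursion (the numbers are only an example): with
   L2_BITS = 10, a level-1 entry covers 2^10 pages.  Mapping a run of
   nb = 1536 pages that starts on a 1024-page boundary first marks one
   whole level-1 entry as a leaf (consuming 1024 pages in a single step),
   then recurses to level 0 for the remaining 512 pages, writing 512
   individual leaf entries.  Aligned, sufficiently large runs therefore
   stay compact in the upper levels of the tree. */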

MemoryRegionSection *phys_page_find(target_phys_addr_t index)
{
    PhysPageEntry lp = phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}
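
/* Note that the lookup never fails: if the walk falls off an unallocated
   branch (or never reaches a leaf), the function returns the reserved
   "unassigned" section, so callers do not need a NULL check.  The cost is
   bounded by P_L2_LEVELS pointer chases. */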

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

#define mmap_lock() do { } while (0)
#define mmap_unlock() do { } while (0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. This will change when a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Keep the buffer no bigger than 16MB to branch between blocks */
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
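
/* The max_size slack above (TCG_MAX_OP_SIZE * OPC_BUF_SIZE) leaves room
   for one worst-case translation block, so the "buffer full" check in
   tb_alloc() can be made before translating rather than after. */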

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    tcg_register_jit(code_gen_buffer, code_gen_buffer_size);
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now. */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUArchState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUArchState),
        VMSTATE_UINT32(interrupt_request, CPUArchState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUArchState *qemu_get_cpu(int cpu)
{
    CPUArchState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUArchState *env)
{
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUArchState *env1)
{
    CPUArchState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));
    }

    memset(tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof(void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for (;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

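/* The per-page TB lists and the jump lists below store a 2-bit tag in the
   low bits of each TranslationBlock pointer: for page lists the tag selects
   which of the TB's (up to two) pages the link belongs to, and in jump
   lists the value 2 marks the list head (jmp_first).  This is why the
   traversals below mask with ~3 before dereferencing. */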
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for (;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUArchState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for (;;) {
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
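
/* For example (bit numbering within a byte is LSB-first): set_bits(tab, 5, 9)
   covers bits 5..13, so it ORs mask 0xe0 into tab[0] (bits 5-7) and then
   mask 0x3f into tab[1] (bits 8-13). */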

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

TranslationBlock *tb_gen_code(CPUArchState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((uintptr_t)code_gen_ptr + code_gen_size +
                             CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
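
/* A TB can span at most two guest pages (the translators stop a block
   before it would cross into a third page), which is why registering
   page_addr[0] and, when present, page_addr[1] via tb_link_page() is
   enough for writes to either page to find and invalidate the TB. */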
ths3b46e622007-09-17 08:09:54 +00001077
Alexander Graf77a8f1a2012-05-10 22:40:10 +00001078/*
1079 * invalidate all TBs which intersect with the target physical pages
1080 * starting in range [start;end[. NOTE: start and end may refer to
1081 * different physical pages. 'is_cpu_write_access' should be true if called
1082 * from a real cpu write access: the virtual CPU will exit the current
1083 * TB if code is modified inside this TB.
1084 */
1085void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
1086 int is_cpu_write_access)
1087{
1088 while (start < end) {
1089 tb_invalidate_phys_page_range(start, end, is_cpu_write_access);
1090 start &= TARGET_PAGE_MASK;
1091 start += TARGET_PAGE_SIZE;
1092 }
1093}
1094
bellard9fa3e852004-01-04 18:06:42 +00001095/* invalidate all TBs which intersect with the target physical page
1096 starting in range [start;end[. NOTE: start and end must refer to
bellardd720b932004-04-25 17:57:43 +00001097 the same physical page. 'is_cpu_write_access' should be true if called
1098 from a real cpu write access: the virtual CPU will exit the current
1099 TB if code is modified inside this TB. */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001100void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
bellardd720b932004-04-25 17:57:43 +00001101 int is_cpu_write_access)
bellard9fa3e852004-01-04 18:06:42 +00001102{
aliguori6b917542008-11-18 19:46:41 +00001103 TranslationBlock *tb, *tb_next, *saved_tb;
Andreas Färber9349b4f2012-03-14 01:38:32 +01001104 CPUArchState *env = cpu_single_env;
Paul Brook41c1b1c2010-03-12 16:54:58 +00001105 tb_page_addr_t tb_start, tb_end;
aliguori6b917542008-11-18 19:46:41 +00001106 PageDesc *p;
1107 int n;
1108#ifdef TARGET_HAS_PRECISE_SMC
1109 int current_tb_not_found = is_cpu_write_access;
1110 TranslationBlock *current_tb = NULL;
1111 int current_tb_modified = 0;
1112 target_ulong current_pc = 0;
1113 target_ulong current_cs_base = 0;
1114 int current_flags = 0;
1115#endif /* TARGET_HAS_PRECISE_SMC */
bellard9fa3e852004-01-04 18:06:42 +00001116
1117 p = page_find(start >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001118 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00001119 return;
ths5fafdf22007-09-16 21:08:06 +00001120 if (!p->code_bitmap &&
bellardd720b932004-04-25 17:57:43 +00001121 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1122 is_cpu_write_access) {
bellard9fa3e852004-01-04 18:06:42 +00001123 /* build code bitmap */
1124 build_page_bitmap(p);
1125 }
1126
1127 /* we remove all the TBs in the range [start, end[ */
1128 /* XXX: see if in some cases it could be faster to invalidate all the code */
1129 tb = p->first_tb;
1130 while (tb != NULL) {
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001131 n = (uintptr_t)tb & 3;
1132 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
bellard9fa3e852004-01-04 18:06:42 +00001133 tb_next = tb->page_next[n];
1134 /* NOTE: this is subtle as a TB may span two physical pages */
1135 if (n == 0) {
1136 /* NOTE: tb_end may be after the end of the page, but
1137 it is not a problem */
1138 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1139 tb_end = tb_start + tb->size;
1140 } else {
1141 tb_start = tb->page_addr[1];
1142 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1143 }
1144 if (!(tb_end <= start || tb_start >= end)) {
bellardd720b932004-04-25 17:57:43 +00001145#ifdef TARGET_HAS_PRECISE_SMC
1146 if (current_tb_not_found) {
1147 current_tb_not_found = 0;
1148 current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001149 if (env->mem_io_pc) {
bellardd720b932004-04-25 17:57:43 +00001150 /* now we have a real cpu fault */
pbrook2e70f6e2008-06-29 01:03:05 +00001151 current_tb = tb_find_pc(env->mem_io_pc);
bellardd720b932004-04-25 17:57:43 +00001152 }
1153 }
1154 if (current_tb == tb &&
pbrook2e70f6e2008-06-29 01:03:05 +00001155 (current_tb->cflags & CF_COUNT_MASK) != 1) {
bellardd720b932004-04-25 17:57:43 +00001156 /* If we are modifying the current TB, we must stop
1157 its execution. We could be more precise by checking
1158 that the modification is after the current PC, but it
1159 would require a specialized function to partially
1160 restore the CPU state */
ths3b46e622007-09-17 08:09:54 +00001161
bellardd720b932004-04-25 17:57:43 +00001162 current_tb_modified = 1;
Stefan Weil618ba8e2011-04-18 06:39:53 +00001163 cpu_restore_state(current_tb, env, env->mem_io_pc);
aliguori6b917542008-11-18 19:46:41 +00001164 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1165 &current_flags);
bellardd720b932004-04-25 17:57:43 +00001166 }
1167#endif /* TARGET_HAS_PRECISE_SMC */
bellard6f5a9f72005-11-26 20:12:28 +00001168 /* we need to do that to handle the case where a signal
1169 occurs while doing tb_phys_invalidate() */
1170 saved_tb = NULL;
1171 if (env) {
1172 saved_tb = env->current_tb;
1173 env->current_tb = NULL;
1174 }
bellard9fa3e852004-01-04 18:06:42 +00001175 tb_phys_invalidate(tb, -1);
bellard6f5a9f72005-11-26 20:12:28 +00001176 if (env) {
1177 env->current_tb = saved_tb;
1178 if (env->interrupt_request && env->current_tb)
1179 cpu_interrupt(env, env->interrupt_request);
1180 }
bellard9fa3e852004-01-04 18:06:42 +00001181 }
1182 tb = tb_next;
1183 }
1184#if !defined(CONFIG_USER_ONLY)
1185 /* if no code remaining, no need to continue to use slow writes */
1186 if (!p->first_tb) {
1187 invalidate_page_bitmap(p);
bellardd720b932004-04-25 17:57:43 +00001188 if (is_cpu_write_access) {
pbrook2e70f6e2008-06-29 01:03:05 +00001189 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
bellardd720b932004-04-25 17:57:43 +00001190 }
1191 }
1192#endif
1193#ifdef TARGET_HAS_PRECISE_SMC
1194 if (current_tb_modified) {
1195 /* we generate a block containing just the instruction
1196 modifying the memory. It will ensure that it cannot modify
1197 itself */
bellardea1c1802004-06-14 18:56:36 +00001198 env->current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001199 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
bellardd720b932004-04-25 17:57:43 +00001200 cpu_resume_from_signal(env, NULL);
bellard9fa3e852004-01-04 18:06:42 +00001201 }
1202#endif
1203}
1204
1205/* len must be <= 8 and start must be a multiple of len */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001206static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
bellard9fa3e852004-01-04 18:06:42 +00001207{
1208 PageDesc *p;
1209 int offset, b;
bellard59817cc2004-02-16 22:01:13 +00001210#if 0
bellarda4193c82004-06-03 14:01:43 +00001211 if (1) {
aliguori93fcfe32009-01-15 22:34:14 +00001212 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1213 cpu_single_env->mem_io_vaddr, len,
1214 cpu_single_env->eip,
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001215 cpu_single_env->eip +
1216 (intptr_t)cpu_single_env->segs[R_CS].base);
bellard59817cc2004-02-16 22:01:13 +00001217 }
1218#endif
bellard9fa3e852004-01-04 18:06:42 +00001219 p = page_find(start >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001220 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00001221 return;
1222 if (p->code_bitmap) {
1223 offset = start & ~TARGET_PAGE_MASK;
1224 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1225 if (b & ((1 << len) - 1))
1226 goto do_invalidate;
1227 } else {
1228 do_invalidate:
bellardd720b932004-04-25 17:57:43 +00001229 tb_invalidate_phys_page_range(start, start + len, 1);
bellard9fa3e852004-01-04 18:06:42 +00001230 }
1231}
1232
bellard9fa3e852004-01-04 18:06:42 +00001233#if !defined(CONFIG_SOFTMMU)
Paul Brook41c1b1c2010-03-12 16:54:58 +00001234static void tb_invalidate_phys_page(tb_page_addr_t addr,
Blue Swirl20503962012-04-09 14:20:20 +00001235 uintptr_t pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00001236{
aliguori6b917542008-11-18 19:46:41 +00001237 TranslationBlock *tb;
bellard9fa3e852004-01-04 18:06:42 +00001238 PageDesc *p;
aliguori6b917542008-11-18 19:46:41 +00001239 int n;
bellardd720b932004-04-25 17:57:43 +00001240#ifdef TARGET_HAS_PRECISE_SMC
aliguori6b917542008-11-18 19:46:41 +00001241 TranslationBlock *current_tb = NULL;
Andreas Färber9349b4f2012-03-14 01:38:32 +01001242 CPUArchState *env = cpu_single_env;
aliguori6b917542008-11-18 19:46:41 +00001243 int current_tb_modified = 0;
1244 target_ulong current_pc = 0;
1245 target_ulong current_cs_base = 0;
1246 int current_flags = 0;
bellardd720b932004-04-25 17:57:43 +00001247#endif
bellard9fa3e852004-01-04 18:06:42 +00001248
1249 addr &= TARGET_PAGE_MASK;
1250 p = page_find(addr >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001251 if (!p)
bellardfd6ce8f2003-05-14 19:00:11 +00001252 return;
1253 tb = p->first_tb;
bellardd720b932004-04-25 17:57:43 +00001254#ifdef TARGET_HAS_PRECISE_SMC
1255 if (tb && pc != 0) {
1256 current_tb = tb_find_pc(pc);
1257 }
1258#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001259 while (tb != NULL) {
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001260 n = (uintptr_t)tb & 3;
1261 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
bellardd720b932004-04-25 17:57:43 +00001262#ifdef TARGET_HAS_PRECISE_SMC
1263 if (current_tb == tb &&
pbrook2e70f6e2008-06-29 01:03:05 +00001264 (current_tb->cflags & CF_COUNT_MASK) != 1) {
bellardd720b932004-04-25 17:57:43 +00001265 /* If we are modifying the current TB, we must stop
1266 its execution. We could be more precise by checking
1267 that the modification is after the current PC, but it
1268 would require a specialized function to partially
1269 restore the CPU state */
ths3b46e622007-09-17 08:09:54 +00001270
bellardd720b932004-04-25 17:57:43 +00001271 current_tb_modified = 1;
Stefan Weil618ba8e2011-04-18 06:39:53 +00001272 cpu_restore_state(current_tb, env, pc);
aliguori6b917542008-11-18 19:46:41 +00001273 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1274 &current_flags);
bellardd720b932004-04-25 17:57:43 +00001275 }
1276#endif /* TARGET_HAS_PRECISE_SMC */
bellard9fa3e852004-01-04 18:06:42 +00001277 tb_phys_invalidate(tb, addr);
1278 tb = tb->page_next[n];
bellardfd6ce8f2003-05-14 19:00:11 +00001279 }
1280 p->first_tb = NULL;
bellardd720b932004-04-25 17:57:43 +00001281#ifdef TARGET_HAS_PRECISE_SMC
1282 if (current_tb_modified) {
1283        /* we generate a block containing just the instruction
1284           modifying the memory; this ensures that the new block
1285           cannot modify itself */
bellardea1c1802004-06-14 18:56:36 +00001286 env->current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001287 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
bellardd720b932004-04-25 17:57:43 +00001288 cpu_resume_from_signal(env, puc);
1289 }
1290#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001291}
bellard9fa3e852004-01-04 18:06:42 +00001292#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001293
1294/* add the tb to the target page and protect it if necessary */
ths5fafdf22007-09-16 21:08:06 +00001295static inline void tb_alloc_page(TranslationBlock *tb,
Paul Brook41c1b1c2010-03-12 16:54:58 +00001296 unsigned int n, tb_page_addr_t page_addr)
bellardfd6ce8f2003-05-14 19:00:11 +00001297{
1298 PageDesc *p;
Juan Quintela4429ab42011-06-02 01:53:44 +00001299#ifndef CONFIG_USER_ONLY
1300 bool page_already_protected;
1301#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001302
bellard9fa3e852004-01-04 18:06:42 +00001303 tb->page_addr[n] = page_addr;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001304 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
bellard9fa3e852004-01-04 18:06:42 +00001305 tb->page_next[n] = p->first_tb;
Juan Quintela4429ab42011-06-02 01:53:44 +00001306#ifndef CONFIG_USER_ONLY
1307 page_already_protected = p->first_tb != NULL;
1308#endif
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001309 p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
bellard9fa3e852004-01-04 18:06:42 +00001310 invalidate_page_bitmap(p);
1311
bellard107db442004-06-22 18:48:46 +00001312#if defined(TARGET_HAS_SMC) || 1
bellardd720b932004-04-25 17:57:43 +00001313
bellard9fa3e852004-01-04 18:06:42 +00001314#if defined(CONFIG_USER_ONLY)
bellardfd6ce8f2003-05-14 19:00:11 +00001315 if (p->flags & PAGE_WRITE) {
pbrook53a59602006-03-25 19:31:22 +00001316 target_ulong addr;
1317 PageDesc *p2;
bellard9fa3e852004-01-04 18:06:42 +00001318 int prot;
1319
bellardfd6ce8f2003-05-14 19:00:11 +00001320        /* force the host page to be non-writable (writes will take a
1321           page fault + mprotect overhead) */
pbrook53a59602006-03-25 19:31:22 +00001322 page_addr &= qemu_host_page_mask;
bellardfd6ce8f2003-05-14 19:00:11 +00001323 prot = 0;
pbrook53a59602006-03-25 19:31:22 +00001324 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1325 addr += TARGET_PAGE_SIZE) {
1326
1327 p2 = page_find (addr >> TARGET_PAGE_BITS);
1328 if (!p2)
1329 continue;
1330 prot |= p2->flags;
1331 p2->flags &= ~PAGE_WRITE;
pbrook53a59602006-03-25 19:31:22 +00001332 }
ths5fafdf22007-09-16 21:08:06 +00001333 mprotect(g2h(page_addr), qemu_host_page_size,
bellardfd6ce8f2003-05-14 19:00:11 +00001334 (prot & PAGE_BITS) & ~PAGE_WRITE);
1335#ifdef DEBUG_TB_INVALIDATE
blueswir1ab3d1722007-11-04 07:31:40 +00001336 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
pbrook53a59602006-03-25 19:31:22 +00001337 page_addr);
bellardfd6ce8f2003-05-14 19:00:11 +00001338#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001339 }
bellard9fa3e852004-01-04 18:06:42 +00001340#else
1341 /* if some code is already present, then the pages are already
1342 protected. So we handle the case where only the first TB is
1343 allocated in a physical page */
Juan Quintela4429ab42011-06-02 01:53:44 +00001344 if (!page_already_protected) {
bellard6a00d602005-11-21 23:25:50 +00001345 tlb_protect_code(page_addr);
bellard9fa3e852004-01-04 18:06:42 +00001346 }
1347#endif
bellardd720b932004-04-25 17:57:43 +00001348
1349#endif /* TARGET_HAS_SMC */
bellardfd6ce8f2003-05-14 19:00:11 +00001350}
1351
bellard9fa3e852004-01-04 18:06:42 +00001352/* add a new TB and link it to the physical page tables. phys_page2 is
1353 (-1) to indicate that only one page contains the TB. */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001354void tb_link_page(TranslationBlock *tb,
1355 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
bellardd4e81642003-05-25 16:46:15 +00001356{
bellard9fa3e852004-01-04 18:06:42 +00001357 unsigned int h;
1358 TranslationBlock **ptb;
1359
pbrookc8a706f2008-06-02 16:16:42 +00001360 /* Grab the mmap lock to stop another thread invalidating this TB
1361 before we are done. */
1362 mmap_lock();
bellard9fa3e852004-01-04 18:06:42 +00001363 /* add in the physical hash table */
1364 h = tb_phys_hash_func(phys_pc);
1365 ptb = &tb_phys_hash[h];
1366 tb->phys_hash_next = *ptb;
1367 *ptb = tb;
bellardfd6ce8f2003-05-14 19:00:11 +00001368
1369 /* add in the page list */
bellard9fa3e852004-01-04 18:06:42 +00001370 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1371 if (phys_page2 != -1)
1372 tb_alloc_page(tb, 1, phys_page2);
1373 else
1374 tb->page_addr[1] = -1;
bellard9fa3e852004-01-04 18:06:42 +00001375
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001376 tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
bellardd4e81642003-05-25 16:46:15 +00001377 tb->jmp_next[0] = NULL;
1378 tb->jmp_next[1] = NULL;
1379
1380 /* init original jump addresses */
1381 if (tb->tb_next_offset[0] != 0xffff)
1382 tb_reset_jump(tb, 0);
1383 if (tb->tb_next_offset[1] != 0xffff)
1384 tb_reset_jump(tb, 1);
bellard8a40a182005-11-20 10:35:40 +00001385
1386#ifdef DEBUG_TB_CHECK
1387 tb_page_check();
1388#endif
pbrookc8a706f2008-06-02 16:16:42 +00001389 mmap_unlock();
bellardfd6ce8f2003-05-14 19:00:11 +00001390}
1391
bellarda513fe12003-05-27 23:29:48 +00001392/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1393 tb[1].tc_ptr. Return NULL if not found */
Stefan Weil6375e092012-04-06 22:26:15 +02001394TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
bellarda513fe12003-05-27 23:29:48 +00001395{
1396 int m_min, m_max, m;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001397 uintptr_t v;
bellarda513fe12003-05-27 23:29:48 +00001398 TranslationBlock *tb;
1399
1400 if (nb_tbs <= 0)
1401 return NULL;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001402 if (tc_ptr < (uintptr_t)code_gen_buffer ||
1403 tc_ptr >= (uintptr_t)code_gen_ptr) {
bellarda513fe12003-05-27 23:29:48 +00001404 return NULL;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001405 }
bellarda513fe12003-05-27 23:29:48 +00001406 /* binary search (cf Knuth) */
1407 m_min = 0;
1408 m_max = nb_tbs - 1;
1409 while (m_min <= m_max) {
1410 m = (m_min + m_max) >> 1;
1411 tb = &tbs[m];
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001412 v = (uintptr_t)tb->tc_ptr;
bellarda513fe12003-05-27 23:29:48 +00001413 if (v == tc_ptr)
1414 return tb;
1415 else if (tc_ptr < v) {
1416 m_max = m - 1;
1417 } else {
1418 m_min = m + 1;
1419 }
ths5fafdf22007-09-16 21:08:06 +00001420 }
bellarda513fe12003-05-27 23:29:48 +00001421 return &tbs[m_max];
1422}
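/* Illustrative sketch (not compiled): this is the pattern the fault
   handlers in this file follow -- map a host PC captured from a signal
   context back to the TB whose generated code contains it, then
   recover the guest CPU state at that point.  'host_pc' and 'env' are
   assumed inputs. */
#if 0
static void example_recover_from_fault(CPUArchState *env, uintptr_t host_pc)
{
    TranslationBlock *tb = tb_find_pc(host_pc);

    if (tb) {
        /* host_pc falls inside tb's generated code */
        cpu_restore_state(tb, env, host_pc);
    }
}
#endif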
bellard75012672003-06-21 13:11:07 +00001423
bellardea041c02003-06-25 16:16:50 +00001424static void tb_reset_jump_recursive(TranslationBlock *tb);
1425
1426static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1427{
1428 TranslationBlock *tb1, *tb_next, **ptb;
1429 unsigned int n1;
1430
1431 tb1 = tb->jmp_next[n];
1432 if (tb1 != NULL) {
1433 /* find head of list */
1434 for(;;) {
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001435 n1 = (uintptr_t)tb1 & 3;
1436 tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
bellardea041c02003-06-25 16:16:50 +00001437 if (n1 == 2)
1438 break;
1439 tb1 = tb1->jmp_next[n1];
1440 }
1441        /* we are now sure that tb jumps to tb1 */
1442 tb_next = tb1;
1443
1444 /* remove tb from the jmp_first list */
1445 ptb = &tb_next->jmp_first;
1446 for(;;) {
1447 tb1 = *ptb;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001448 n1 = (uintptr_t)tb1 & 3;
1449 tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
bellardea041c02003-06-25 16:16:50 +00001450 if (n1 == n && tb1 == tb)
1451 break;
1452 ptb = &tb1->jmp_next[n1];
1453 }
1454 *ptb = tb->jmp_next[n];
1455 tb->jmp_next[n] = NULL;
ths3b46e622007-09-17 08:09:54 +00001456
bellardea041c02003-06-25 16:16:50 +00001457 /* suppress the jump to next tb in generated code */
1458 tb_reset_jump(tb, n);
1459
bellard01243112004-01-04 15:48:17 +00001460        /* suppress jumps in the tb we could have jumped to */
bellardea041c02003-06-25 16:16:50 +00001461 tb_reset_jump_recursive(tb_next);
1462 }
1463}
1464
1465static void tb_reset_jump_recursive(TranslationBlock *tb)
1466{
1467 tb_reset_jump_recursive2(tb, 0);
1468 tb_reset_jump_recursive2(tb, 1);
1469}
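/* Note on the encoding walked above (illustrative): TranslationBlock
   pointers are at least 4-byte aligned, so the two low bits of
   jmp_first/jmp_next values carry a tag.  Tags 0 and 1 mean "continue
   at jmp_next[tag] of the pointed-to TB"; tag 2 marks the list head
   stored in jmp_first.  Decoding is always the same pair of steps:
       n1  = (uintptr_t)tb1 & 3;                          tag
       tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);   real pointer */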
1470
bellard1fddef42005-04-17 19:16:13 +00001471#if defined(TARGET_HAS_ICE)
Paul Brook94df27f2010-02-28 23:47:45 +00001472#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +01001473static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
Paul Brook94df27f2010-02-28 23:47:45 +00001474{
1475 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1476}
1477#else
Max Filippov1e7855a2012-04-10 02:48:17 +04001478void tb_invalidate_phys_addr(target_phys_addr_t addr)
bellardd720b932004-04-25 17:57:43 +00001479{
Anthony Liguoric227f092009-10-01 16:12:16 -05001480 ram_addr_t ram_addr;
Avi Kivityf3705d52012-03-08 16:16:34 +02001481 MemoryRegionSection *section;
bellardd720b932004-04-25 17:57:43 +00001482
Avi Kivity06ef3522012-02-13 16:11:22 +02001483 section = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf3705d52012-03-08 16:16:34 +02001484 if (!(memory_region_is_ram(section->mr)
1485 || (section->mr->rom_device && section->mr->readable))) {
Avi Kivity06ef3522012-02-13 16:11:22 +02001486 return;
1487 }
Avi Kivityf3705d52012-03-08 16:16:34 +02001488 ram_addr = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00001489 + memory_region_section_addr(section, addr);
pbrook706cd4b2006-04-08 17:36:21 +00001490 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
bellardd720b932004-04-25 17:57:43 +00001491}
Max Filippov1e7855a2012-04-10 02:48:17 +04001492
1493static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
1494{
1495 tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc));
1496}
bellardc27004e2005-01-03 23:35:10 +00001497#endif
Paul Brook94df27f2010-02-28 23:47:45 +00001498#endif /* TARGET_HAS_ICE */
bellardd720b932004-04-25 17:57:43 +00001499
Paul Brookc527ee82010-03-01 03:31:14 +00001500#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +01001501void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
Paul Brookc527ee82010-03-01 03:31:14 +00001502{
1504}
1505
Andreas Färber9349b4f2012-03-14 01:38:32 +01001506int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
Paul Brookc527ee82010-03-01 03:31:14 +00001507 int flags, CPUWatchpoint **watchpoint)
1508{
1509 return -ENOSYS;
1510}
1511#else
pbrook6658ffb2007-03-16 23:58:11 +00001512/* Add a watchpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001513int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
aliguoria1d1bb32008-11-18 20:07:32 +00001514 int flags, CPUWatchpoint **watchpoint)
pbrook6658ffb2007-03-16 23:58:11 +00001515{
aliguorib4051332008-11-18 20:14:20 +00001516 target_ulong len_mask = ~(len - 1);
aliguoric0ce9982008-11-25 22:13:57 +00001517 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001518
aliguorib4051332008-11-18 20:14:20 +00001519 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
Max Filippov0dc23822012-01-29 03:15:23 +04001520 if ((len & (len - 1)) || (addr & ~len_mask) ||
1521 len == 0 || len > TARGET_PAGE_SIZE) {
aliguorib4051332008-11-18 20:14:20 +00001522 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1523 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1524 return -EINVAL;
1525 }
Anthony Liguori7267c092011-08-20 22:09:37 -05001526 wp = g_malloc(sizeof(*wp));
pbrook6658ffb2007-03-16 23:58:11 +00001527
aliguoria1d1bb32008-11-18 20:07:32 +00001528 wp->vaddr = addr;
aliguorib4051332008-11-18 20:14:20 +00001529 wp->len_mask = len_mask;
aliguoria1d1bb32008-11-18 20:07:32 +00001530 wp->flags = flags;
1531
aliguori2dc9f412008-11-18 20:56:59 +00001532 /* keep all GDB-injected watchpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001533 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001534 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001535 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001536 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001537
pbrook6658ffb2007-03-16 23:58:11 +00001538 tlb_flush_page(env, addr);
aliguoria1d1bb32008-11-18 20:07:32 +00001539
1540 if (watchpoint)
1541 *watchpoint = wp;
1542 return 0;
pbrook6658ffb2007-03-16 23:58:11 +00001543}
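/* Illustrative sketch (not compiled): setting a 4-byte write watchpoint
   as a debugger stub would.  Lengths must be a power of two no larger
   than a page and the address aligned to them, per the check above;
   len_mask = ~(len - 1) then lets the hit test compare addresses with a
   single AND.  'env' and 'addr' are assumed inputs. */
#if 0
static int example_set_write_watch(CPUArchState *env, target_ulong addr)
{
    CPUWatchpoint *wp;

    /* addr must be 4-byte aligned here, or -EINVAL comes back */
    return cpu_watchpoint_insert(env, addr, 4, BP_GDB | BP_MEM_WRITE, &wp);
}
#endif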
1544
aliguoria1d1bb32008-11-18 20:07:32 +00001545/* Remove a specific watchpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001546int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
aliguoria1d1bb32008-11-18 20:07:32 +00001547 int flags)
pbrook6658ffb2007-03-16 23:58:11 +00001548{
aliguorib4051332008-11-18 20:14:20 +00001549 target_ulong len_mask = ~(len - 1);
aliguoria1d1bb32008-11-18 20:07:32 +00001550 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001551
Blue Swirl72cf2d42009-09-12 07:36:22 +00001552 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001553 if (addr == wp->vaddr && len_mask == wp->len_mask
aliguori6e140f22008-11-18 20:37:55 +00001554 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
aliguoria1d1bb32008-11-18 20:07:32 +00001555 cpu_watchpoint_remove_by_ref(env, wp);
pbrook6658ffb2007-03-16 23:58:11 +00001556 return 0;
1557 }
1558 }
aliguoria1d1bb32008-11-18 20:07:32 +00001559 return -ENOENT;
pbrook6658ffb2007-03-16 23:58:11 +00001560}
1561
aliguoria1d1bb32008-11-18 20:07:32 +00001562/* Remove a specific watchpoint by reference. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001563void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
aliguoria1d1bb32008-11-18 20:07:32 +00001564{
Blue Swirl72cf2d42009-09-12 07:36:22 +00001565 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
edgar_igl7d03f822008-05-17 18:58:29 +00001566
aliguoria1d1bb32008-11-18 20:07:32 +00001567 tlb_flush_page(env, watchpoint->vaddr);
1568
Anthony Liguori7267c092011-08-20 22:09:37 -05001569 g_free(watchpoint);
edgar_igl7d03f822008-05-17 18:58:29 +00001570}
1571
aliguoria1d1bb32008-11-18 20:07:32 +00001572/* Remove all matching watchpoints. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001573void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
aliguoria1d1bb32008-11-18 20:07:32 +00001574{
aliguoric0ce9982008-11-25 22:13:57 +00001575 CPUWatchpoint *wp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001576
Blue Swirl72cf2d42009-09-12 07:36:22 +00001577 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001578 if (wp->flags & mask)
1579 cpu_watchpoint_remove_by_ref(env, wp);
aliguoric0ce9982008-11-25 22:13:57 +00001580 }
aliguoria1d1bb32008-11-18 20:07:32 +00001581}
Paul Brookc527ee82010-03-01 03:31:14 +00001582#endif
aliguoria1d1bb32008-11-18 20:07:32 +00001583
1584/* Add a breakpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001585int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
aliguoria1d1bb32008-11-18 20:07:32 +00001586 CPUBreakpoint **breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001587{
bellard1fddef42005-04-17 19:16:13 +00001588#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001589 CPUBreakpoint *bp;
ths3b46e622007-09-17 08:09:54 +00001590
Anthony Liguori7267c092011-08-20 22:09:37 -05001591 bp = g_malloc(sizeof(*bp));
aliguoria1d1bb32008-11-18 20:07:32 +00001592
1593 bp->pc = pc;
1594 bp->flags = flags;
1595
aliguori2dc9f412008-11-18 20:56:59 +00001596 /* keep all GDB-injected breakpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001597 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001598 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001599 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001600 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001601
1602 breakpoint_invalidate(env, pc);
1603
1604 if (breakpoint)
1605 *breakpoint = bp;
1606 return 0;
1607#else
1608 return -ENOSYS;
1609#endif
1610}
1611
1612/* Remove a specific breakpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001613int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
aliguoria1d1bb32008-11-18 20:07:32 +00001614{
1615#if defined(TARGET_HAS_ICE)
1616 CPUBreakpoint *bp;
1617
Blue Swirl72cf2d42009-09-12 07:36:22 +00001618 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00001619 if (bp->pc == pc && bp->flags == flags) {
1620 cpu_breakpoint_remove_by_ref(env, bp);
bellard4c3a88a2003-07-26 12:06:08 +00001621 return 0;
aliguoria1d1bb32008-11-18 20:07:32 +00001622 }
bellard4c3a88a2003-07-26 12:06:08 +00001623 }
aliguoria1d1bb32008-11-18 20:07:32 +00001624 return -ENOENT;
bellard4c3a88a2003-07-26 12:06:08 +00001625#else
aliguoria1d1bb32008-11-18 20:07:32 +00001626 return -ENOSYS;
bellard4c3a88a2003-07-26 12:06:08 +00001627#endif
1628}
1629
aliguoria1d1bb32008-11-18 20:07:32 +00001630/* Remove a specific breakpoint by reference. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001631void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001632{
bellard1fddef42005-04-17 19:16:13 +00001633#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001634 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
bellardd720b932004-04-25 17:57:43 +00001635
aliguoria1d1bb32008-11-18 20:07:32 +00001636 breakpoint_invalidate(env, breakpoint->pc);
1637
Anthony Liguori7267c092011-08-20 22:09:37 -05001638 g_free(breakpoint);
aliguoria1d1bb32008-11-18 20:07:32 +00001639#endif
1640}
1641
1642/* Remove all matching breakpoints. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001643void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
aliguoria1d1bb32008-11-18 20:07:32 +00001644{
1645#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001646 CPUBreakpoint *bp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001647
Blue Swirl72cf2d42009-09-12 07:36:22 +00001648 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001649 if (bp->flags & mask)
1650 cpu_breakpoint_remove_by_ref(env, bp);
aliguoric0ce9982008-11-25 22:13:57 +00001651 }
bellard4c3a88a2003-07-26 12:06:08 +00001652#endif
1653}
1654
bellardc33a3462003-07-29 20:50:33 +00001655/* enable or disable single step mode. EXCP_DEBUG is returned by the
1656 CPU loop after each instruction */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001657void cpu_single_step(CPUArchState *env, int enabled)
bellardc33a3462003-07-29 20:50:33 +00001658{
bellard1fddef42005-04-17 19:16:13 +00001659#if defined(TARGET_HAS_ICE)
bellardc33a3462003-07-29 20:50:33 +00001660 if (env->singlestep_enabled != enabled) {
1661 env->singlestep_enabled = enabled;
aliguorie22a25c2009-03-12 20:12:48 +00001662 if (kvm_enabled())
1663 kvm_update_guest_debug(env, 0);
1664 else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01001665 /* must flush all the translated code to avoid inconsistencies */
aliguorie22a25c2009-03-12 20:12:48 +00001666 /* XXX: only flush what is necessary */
1667 tb_flush(env);
1668 }
bellardc33a3462003-07-29 20:50:33 +00001669 }
1670#endif
1671}
1672
bellard34865132003-10-05 14:28:56 +00001673/* enable or disable low level logging */
1674void cpu_set_log(int log_flags)
1675{
1676 loglevel = log_flags;
1677 if (loglevel && !logfile) {
pbrook11fcfab2007-07-01 18:21:11 +00001678 logfile = fopen(logfilename, log_append ? "a" : "w");
bellard34865132003-10-05 14:28:56 +00001679 if (!logfile) {
1680 perror(logfilename);
1681 _exit(1);
1682 }
bellard9fa3e852004-01-04 18:06:42 +00001683#if !defined(CONFIG_SOFTMMU)
1684        /* avoid glibc using mmap() for stdio by setting a buffer "by hand" */
1685 {
blueswir1b55266b2008-09-20 08:07:15 +00001686 static char logfile_buf[4096];
bellard9fa3e852004-01-04 18:06:42 +00001687 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1688 }
Stefan Weildaf767b2011-12-03 22:32:37 +01001689#elif defined(_WIN32)
1690 /* Win32 doesn't support line-buffering, so use unbuffered output. */
1691 setvbuf(logfile, NULL, _IONBF, 0);
1692#else
bellard34865132003-10-05 14:28:56 +00001693 setvbuf(logfile, NULL, _IOLBF, 0);
bellard9fa3e852004-01-04 18:06:42 +00001694#endif
pbrooke735b912007-06-30 13:53:24 +00001695 log_append = 1;
1696 }
1697 if (!loglevel && logfile) {
1698 fclose(logfile);
1699 logfile = NULL;
bellard34865132003-10-05 14:28:56 +00001700 }
1701}
1702
1703void cpu_set_log_filename(const char *filename)
1704{
1705 logfilename = strdup(filename);
pbrooke735b912007-06-30 13:53:24 +00001706 if (logfile) {
1707 fclose(logfile);
1708 logfile = NULL;
1709 }
1710 cpu_set_log(loglevel);
bellard34865132003-10-05 14:28:56 +00001711}
bellardc33a3462003-07-29 20:50:33 +00001712
Andreas Färber9349b4f2012-03-14 01:38:32 +01001713static void cpu_unlink_tb(CPUArchState *env)
bellardea041c02003-06-25 16:16:50 +00001714{
pbrookd5975362008-06-07 20:50:51 +00001715 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1716 problem and hope the cpu will stop of its own accord. For userspace
1717 emulation this often isn't actually as bad as it sounds. Often
1718 signals are used primarily to interrupt blocking syscalls. */
aurel323098dba2009-03-07 21:28:24 +00001719 TranslationBlock *tb;
Anthony Liguoric227f092009-10-01 16:12:16 -05001720 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
aurel323098dba2009-03-07 21:28:24 +00001721
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001722 spin_lock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001723 tb = env->current_tb;
1724 /* if the cpu is currently executing code, we must unlink it and
1725       all the potentially executing TBs */
Riku Voipiof76cfe52009-12-04 15:16:30 +02001726 if (tb) {
aurel323098dba2009-03-07 21:28:24 +00001727 env->current_tb = NULL;
1728 tb_reset_jump_recursive(tb);
aurel323098dba2009-03-07 21:28:24 +00001729 }
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001730 spin_unlock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001731}
1732
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001733#ifndef CONFIG_USER_ONLY
aurel323098dba2009-03-07 21:28:24 +00001734/* mask must never be zero, except for A20 change call */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001735static void tcg_handle_interrupt(CPUArchState *env, int mask)
aurel323098dba2009-03-07 21:28:24 +00001736{
1737 int old_mask;
1738
1739 old_mask = env->interrupt_request;
1740 env->interrupt_request |= mask;
1741
aliguori8edac962009-04-24 18:03:45 +00001742 /*
1743 * If called from iothread context, wake the target cpu in
1744     * case it's halted.
1745 */
Jan Kiszkab7680cb2011-03-12 17:43:51 +01001746 if (!qemu_cpu_is_self(env)) {
aliguori8edac962009-04-24 18:03:45 +00001747 qemu_cpu_kick(env);
1748 return;
1749 }
aliguori8edac962009-04-24 18:03:45 +00001750
pbrook2e70f6e2008-06-29 01:03:05 +00001751 if (use_icount) {
pbrook266910c2008-07-09 15:31:50 +00001752 env->icount_decr.u16.high = 0xffff;
pbrook2e70f6e2008-06-29 01:03:05 +00001753 if (!can_do_io(env)
aurel32be214e62009-03-06 21:48:00 +00001754 && (mask & ~old_mask) != 0) {
pbrook2e70f6e2008-06-29 01:03:05 +00001755 cpu_abort(env, "Raised interrupt while not in I/O function");
1756 }
pbrook2e70f6e2008-06-29 01:03:05 +00001757 } else {
aurel323098dba2009-03-07 21:28:24 +00001758 cpu_unlink_tb(env);
bellardea041c02003-06-25 16:16:50 +00001759 }
1760}
1761
Jan Kiszkaec6959d2011-04-13 01:32:56 +02001762CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1763
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001764#else /* CONFIG_USER_ONLY */
1765
Andreas Färber9349b4f2012-03-14 01:38:32 +01001766void cpu_interrupt(CPUArchState *env, int mask)
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001767{
1768 env->interrupt_request |= mask;
1769 cpu_unlink_tb(env);
1770}
1771#endif /* CONFIG_USER_ONLY */
1772
Andreas Färber9349b4f2012-03-14 01:38:32 +01001773void cpu_reset_interrupt(CPUArchState *env, int mask)
bellardb54ad042004-05-20 13:42:52 +00001774{
1775 env->interrupt_request &= ~mask;
1776}
1777
Andreas Färber9349b4f2012-03-14 01:38:32 +01001778void cpu_exit(CPUArchState *env)
aurel323098dba2009-03-07 21:28:24 +00001779{
1780 env->exit_request = 1;
1781 cpu_unlink_tb(env);
1782}
1783
blueswir1c7cd6a32008-10-02 18:27:46 +00001784const CPULogItem cpu_log_items[] = {
ths5fafdf22007-09-16 21:08:06 +00001785 { CPU_LOG_TB_OUT_ASM, "out_asm",
bellardf193c792004-03-21 17:06:25 +00001786 "show generated host assembly code for each compiled TB" },
1787 { CPU_LOG_TB_IN_ASM, "in_asm",
1788 "show target assembly code for each compiled TB" },
ths5fafdf22007-09-16 21:08:06 +00001789 { CPU_LOG_TB_OP, "op",
bellard57fec1f2008-02-01 10:50:11 +00001790 "show micro ops for each compiled TB" },
bellardf193c792004-03-21 17:06:25 +00001791 { CPU_LOG_TB_OP_OPT, "op_opt",
blueswir1e01a1152008-03-14 17:37:11 +00001792 "show micro ops "
1793#ifdef TARGET_I386
1794 "before eflags optimization and "
bellardf193c792004-03-21 17:06:25 +00001795#endif
blueswir1e01a1152008-03-14 17:37:11 +00001796 "after liveness analysis" },
bellardf193c792004-03-21 17:06:25 +00001797 { CPU_LOG_INT, "int",
1798 "show interrupts/exceptions in short format" },
1799 { CPU_LOG_EXEC, "exec",
1800 "show trace before each executed TB (lots of logs)" },
bellard9fddaa02004-05-21 12:59:32 +00001801 { CPU_LOG_TB_CPU, "cpu",
thse91c8a72007-06-03 13:35:16 +00001802 "show CPU state before block translation" },
bellardf193c792004-03-21 17:06:25 +00001803#ifdef TARGET_I386
1804 { CPU_LOG_PCALL, "pcall",
1805 "show protected mode far calls/returns/exceptions" },
aliguorieca1bdf2009-01-26 19:54:31 +00001806 { CPU_LOG_RESET, "cpu_reset",
1807 "show CPU state before CPU resets" },
bellardf193c792004-03-21 17:06:25 +00001808#endif
bellard8e3a9fd2004-10-09 17:32:58 +00001809#ifdef DEBUG_IOPORT
bellardfd872592004-05-12 19:11:15 +00001810 { CPU_LOG_IOPORT, "ioport",
1811 "show all i/o ports accesses" },
bellard8e3a9fd2004-10-09 17:32:58 +00001812#endif
bellardf193c792004-03-21 17:06:25 +00001813 { 0, NULL, NULL },
1814};
1815
1816static int cmp1(const char *s1, int n, const char *s2)
1817{
1818 if (strlen(s2) != n)
1819 return 0;
1820 return memcmp(s1, s2, n) == 0;
1821}
ths3b46e622007-09-17 08:09:54 +00001822
bellardf193c792004-03-21 17:06:25 +00001823/* takes a comma separated list of log masks. Return 0 if error. */
1824int cpu_str_to_log_mask(const char *str)
1825{
blueswir1c7cd6a32008-10-02 18:27:46 +00001826 const CPULogItem *item;
bellardf193c792004-03-21 17:06:25 +00001827 int mask;
1828 const char *p, *p1;
1829
1830 p = str;
1831 mask = 0;
1832 for(;;) {
1833 p1 = strchr(p, ',');
1834 if (!p1)
1835 p1 = p + strlen(p);
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001836 if(cmp1(p,p1-p,"all")) {
1837 for(item = cpu_log_items; item->mask != 0; item++) {
1838 mask |= item->mask;
1839 }
1840 } else {
1841 for(item = cpu_log_items; item->mask != 0; item++) {
1842 if (cmp1(p, p1 - p, item->name))
1843 goto found;
1844 }
1845 return 0;
bellardf193c792004-03-21 17:06:25 +00001846 }
bellardf193c792004-03-21 17:06:25 +00001847 found:
1848 mask |= item->mask;
1849 if (*p1 != ',')
1850 break;
1851 p = p1 + 1;
1852 }
1853 return mask;
1854}
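/* Illustrative sketch (not compiled): how -d style option handling can
   drive the parser above.  "in_asm,exec" yields
   CPU_LOG_TB_IN_ASM | CPU_LOG_EXEC; any unknown item makes the whole
   parse return 0 so the caller can print cpu_log_items[] as a usage
   table instead. */
#if 0
static void example_enable_logging(const char *arg)
{
    int mask = cpu_str_to_log_mask(arg);    /* e.g. arg = "in_asm,exec" */

    if (!mask) {
        return;                 /* unknown item: show valid names */
    }
    cpu_set_log(mask);
}
#endif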
bellardea041c02003-06-25 16:16:50 +00001855
Andreas Färber9349b4f2012-03-14 01:38:32 +01001856void cpu_abort(CPUArchState *env, const char *fmt, ...)
bellard75012672003-06-21 13:11:07 +00001857{
1858 va_list ap;
pbrook493ae1f2007-11-23 16:53:59 +00001859 va_list ap2;
bellard75012672003-06-21 13:11:07 +00001860
1861 va_start(ap, fmt);
pbrook493ae1f2007-11-23 16:53:59 +00001862 va_copy(ap2, ap);
bellard75012672003-06-21 13:11:07 +00001863 fprintf(stderr, "qemu: fatal: ");
1864 vfprintf(stderr, fmt, ap);
1865 fprintf(stderr, "\n");
1866#ifdef TARGET_I386
bellard7fe48482004-10-09 18:08:01 +00001867 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1868#else
1869 cpu_dump_state(env, stderr, fprintf, 0);
bellard75012672003-06-21 13:11:07 +00001870#endif
aliguori93fcfe32009-01-15 22:34:14 +00001871 if (qemu_log_enabled()) {
1872 qemu_log("qemu: fatal: ");
1873 qemu_log_vprintf(fmt, ap2);
1874 qemu_log("\n");
j_mayerf9373292007-09-29 12:18:20 +00001875#ifdef TARGET_I386
aliguori93fcfe32009-01-15 22:34:14 +00001876 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
j_mayerf9373292007-09-29 12:18:20 +00001877#else
aliguori93fcfe32009-01-15 22:34:14 +00001878 log_cpu_state(env, 0);
j_mayerf9373292007-09-29 12:18:20 +00001879#endif
aliguori31b1a7b2009-01-15 22:35:09 +00001880 qemu_log_flush();
aliguori93fcfe32009-01-15 22:34:14 +00001881 qemu_log_close();
balrog924edca2007-06-10 14:07:13 +00001882 }
pbrook493ae1f2007-11-23 16:53:59 +00001883 va_end(ap2);
j_mayerf9373292007-09-29 12:18:20 +00001884 va_end(ap);
Riku Voipiofd052bf2010-01-25 14:30:49 +02001885#if defined(CONFIG_USER_ONLY)
1886 {
1887 struct sigaction act;
1888 sigfillset(&act.sa_mask);
1889 act.sa_handler = SIG_DFL;
1890 sigaction(SIGABRT, &act, NULL);
1891 }
1892#endif
bellard75012672003-06-21 13:11:07 +00001893 abort();
1894}
1895
Andreas Färber9349b4f2012-03-14 01:38:32 +01001896CPUArchState *cpu_copy(CPUArchState *env)
thsc5be9f02007-02-28 20:20:53 +00001897{
Andreas Färber9349b4f2012-03-14 01:38:32 +01001898 CPUArchState *new_env = cpu_init(env->cpu_model_str);
1899 CPUArchState *next_cpu = new_env->next_cpu;
thsc5be9f02007-02-28 20:20:53 +00001900 int cpu_index = new_env->cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001901#if defined(TARGET_HAS_ICE)
1902 CPUBreakpoint *bp;
1903 CPUWatchpoint *wp;
1904#endif
1905
Andreas Färber9349b4f2012-03-14 01:38:32 +01001906 memcpy(new_env, env, sizeof(CPUArchState));
aliguori5a38f082009-01-15 20:16:51 +00001907
1908 /* Preserve chaining and index. */
thsc5be9f02007-02-28 20:20:53 +00001909 new_env->next_cpu = next_cpu;
1910 new_env->cpu_index = cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001911
1912 /* Clone all break/watchpoints.
1913 Note: Once we support ptrace with hw-debug register access, make sure
1914 BP_CPU break/watchpoints are handled correctly on clone. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00001915    QTAILQ_INIT(&new_env->breakpoints); /* init the copy's lists, not the source's */
1916    QTAILQ_INIT(&new_env->watchpoints);
aliguori5a38f082009-01-15 20:16:51 +00001917#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001918 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001919 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1920 }
Blue Swirl72cf2d42009-09-12 07:36:22 +00001921 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001922 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1923 wp->flags, NULL);
1924 }
1925#endif
1926
thsc5be9f02007-02-28 20:20:53 +00001927 return new_env;
1928}
1929
bellard01243112004-01-04 15:48:17 +00001930#if !defined(CONFIG_USER_ONLY)
Blue Swirl0cac1b62012-04-09 16:50:52 +00001931void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
edgar_igl5c751e92008-05-06 08:44:21 +00001932{
1933 unsigned int i;
1934
1935 /* Discard jump cache entries for any tb which might potentially
1936 overlap the flushed page. */
1937 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1938 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001939 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001940
1941 i = tb_jmp_cache_hash_page(addr);
1942 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001943 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001944}
1945
pbrook5579c7f2009-04-11 14:47:08 +00001946/* Note: start and end must be within the same ram block. */
Anthony Liguoric227f092009-10-01 16:12:16 -05001947void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
bellard0a962c02005-02-10 22:00:27 +00001948 int dirty_flags)
bellard1ccde1c2004-02-06 19:46:14 +00001949{
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001950 uintptr_t length, start1;
bellard1ccde1c2004-02-06 19:46:14 +00001951
1952 start &= TARGET_PAGE_MASK;
1953 end = TARGET_PAGE_ALIGN(end);
1954
1955 length = end - start;
1956 if (length == 0)
1957 return;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001958 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00001959
bellard1ccde1c2004-02-06 19:46:14 +00001960 /* we modify the TLB cache so that the dirty bit will be set again
1961 when accessing the range */
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001962 start1 = (uintptr_t)qemu_safe_ram_ptr(start);
Stefan Weila57d23e2011-04-30 22:49:26 +02001963 /* Check that we don't span multiple blocks - this breaks the
pbrook5579c7f2009-04-11 14:47:08 +00001964 address comparisons below. */
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001965 if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
pbrook5579c7f2009-04-11 14:47:08 +00001966 != (end - 1) - start) {
1967 abort();
1968 }
Blue Swirle5548612012-04-21 13:08:33 +00001969 cpu_tlb_reset_dirty_all(start1, length);
bellard1ccde1c2004-02-06 19:46:14 +00001970}
1971
aliguori74576192008-10-06 14:02:03 +00001972int cpu_physical_memory_set_dirty_tracking(int enable)
1973{
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001974 int ret = 0;
aliguori74576192008-10-06 14:02:03 +00001975 in_migration = enable;
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001976 return ret;
aliguori74576192008-10-06 14:02:03 +00001977}
1978
Blue Swirle5548612012-04-21 13:08:33 +00001979target_phys_addr_t memory_region_section_get_iotlb(CPUArchState *env,
1980 MemoryRegionSection *section,
1981 target_ulong vaddr,
1982 target_phys_addr_t paddr,
1983 int prot,
1984 target_ulong *address)
1985{
1986 target_phys_addr_t iotlb;
1987 CPUWatchpoint *wp;
1988
Blue Swirlcc5bea62012-04-14 14:56:48 +00001989 if (memory_region_is_ram(section->mr)) {
Blue Swirle5548612012-04-21 13:08:33 +00001990 /* Normal RAM. */
1991 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00001992 + memory_region_section_addr(section, paddr);
Blue Swirle5548612012-04-21 13:08:33 +00001993 if (!section->readonly) {
1994 iotlb |= phys_section_notdirty;
1995 } else {
1996 iotlb |= phys_section_rom;
1997 }
1998 } else {
1999 /* IO handlers are currently passed a physical address.
2000 It would be nice to pass an offset from the base address
2001 of that region. This would avoid having to special case RAM,
2002 and avoid full address decoding in every device.
2003 We can't use the high bits of pd for this because
2004 IO_MEM_ROMD uses these as a ram address. */
2005 iotlb = section - phys_sections;
Blue Swirlcc5bea62012-04-14 14:56:48 +00002006 iotlb += memory_region_section_addr(section, paddr);
Blue Swirle5548612012-04-21 13:08:33 +00002007 }
2008
2009 /* Make accesses to pages with watchpoints go via the
2010 watchpoint trap routines. */
2011 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2012 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2013 /* Avoid trapping reads of pages with a write breakpoint. */
2014 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
2015 iotlb = phys_section_watch + paddr;
2016 *address |= TLB_MMIO;
2017 break;
2018 }
2019 }
2020 }
2021
2022 return iotlb;
2023}
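/* Note (illustrative reading of the code above): the returned value is
   overloaded.  For RAM it is the page's ram_addr_t with a small section
   index (phys_section_notdirty or phys_section_rom) OR-ed into the
   otherwise-zero low bits; for anything else it is an index into
   phys_sections[] biased by the offset of the access within the
   section, so the TLB miss path can recover the MemoryRegionSection
   without a second page-table walk. */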
2024
bellard01243112004-01-04 15:48:17 +00002025#else
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002026/*
2027 * Walks guest process memory "regions" one by one
2028 * and calls callback function 'fn' for each region.
2029 */
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002030
2031struct walk_memory_regions_data
bellard9fa3e852004-01-04 18:06:42 +00002032{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002033 walk_memory_regions_fn fn;
2034 void *priv;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02002035 uintptr_t start;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002036 int prot;
2037};
bellard9fa3e852004-01-04 18:06:42 +00002038
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002039static int walk_memory_regions_end(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00002040 abi_ulong end, int new_prot)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002041{
2042 if (data->start != -1ul) {
2043 int rc = data->fn(data->priv, data->start, end, data->prot);
2044 if (rc != 0) {
2045 return rc;
bellard9fa3e852004-01-04 18:06:42 +00002046 }
bellard33417e72003-08-10 21:47:01 +00002047 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002048
2049 data->start = (new_prot ? end : -1ul);
2050 data->prot = new_prot;
2051
2052 return 0;
2053}
2054
2055static int walk_memory_regions_1(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00002056 abi_ulong base, int level, void **lp)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002057{
Paul Brookb480d9b2010-03-12 23:23:29 +00002058 abi_ulong pa;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002059 int i, rc;
2060
2061 if (*lp == NULL) {
2062 return walk_memory_regions_end(data, base, 0);
2063 }
2064
2065 if (level == 0) {
2066 PageDesc *pd = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00002067 for (i = 0; i < L2_SIZE; ++i) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002068 int prot = pd[i].flags;
2069
2070 pa = base | (i << TARGET_PAGE_BITS);
2071 if (prot != data->prot) {
2072 rc = walk_memory_regions_end(data, pa, prot);
2073 if (rc != 0) {
2074 return rc;
2075 }
2076 }
2077 }
2078 } else {
2079 void **pp = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00002080 for (i = 0; i < L2_SIZE; ++i) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002081 pa = base | ((abi_ulong)i <<
2082 (TARGET_PAGE_BITS + L2_BITS * level));
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002083 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2084 if (rc != 0) {
2085 return rc;
2086 }
2087 }
2088 }
2089
2090 return 0;
2091}
2092
2093int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2094{
2095 struct walk_memory_regions_data data;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02002096 uintptr_t i;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002097
2098 data.fn = fn;
2099 data.priv = priv;
2100 data.start = -1ul;
2101 data.prot = 0;
2102
2103 for (i = 0; i < V_L1_SIZE; i++) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002104 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002105 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2106 if (rc != 0) {
2107 return rc;
2108 }
2109 }
2110
2111 return walk_memory_regions_end(&data, 0, 0);
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002112}
2113
Paul Brookb480d9b2010-03-12 23:23:29 +00002114static int dump_region(void *priv, abi_ulong start,
2115 abi_ulong end, unsigned long prot)
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002116{
2117 FILE *f = (FILE *)priv;
2118
Paul Brookb480d9b2010-03-12 23:23:29 +00002119 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2120 " "TARGET_ABI_FMT_lx" %c%c%c\n",
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002121 start, end, end - start,
2122 ((prot & PAGE_READ) ? 'r' : '-'),
2123 ((prot & PAGE_WRITE) ? 'w' : '-'),
2124 ((prot & PAGE_EXEC) ? 'x' : '-'));
2125
2126 return (0);
2127}
2128
2129/* dump memory mappings */
2130void page_dump(FILE *f)
2131{
2132 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2133 "start", "end", "size", "prot");
2134 walk_memory_regions(f, dump_region);
bellard33417e72003-08-10 21:47:01 +00002135}
2136
pbrook53a59602006-03-25 19:31:22 +00002137int page_get_flags(target_ulong address)
bellard33417e72003-08-10 21:47:01 +00002138{
bellard9fa3e852004-01-04 18:06:42 +00002139 PageDesc *p;
2140
2141 p = page_find(address >> TARGET_PAGE_BITS);
bellard33417e72003-08-10 21:47:01 +00002142 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00002143 return 0;
2144 return p->flags;
bellard33417e72003-08-10 21:47:01 +00002145}
2146
Richard Henderson376a7902010-03-10 15:57:04 -08002147/* Modify the flags of a page and invalidate the code if necessary.
2148   The flag PAGE_WRITE_ORG is set automatically depending
2149 on PAGE_WRITE. The mmap_lock should already be held. */
pbrook53a59602006-03-25 19:31:22 +00002150void page_set_flags(target_ulong start, target_ulong end, int flags)
bellard9fa3e852004-01-04 18:06:42 +00002151{
Richard Henderson376a7902010-03-10 15:57:04 -08002152 target_ulong addr, len;
bellard9fa3e852004-01-04 18:06:42 +00002153
Richard Henderson376a7902010-03-10 15:57:04 -08002154 /* This function should never be called with addresses outside the
2155 guest address space. If this assert fires, it probably indicates
2156 a missing call to h2g_valid. */
Paul Brookb480d9b2010-03-12 23:23:29 +00002157#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2158 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002159#endif
2160 assert(start < end);
2161
bellard9fa3e852004-01-04 18:06:42 +00002162 start = start & TARGET_PAGE_MASK;
2163 end = TARGET_PAGE_ALIGN(end);
Richard Henderson376a7902010-03-10 15:57:04 -08002164
2165 if (flags & PAGE_WRITE) {
bellard9fa3e852004-01-04 18:06:42 +00002166 flags |= PAGE_WRITE_ORG;
Richard Henderson376a7902010-03-10 15:57:04 -08002167 }
2168
2169 for (addr = start, len = end - start;
2170 len != 0;
2171 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2172 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2173
2174 /* If the write protection bit is set, then we invalidate
2175 the code inside. */
ths5fafdf22007-09-16 21:08:06 +00002176 if (!(p->flags & PAGE_WRITE) &&
bellard9fa3e852004-01-04 18:06:42 +00002177 (flags & PAGE_WRITE) &&
2178 p->first_tb) {
bellardd720b932004-04-25 17:57:43 +00002179 tb_invalidate_phys_page(addr, 0, NULL);
bellard9fa3e852004-01-04 18:06:42 +00002180 }
2181 p->flags = flags;
2182 }
bellard9fa3e852004-01-04 18:06:42 +00002183}
2184
ths3d97b402007-11-02 19:02:07 +00002185int page_check_range(target_ulong start, target_ulong len, int flags)
2186{
2187 PageDesc *p;
2188 target_ulong end;
2189 target_ulong addr;
2190
Richard Henderson376a7902010-03-10 15:57:04 -08002191 /* This function should never be called with addresses outside the
2192 guest address space. If this assert fires, it probably indicates
2193 a missing call to h2g_valid. */
Blue Swirl338e9e62010-03-13 09:48:08 +00002194#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2195 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002196#endif
2197
Richard Henderson3e0650a2010-03-29 10:54:42 -07002198 if (len == 0) {
2199 return 0;
2200 }
Richard Henderson376a7902010-03-10 15:57:04 -08002201 if (start + len - 1 < start) {
2202 /* We've wrapped around. */
balrog55f280c2008-10-28 10:24:11 +00002203 return -1;
Richard Henderson376a7902010-03-10 15:57:04 -08002204 }
balrog55f280c2008-10-28 10:24:11 +00002205
ths3d97b402007-11-02 19:02:07 +00002206    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2207 start = start & TARGET_PAGE_MASK;
2208
Richard Henderson376a7902010-03-10 15:57:04 -08002209 for (addr = start, len = end - start;
2210 len != 0;
2211 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
ths3d97b402007-11-02 19:02:07 +00002212 p = page_find(addr >> TARGET_PAGE_BITS);
2213 if( !p )
2214 return -1;
2215 if( !(p->flags & PAGE_VALID) )
2216 return -1;
2217
bellarddae32702007-11-14 10:51:00 +00002218 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
ths3d97b402007-11-02 19:02:07 +00002219 return -1;
bellarddae32702007-11-14 10:51:00 +00002220 if (flags & PAGE_WRITE) {
2221 if (!(p->flags & PAGE_WRITE_ORG))
2222 return -1;
2223 /* unprotect the page if it was put read-only because it
2224 contains translated code */
2225 if (!(p->flags & PAGE_WRITE)) {
2226 if (!page_unprotect(addr, 0, NULL))
2227 return -1;
2228 }
2229 return 0;
2230 }
ths3d97b402007-11-02 19:02:07 +00002231 }
2232 return 0;
2233}
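/* Illustrative sketch (not compiled): the usual caller pattern, e.g. a
   linux-user syscall validating a guest buffer before touching it
   through g2h().  TARGET_EFAULT is the assumed error convention of the
   caller; 'guest_addr' and 'size' are assumed inputs. */
#if 0
static abi_long example_validate_buffer(target_ulong guest_addr,
                                        target_ulong size)
{
    if (page_check_range(guest_addr, size, PAGE_READ | PAGE_WRITE) < 0) {
        return -TARGET_EFAULT;
    }
    /* safe to access g2h(guest_addr) .. g2h(guest_addr) + size - 1 */
    return 0;
}
#endif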
2234
bellard9fa3e852004-01-04 18:06:42 +00002235/* called from signal handler: invalidate the code and unprotect the
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002236 page. Return TRUE if the fault was successfully handled. */
Stefan Weil6375e092012-04-06 22:26:15 +02002237int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00002238{
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002239 unsigned int prot;
2240 PageDesc *p;
pbrook53a59602006-03-25 19:31:22 +00002241 target_ulong host_start, host_end, addr;
bellard9fa3e852004-01-04 18:06:42 +00002242
pbrookc8a706f2008-06-02 16:16:42 +00002243 /* Technically this isn't safe inside a signal handler. However we
2244 know this only ever happens in a synchronous SEGV handler, so in
2245 practice it seems to be ok. */
2246 mmap_lock();
2247
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002248 p = page_find(address >> TARGET_PAGE_BITS);
2249 if (!p) {
pbrookc8a706f2008-06-02 16:16:42 +00002250 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002251 return 0;
pbrookc8a706f2008-06-02 16:16:42 +00002252 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002253
bellard9fa3e852004-01-04 18:06:42 +00002254 /* if the page was really writable, then we change its
2255 protection back to writable */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002256 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2257 host_start = address & qemu_host_page_mask;
2258 host_end = host_start + qemu_host_page_size;
2259
2260 prot = 0;
2261 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2262 p = page_find(addr >> TARGET_PAGE_BITS);
2263 p->flags |= PAGE_WRITE;
2264 prot |= p->flags;
2265
bellard9fa3e852004-01-04 18:06:42 +00002266 /* and since the content will be modified, we must invalidate
2267 the corresponding translated code. */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002268 tb_invalidate_phys_page(addr, pc, puc);
bellard9fa3e852004-01-04 18:06:42 +00002269#ifdef DEBUG_TB_CHECK
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002270 tb_invalidate_check(addr);
bellard9fa3e852004-01-04 18:06:42 +00002271#endif
bellard9fa3e852004-01-04 18:06:42 +00002272 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002273 mprotect((void *)g2h(host_start), qemu_host_page_size,
2274 prot & PAGE_BITS);
2275
2276 mmap_unlock();
2277 return 1;
bellard9fa3e852004-01-04 18:06:42 +00002278 }
pbrookc8a706f2008-06-02 16:16:42 +00002279 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002280 return 0;
2281}
bellard9fa3e852004-01-04 18:06:42 +00002282#endif /* defined(CONFIG_USER_ONLY) */
2283
pbrooke2eef172008-06-08 01:09:01 +00002284#if !defined(CONFIG_USER_ONLY)
pbrook8da3ff12008-12-01 18:59:50 +00002285
Paul Brookc04b2b72010-03-01 03:31:14 +00002286#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2287typedef struct subpage_t {
Avi Kivity70c68e42012-01-02 12:32:48 +02002288 MemoryRegion iomem;
Paul Brookc04b2b72010-03-01 03:31:14 +00002289 target_phys_addr_t base;
Avi Kivity5312bd82012-02-12 18:32:55 +02002290 uint16_t sub_section[TARGET_PAGE_SIZE];
Paul Brookc04b2b72010-03-01 03:31:14 +00002291} subpage_t;
2292
Anthony Liguoric227f092009-10-01 16:12:16 -05002293static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002294 uint16_t section);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002295static subpage_t *subpage_init(target_phys_addr_t base);
Avi Kivity5312bd82012-02-12 18:32:55 +02002296static void destroy_page_desc(uint16_t section_index)
Avi Kivity54688b12012-02-09 17:34:32 +02002297{
Avi Kivity5312bd82012-02-12 18:32:55 +02002298 MemoryRegionSection *section = &phys_sections[section_index];
2299 MemoryRegion *mr = section->mr;
Avi Kivity54688b12012-02-09 17:34:32 +02002300
2301 if (mr->subpage) {
2302 subpage_t *subpage = container_of(mr, subpage_t, iomem);
2303 memory_region_destroy(&subpage->iomem);
2304 g_free(subpage);
2305 }
2306}
2307
Avi Kivity4346ae32012-02-10 17:00:01 +02002308static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
Avi Kivity54688b12012-02-09 17:34:32 +02002309{
2310 unsigned i;
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002311 PhysPageEntry *p;
Avi Kivity54688b12012-02-09 17:34:32 +02002312
Avi Kivityc19e8802012-02-13 20:25:31 +02002313 if (lp->ptr == PHYS_MAP_NODE_NIL) {
Avi Kivity54688b12012-02-09 17:34:32 +02002314 return;
2315 }
2316
Avi Kivityc19e8802012-02-13 20:25:31 +02002317 p = phys_map_nodes[lp->ptr];
Avi Kivity4346ae32012-02-10 17:00:01 +02002318 for (i = 0; i < L2_SIZE; ++i) {
Avi Kivity07f07b32012-02-13 20:45:32 +02002319 if (!p[i].is_leaf) {
Avi Kivity54688b12012-02-09 17:34:32 +02002320 destroy_l2_mapping(&p[i], level - 1);
Avi Kivity4346ae32012-02-10 17:00:01 +02002321 } else {
Avi Kivityc19e8802012-02-13 20:25:31 +02002322 destroy_page_desc(p[i].ptr);
Avi Kivity54688b12012-02-09 17:34:32 +02002323 }
Avi Kivity54688b12012-02-09 17:34:32 +02002324 }
Avi Kivity07f07b32012-02-13 20:45:32 +02002325 lp->is_leaf = 0;
Avi Kivityc19e8802012-02-13 20:25:31 +02002326 lp->ptr = PHYS_MAP_NODE_NIL;
Avi Kivity54688b12012-02-09 17:34:32 +02002327}
2328
2329static void destroy_all_mappings(void)
2330{
Avi Kivity3eef53d2012-02-10 14:57:31 +02002331 destroy_l2_mapping(&phys_map, P_L2_LEVELS - 1);
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002332 phys_map_nodes_reset();
Avi Kivity54688b12012-02-09 17:34:32 +02002333}
2334
Avi Kivity5312bd82012-02-12 18:32:55 +02002335static uint16_t phys_section_add(MemoryRegionSection *section)
2336{
2337 if (phys_sections_nb == phys_sections_nb_alloc) {
2338 phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
2339 phys_sections = g_renew(MemoryRegionSection, phys_sections,
2340 phys_sections_nb_alloc);
2341 }
2342 phys_sections[phys_sections_nb] = *section;
2343 return phys_sections_nb++;
2344}
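/* Note (illustrative): the table above grows by doubling (16, 32, 64,
   ...), so n calls to phys_section_add() cost O(n) total copying.  The
   returned uint16_t is the index later packed into leaf PhysPageEntry
   nodes, which is what limits the address space to 65536 distinct
   sections. */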
2345
2346static void phys_sections_clear(void)
2347{
2348 phys_sections_nb = 0;
2349}
2350
Michael S. Tsirkin8f2498f2009-09-29 18:53:16 +02002351/* register physical memory.
2352   For RAM, 'size' must be a multiple of the target page size.
pbrook8da3ff12008-12-01 18:59:50 +00002353   Page-aligned, page-sized chunks of a MemoryRegionSection are entered
2354   into the physical page table directly; partial pages at either end
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002355   are routed through subpage structures that dispatch each access to
pbrook8da3ff12008-12-01 18:59:50 +00002356   the section covering it. */
Avi Kivity0f0cb162012-02-13 17:14:32 +02002359static void register_subpage(MemoryRegionSection *section)
2360{
2361 subpage_t *subpage;
2362 target_phys_addr_t base = section->offset_within_address_space
2363 & TARGET_PAGE_MASK;
Avi Kivityf3705d52012-03-08 16:16:34 +02002364 MemoryRegionSection *existing = phys_page_find(base >> TARGET_PAGE_BITS);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002365 MemoryRegionSection subsection = {
2366 .offset_within_address_space = base,
2367 .size = TARGET_PAGE_SIZE,
2368 };
Avi Kivity0f0cb162012-02-13 17:14:32 +02002369 target_phys_addr_t start, end;
2370
Avi Kivityf3705d52012-03-08 16:16:34 +02002371 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002372
Avi Kivityf3705d52012-03-08 16:16:34 +02002373 if (!(existing->mr->subpage)) {
Avi Kivity0f0cb162012-02-13 17:14:32 +02002374 subpage = subpage_init(base);
2375 subsection.mr = &subpage->iomem;
Avi Kivity29990972012-02-13 20:21:20 +02002376 phys_page_set(base >> TARGET_PAGE_BITS, 1,
2377 phys_section_add(&subsection));
Avi Kivity0f0cb162012-02-13 17:14:32 +02002378 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02002379 subpage = container_of(existing->mr, subpage_t, iomem);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002380 }
2381 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
2382 end = start + section->size;
2383 subpage_register(subpage, start, end, phys_section_add(section));
2384}
2385
2386
2387static void register_multipage(MemoryRegionSection *section)
bellard33417e72003-08-10 21:47:01 +00002388{
Avi Kivitydd811242012-01-02 12:17:03 +02002389 target_phys_addr_t start_addr = section->offset_within_address_space;
2390 ram_addr_t size = section->size;
Avi Kivity29990972012-02-13 20:21:20 +02002391 target_phys_addr_t addr;
Avi Kivity5312bd82012-02-12 18:32:55 +02002392 uint16_t section_index = phys_section_add(section);
Avi Kivitydd811242012-01-02 12:17:03 +02002393
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002394 assert(size);
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002395
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002396 addr = start_addr;
Avi Kivity29990972012-02-13 20:21:20 +02002397 phys_page_set(addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
2398 section_index);
bellard33417e72003-08-10 21:47:01 +00002399}
2400
Avi Kivity0f0cb162012-02-13 17:14:32 +02002401void cpu_register_physical_memory_log(MemoryRegionSection *section,
2402 bool readonly)
2403{
2404 MemoryRegionSection now = *section, remain = *section;
2405
2406 if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
2407 || (now.size < TARGET_PAGE_SIZE)) {
2408 now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
2409 - now.offset_within_address_space,
2410 now.size);
2411 register_subpage(&now);
2412 remain.size -= now.size;
2413 remain.offset_within_address_space += now.size;
2414 remain.offset_within_region += now.size;
2415 }
2416 now = remain;
2417 now.size &= TARGET_PAGE_MASK;
2418 if (now.size) {
2419 register_multipage(&now);
2420 remain.size -= now.size;
2421 remain.offset_within_address_space += now.size;
2422 remain.offset_within_region += now.size;
2423 }
2424 now = remain;
2425 if (now.size) {
2426 register_subpage(&now);
2427 }
2428}
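/* Worked example for the splitting above (illustrative): with 4KiB
   target pages, registering a section covering [0x1800, 0x3400) takes
   three steps -- a subpage for the unaligned head [0x1800, 0x2000), a
   full page [0x2000, 0x3000) via register_multipage(), and a subpage
   for the unaligned tail [0x3000, 0x3400). */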
2429
2430
Anthony Liguoric227f092009-10-01 16:12:16 -05002431void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002432{
2433 if (kvm_enabled())
2434 kvm_coalesce_mmio_region(addr, size);
2435}
2436
Anthony Liguoric227f092009-10-01 16:12:16 -05002437void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002438{
2439 if (kvm_enabled())
2440 kvm_uncoalesce_mmio_region(addr, size);
2441}
2442
Sheng Yang62a27442010-01-26 19:21:16 +08002443void qemu_flush_coalesced_mmio_buffer(void)
2444{
2445 if (kvm_enabled())
2446 kvm_flush_coalesced_mmio_buffer();
2447}
2448
Marcelo Tosattic9027602010-03-01 20:25:08 -03002449#if defined(__linux__) && !defined(TARGET_S390X)
2450
2451#include <sys/vfs.h>
2452
2453#define HUGETLBFS_MAGIC 0x958458f6
2454
2455static long gethugepagesize(const char *path)
2456{
2457 struct statfs fs;
2458 int ret;
2459
2460 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002461 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002462 } while (ret != 0 && errno == EINTR);
2463
2464 if (ret != 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002465 perror(path);
2466 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002467 }
2468
2469 if (fs.f_type != HUGETLBFS_MAGIC)
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002470 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002471
2472 return fs.f_bsize;
2473}
2474
Alex Williamson04b16652010-07-02 11:13:17 -06002475static void *file_ram_alloc(RAMBlock *block,
2476 ram_addr_t memory,
2477 const char *path)
Marcelo Tosattic9027602010-03-01 20:25:08 -03002478{
2479 char *filename;
2480 void *area;
2481 int fd;
2482#ifdef MAP_POPULATE
2483 int flags;
2484#endif
2485 unsigned long hpagesize;
2486
2487 hpagesize = gethugepagesize(path);
2488 if (!hpagesize) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002489 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002490 }
2491
2492 if (memory < hpagesize) {
2493 return NULL;
2494 }
2495
2496 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2497 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2498 return NULL;
2499 }
2500
2501 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002502 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002503 }
2504
2505 fd = mkstemp(filename);
2506 if (fd < 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002507 perror("unable to create backing store for hugepages");
2508 free(filename);
2509 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002510 }
2511 unlink(filename);
2512 free(filename);
2513
2514 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2515
2516 /*
2517 * ftruncate is not supported by hugetlbfs on older
2518 * hosts, so don't bother bailing out on errors.
2519 * If anything goes wrong with it under other filesystems,
2520 * mmap will fail.
2521 */
2522 if (ftruncate(fd, memory))
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002523 perror("ftruncate");
Marcelo Tosattic9027602010-03-01 20:25:08 -03002524
2525#ifdef MAP_POPULATE
2526 /* NB: MAP_POPULATE won't exhaustively allocate all physical pages when
2527 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2528 * to sidestep this quirk.
2529 */
2530 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2531 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2532#else
2533 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2534#endif
2535 if (area == MAP_FAILED) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002536 perror("file_ram_alloc: can't mmap RAM pages");
2537 close(fd);
2538 return (NULL);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002539 }
Alex Williamson04b16652010-07-02 11:13:17 -06002540 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002541 return area;
2542}
2543#endif
2544
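/* Pick an offset in the ram_addr_t address space for a new block of the
 * given size, using a best-fit search over the gaps between existing
 * blocks.  Example: with blocks at [0, 4M) and [8M, 12M), a 2M request
 * returns offset 4M, since the 4M hole after the first block is the
 * smallest gap that still fits. */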
Alex Williamsond17b5282010-06-25 11:08:38 -06002545static ram_addr_t find_ram_offset(ram_addr_t size)
2546{
Alex Williamson04b16652010-07-02 11:13:17 -06002547 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06002548 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002549
2550 if (QLIST_EMPTY(&ram_list.blocks))
2551 return 0;
2552
2553 QLIST_FOREACH(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002554 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002555
2556 end = block->offset + block->length;
2557
2558 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2559 if (next_block->offset >= end) {
2560 next = MIN(next, next_block->offset);
2561 }
2562 }
2563 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06002564 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06002565 mingap = next - end;
2566 }
2567 }
Alex Williamson3e837b22011-10-31 08:54:09 -06002568
2569 if (offset == RAM_ADDR_MAX) {
2570 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2571 (uint64_t)size);
2572 abort();
2573 }
2574
Alex Williamson04b16652010-07-02 11:13:17 -06002575 return offset;
2576}
2577
2578static ram_addr_t last_ram_offset(void)
2579{
Alex Williamsond17b5282010-06-25 11:08:38 -06002580 RAMBlock *block;
2581 ram_addr_t last = 0;
2582
2583 QLIST_FOREACH(block, &ram_list.blocks, next)
2584 last = MAX(last, block->offset + block->length);
2585
2586 return last;
2587}
2588
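/* Give an already-allocated RAM block its identifier.  When a device is
 * supplied and its bus can produce a device path, that path is prepended
 * (yielding something like "<bus-path>/<name>"; the exact form depends on
 * the bus's get_dev_path hook) so the id stays unique when several
 * instances of the same device exist.  Duplicate ids abort. */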
Avi Kivityc5705a72011-12-20 15:59:12 +02002589void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
Cam Macdonell84b89d72010-07-26 18:10:57 -06002590{
2591 RAMBlock *new_block, *block;
2592
Avi Kivityc5705a72011-12-20 15:59:12 +02002593 new_block = NULL;
2594 QLIST_FOREACH(block, &ram_list.blocks, next) {
2595 if (block->offset == addr) {
2596 new_block = block;
2597 break;
2598 }
2599 }
2600 assert(new_block);
2601 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002602
2603 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2604 char *id = dev->parent_bus->info->get_dev_path(dev);
2605 if (id) {
2606 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05002607 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002608 }
2609 }
2610 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2611
2612 QLIST_FOREACH(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02002613 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06002614 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2615 new_block->idstr);
2616 abort();
2617 }
2618 }
Avi Kivityc5705a72011-12-20 15:59:12 +02002619}
2620
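/* Allocate (or, when 'host' is non-NULL, adopt) guest RAM and return its
 * ram_addr_t offset.  The backing is chosen in order of preference:
 * caller-provided pointer, hugetlbfs file when -mem-path is set, a fixed
 * low mmap window on s390x/KVM, the Xen map cache under Xen, and plain
 * qemu_vmalloc() otherwise.  Fresh pages start with all dirty bits set. */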
2621ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2622 MemoryRegion *mr)
2623{
2624 RAMBlock *new_block;
2625
2626 size = TARGET_PAGE_ALIGN(size);
2627 new_block = g_malloc0(sizeof(*new_block));
Cam Macdonell84b89d72010-07-26 18:10:57 -06002628
Avi Kivity7c637362011-12-21 13:09:49 +02002629 new_block->mr = mr;
Jun Nakajima432d2682010-08-31 16:41:25 +01002630 new_block->offset = find_ram_offset(size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002631 if (host) {
2632 new_block->host = host;
Huang Yingcd19cfa2011-03-02 08:56:19 +01002633 new_block->flags |= RAM_PREALLOC_MASK;
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002634 } else {
2635 if (mem_path) {
2636#if defined (__linux__) && !defined(TARGET_S390X)
2637 new_block->host = file_ram_alloc(new_block, size, mem_path);
2638 if (!new_block->host) {
2639 new_block->host = qemu_vmalloc(size);
Andreas Färbere78815a2010-09-25 11:26:05 +00002640 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002641 }
2642#else
2643 fprintf(stderr, "-mem-path option unsupported\n");
2644 exit(1);
2645#endif
2646 } else {
2647#if defined(TARGET_S390X) && defined(CONFIG_KVM)
Christian Borntraegerff836782011-05-10 14:49:10 +02002648 /* S390 KVM requires the topmost vma of the RAM to be smaller than
2649 a system-defined value, which is at least 256GB. Larger systems
2650 have larger values. We put the guest between the end of the data
2651 segment (system break) and this value. We use 32GB as a base to
2652 have enough room for the system break to grow. */
2653 new_block->host = mmap((void*)0x800000000, size,
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002654 PROT_EXEC|PROT_READ|PROT_WRITE,
Christian Borntraegerff836782011-05-10 14:49:10 +02002655 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
Alexander Graffb8b2732011-05-20 17:33:28 +02002656 if (new_block->host == MAP_FAILED) {
2657 fprintf(stderr, "Allocating RAM failed\n");
2658 abort();
2659 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002660#else
Jan Kiszka868bb332011-06-21 22:59:09 +02002661 if (xen_enabled()) {
Avi Kivityfce537d2011-12-18 15:48:55 +02002662 xen_ram_alloc(new_block->offset, size, mr);
Jun Nakajima432d2682010-08-31 16:41:25 +01002663 } else {
2664 new_block->host = qemu_vmalloc(size);
2665 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002666#endif
Andreas Färbere78815a2010-09-25 11:26:05 +00002667 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002668 }
2669 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06002670 new_block->length = size;
2671
2672 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2673
Anthony Liguori7267c092011-08-20 22:09:37 -05002674 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
Cam Macdonell84b89d72010-07-26 18:10:57 -06002675 last_ram_offset() >> TARGET_PAGE_BITS);
2676 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2677 0xff, size >> TARGET_PAGE_BITS);
2678
2679 if (kvm_enabled())
2680 kvm_setup_guest_memory(new_block->host, size);
2681
2682 return new_block->offset;
2683}
2684
Avi Kivityc5705a72011-12-20 15:59:12 +02002685ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
pbrook94a6b542009-04-11 17:15:54 +00002686{
Avi Kivityc5705a72011-12-20 15:59:12 +02002687 return qemu_ram_alloc_from_ptr(size, NULL, mr);
pbrook94a6b542009-04-11 17:15:54 +00002688}
bellarde9a1ab12007-02-08 23:08:38 +00002689
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002690void qemu_ram_free_from_ptr(ram_addr_t addr)
2691{
2692 RAMBlock *block;
2693
2694 QLIST_FOREACH(block, &ram_list.blocks, next) {
2695 if (addr == block->offset) {
2696 QLIST_REMOVE(block, next);
Anthony Liguori7267c092011-08-20 22:09:37 -05002697 g_free(block);
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002698 return;
2699 }
2700 }
2701}
2702
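/* Release a RAM block, undoing whichever backing qemu_ram_alloc_from_ptr()
 * chose: preallocated memory is left to its owner, -mem-path blocks are
 * munmap'd and their file descriptor closed, Xen mappings are dropped from
 * the map cache, and everything else is freed with qemu_vfree(). */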
Anthony Liguoric227f092009-10-01 16:12:16 -05002703void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00002704{
Alex Williamson04b16652010-07-02 11:13:17 -06002705 RAMBlock *block;
2706
2707 QLIST_FOREACH(block, &ram_list.blocks, next) {
2708 if (addr == block->offset) {
2709 QLIST_REMOVE(block, next);
Huang Yingcd19cfa2011-03-02 08:56:19 +01002710 if (block->flags & RAM_PREALLOC_MASK) {
2711 ;
2712 } else if (mem_path) {
Alex Williamson04b16652010-07-02 11:13:17 -06002713#if defined (__linux__) && !defined(TARGET_S390X)
2714 if (block->fd) {
2715 munmap(block->host, block->length);
2716 close(block->fd);
2717 } else {
2718 qemu_vfree(block->host);
2719 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01002720#else
2721 abort();
Alex Williamson04b16652010-07-02 11:13:17 -06002722#endif
2723 } else {
2724#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2725 munmap(block->host, block->length);
2726#else
Jan Kiszka868bb332011-06-21 22:59:09 +02002727 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002728 xen_invalidate_map_cache_entry(block->host);
Jun Nakajima432d2682010-08-31 16:41:25 +01002729 } else {
2730 qemu_vfree(block->host);
2731 }
Alex Williamson04b16652010-07-02 11:13:17 -06002732#endif
2733 }
Anthony Liguori7267c092011-08-20 22:09:37 -05002734 g_free(block);
Alex Williamson04b16652010-07-02 11:13:17 -06002735 return;
2736 }
2737 }
2738
bellarde9a1ab12007-02-08 23:08:38 +00002739}
2740
Huang Yingcd19cfa2011-03-02 08:56:19 +01002741#ifndef _WIN32
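/* Discard the current mapping of [addr, addr + length) inside its RAM
 * block and rebuild it at the same virtual address with MAP_FIXED, using
 * the same kind of backing the block was created with.  This is used to
 * recover from hardware memory errors: the poisoned pages are replaced
 * with fresh ones. */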
2742void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2743{
2744 RAMBlock *block;
2745 ram_addr_t offset;
2746 int flags;
2747 void *area, *vaddr;
2748
2749 QLIST_FOREACH(block, &ram_list.blocks, next) {
2750 offset = addr - block->offset;
2751 if (offset < block->length) {
2752 vaddr = block->host + offset;
2753 if (block->flags & RAM_PREALLOC_MASK) {
2754 ;
2755 } else {
2756 flags = MAP_FIXED;
2757 munmap(vaddr, length);
2758 if (mem_path) {
2759#if defined(__linux__) && !defined(TARGET_S390X)
2760 if (block->fd) {
2761#ifdef MAP_POPULATE
2762 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
2763 MAP_PRIVATE;
2764#else
2765 flags |= MAP_PRIVATE;
2766#endif
2767 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2768 flags, block->fd, offset);
2769 } else {
2770 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2771 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2772 flags, -1, 0);
2773 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01002774#else
2775 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01002776#endif
2777 } else {
2778#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2779 flags |= MAP_SHARED | MAP_ANONYMOUS;
2780 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
2781 flags, -1, 0);
2782#else
2783 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2784 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2785 flags, -1, 0);
2786#endif
2787 }
2788 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002789 fprintf(stderr, "Could not remap addr: "
2790 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01002791 length, addr);
2792 exit(1);
2793 }
2794 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
2795 }
2796 return;
2797 }
2798 }
2799}
2800#endif /* !_WIN32 */
2801
pbrookdc828ca2009-04-09 22:21:07 +00002802/* Return a host pointer to ram allocated with qemu_ram_alloc.
pbrook5579c7f2009-04-11 14:47:08 +00002803 With the exception of the softmmu code in this file, this should
2804 only be used for local memory (e.g. video ram) that the device owns,
2805 and knows it isn't going to access beyond the end of the block.
2806
2807 It should not be used for general purpose DMA.
2808 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2809 */
Anthony Liguoric227f092009-10-01 16:12:16 -05002810void *qemu_get_ram_ptr(ram_addr_t addr)
pbrookdc828ca2009-04-09 22:21:07 +00002811{
pbrook94a6b542009-04-11 17:15:54 +00002812 RAMBlock *block;
2813
Alex Williamsonf471a172010-06-11 11:11:42 -06002814 QLIST_FOREACH(block, &ram_list.blocks, next) {
2815 if (addr - block->offset < block->length) {
Vincent Palatin7d82af32011-03-10 15:47:46 -05002816 /* Move this entry to the start of the list. */
2817 if (block != QLIST_FIRST(&ram_list.blocks)) {
2818 QLIST_REMOVE(block, next);
2819 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
2820 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002821 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01002822 /* We need to check whether the requested address is in RAM,
2823 * because we don't want to map the guest's entire memory in QEMU;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01002824 * in that case, just map up to the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01002825 */
2826 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002827 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01002828 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002829 block->host =
2830 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01002831 }
2832 }
Alex Williamsonf471a172010-06-11 11:11:42 -06002833 return block->host + (addr - block->offset);
2834 }
pbrook94a6b542009-04-11 17:15:54 +00002835 }
Alex Williamsonf471a172010-06-11 11:11:42 -06002836
2837 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2838 abort();
2839
2840 return NULL;
pbrookdc828ca2009-04-09 22:21:07 +00002841}
2842
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02002843/* Return a host pointer to ram allocated with qemu_ram_alloc.
2844 * Same as qemu_get_ram_ptr, but avoids reordering the RAM blocks.
2845 */
2846void *qemu_safe_ram_ptr(ram_addr_t addr)
2847{
2848 RAMBlock *block;
2849
2850 QLIST_FOREACH(block, &ram_list.blocks, next) {
2851 if (addr - block->offset < block->length) {
Jan Kiszka868bb332011-06-21 22:59:09 +02002852 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01002853 /* We need to check whether the requested address is in RAM,
2854 * because we don't want to map the guest's entire memory in QEMU;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01002855 * in that case, just map up to the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01002856 */
2857 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002858 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01002859 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002860 block->host =
2861 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01002862 }
2863 }
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02002864 return block->host + (addr - block->offset);
2865 }
2866 }
2867
2868 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2869 abort();
2870
2871 return NULL;
2872}
2873
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002874/* Return a host pointer to the guest's RAM. Similar to qemu_get_ram_ptr
2875 * but takes a size argument */
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002876void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002877{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002878 if (*size == 0) {
2879 return NULL;
2880 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002881 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002882 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02002883 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002884 RAMBlock *block;
2885
2886 QLIST_FOREACH(block, &ram_list.blocks, next) {
2887 if (addr - block->offset < block->length) {
2888 if (addr - block->offset + *size > block->length)
2889 *size = block->length - addr + block->offset;
2890 return block->host + (addr - block->offset);
2891 }
2892 }
2893
2894 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2895 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002896 }
2897}
2898
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002899void qemu_put_ram_ptr(void *addr)
2900{
2901 trace_qemu_put_ram_ptr(addr);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002902}
2903
Marcelo Tosattie8902612010-10-11 15:31:19 -03002904int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00002905{
pbrook94a6b542009-04-11 17:15:54 +00002906 RAMBlock *block;
2907 uint8_t *host = ptr;
2908
Jan Kiszka868bb332011-06-21 22:59:09 +02002909 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002910 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Stefano Stabellini712c2b42011-05-19 18:35:46 +01002911 return 0;
2912 }
2913
Alex Williamsonf471a172010-06-11 11:11:42 -06002914 QLIST_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01002915 /* This case occurs when the block is not mapped. */
2916 if (block->host == NULL) {
2917 continue;
2918 }
Alex Williamsonf471a172010-06-11 11:11:42 -06002919 if (host - block->host < block->length) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03002920 *ram_addr = block->offset + (host - block->host);
2921 return 0;
Alex Williamsonf471a172010-06-11 11:11:42 -06002922 }
pbrook94a6b542009-04-11 17:15:54 +00002923 }
Jun Nakajima432d2682010-08-31 16:41:25 +01002924
Marcelo Tosattie8902612010-10-11 15:31:19 -03002925 return -1;
2926}
Alex Williamsonf471a172010-06-11 11:11:42 -06002927
Marcelo Tosattie8902612010-10-11 15:31:19 -03002928/* Some of the softmmu routines need to translate from a host pointer
2929 (typically a TLB entry) back to a ram offset. */
2930ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
2931{
2932 ram_addr_t ram_addr;
Alex Williamsonf471a172010-06-11 11:11:42 -06002933
Marcelo Tosattie8902612010-10-11 15:31:19 -03002934 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
2935 fprintf(stderr, "Bad ram pointer %p\n", ptr);
2936 abort();
2937 }
2938 return ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00002939}
2940
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002941static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
2942 unsigned size)
bellard33417e72003-08-10 21:47:01 +00002943{
pbrook67d3b952006-12-18 05:03:52 +00002944#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00002945 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
pbrook67d3b952006-12-18 05:03:52 +00002946#endif
Richard Henderson5b450402011-04-18 16:13:12 -07002947#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002948 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00002949#endif
2950 return 0;
2951}
2952
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002953static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
2954 uint64_t val, unsigned size)
blueswir1e18231a2008-10-06 18:46:28 +00002955{
2956#ifdef DEBUG_UNASSIGNED
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002957 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
blueswir1e18231a2008-10-06 18:46:28 +00002958#endif
Richard Henderson5b450402011-04-18 16:13:12 -07002959#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002960 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00002961#endif
2962}
2963
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002964static const MemoryRegionOps unassigned_mem_ops = {
2965 .read = unassigned_mem_read,
2966 .write = unassigned_mem_write,
2967 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00002968};
2969
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002970static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
2971 unsigned size)
2972{
2973 abort();
2974}
2975
2976static void error_mem_write(void *opaque, target_phys_addr_t addr,
2977 uint64_t value, unsigned size)
2978{
2979 abort();
2980}
2981
2982static const MemoryRegionOps error_mem_ops = {
2983 .read = error_mem_read,
2984 .write = error_mem_write,
2985 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00002986};
2987
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002988static const MemoryRegionOps rom_mem_ops = {
2989 .read = error_mem_read,
2990 .write = unassigned_mem_write,
2991 .endianness = DEVICE_NATIVE_ENDIAN,
2992};
2993
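/* Write handler for RAM pages that contain translated code.  The write
 * first invalidates any TBs overlapping the address, then is applied to
 * the RAM backing at the right width, and the page is marked dirty; once
 * no translated code is left on the page, the TLB entry is flipped back
 * to a plain RAM mapping so subsequent writes take the fast path. */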
2994static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
2995 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00002996{
bellard3a7d9292005-08-21 09:26:42 +00002997 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002998 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00002999 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3000#if !defined(CONFIG_USER_ONLY)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003001 tb_invalidate_phys_page_fast(ram_addr, size);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003002 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003003#endif
3004 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003005 switch (size) {
3006 case 1:
3007 stb_p(qemu_get_ram_ptr(ram_addr), val);
3008 break;
3009 case 2:
3010 stw_p(qemu_get_ram_ptr(ram_addr), val);
3011 break;
3012 case 4:
3013 stl_p(qemu_get_ram_ptr(ram_addr), val);
3014 break;
3015 default:
3016 abort();
3017 }
bellardf23db162005-08-21 19:12:28 +00003018 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003019 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00003020 /* we remove the notdirty callback only if the code has been
3021 flushed */
3022 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00003023 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00003024}
3025
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003026static const MemoryRegionOps notdirty_mem_ops = {
3027 .read = error_mem_read,
3028 .write = notdirty_mem_write,
3029 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00003030};
3031
pbrook0f459d12008-06-09 00:20:13 +00003032/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00003033static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00003034{
Andreas Färber9349b4f2012-03-14 01:38:32 +01003035 CPUArchState *env = cpu_single_env;
aliguori06d55cc2008-11-18 20:24:06 +00003036 target_ulong pc, cs_base;
3037 TranslationBlock *tb;
pbrook0f459d12008-06-09 00:20:13 +00003038 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00003039 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00003040 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00003041
aliguori06d55cc2008-11-18 20:24:06 +00003042 if (env->watchpoint_hit) {
3043 /* We re-entered the check after replacing the TB. Now raise
3044 * the debug interrupt so that it will trigger after the
3045 * current instruction. */
3046 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3047 return;
3048 }
pbrook2e70f6e2008-06-29 01:03:05 +00003049 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003050 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00003051 if ((vaddr == (wp->vaddr & len_mask) ||
3052 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00003053 wp->flags |= BP_WATCHPOINT_HIT;
3054 if (!env->watchpoint_hit) {
3055 env->watchpoint_hit = wp;
3056 tb = tb_find_pc(env->mem_io_pc);
3057 if (!tb) {
3058 cpu_abort(env, "check_watchpoint: could not find TB for "
3059 "pc=%p", (void *)env->mem_io_pc);
3060 }
Stefan Weil618ba8e2011-04-18 06:39:53 +00003061 cpu_restore_state(tb, env, env->mem_io_pc);
aliguori6e140f22008-11-18 20:37:55 +00003062 tb_phys_invalidate(tb, -1);
3063 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3064 env->exception_index = EXCP_DEBUG;
Max Filippov488d6572012-01-29 02:24:39 +04003065 cpu_loop_exit(env);
aliguori6e140f22008-11-18 20:37:55 +00003066 } else {
3067 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3068 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
Max Filippov488d6572012-01-29 02:24:39 +04003069 cpu_resume_from_signal(env, NULL);
aliguori6e140f22008-11-18 20:37:55 +00003070 }
aliguori06d55cc2008-11-18 20:24:06 +00003071 }
aliguori6e140f22008-11-18 20:37:55 +00003072 } else {
3073 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00003074 }
3075 }
3076}
3077
pbrook6658ffb2007-03-16 23:58:11 +00003078/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3079 so these check for a hit then pass through to the normal out-of-line
3080 phys routines. */
Avi Kivity1ec9b902012-01-02 12:47:48 +02003081static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
3082 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00003083{
Avi Kivity1ec9b902012-01-02 12:47:48 +02003084 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
3085 switch (size) {
3086 case 1: return ldub_phys(addr);
3087 case 2: return lduw_phys(addr);
3088 case 4: return ldl_phys(addr);
3089 default: abort();
3090 }
pbrook6658ffb2007-03-16 23:58:11 +00003091}
3092
Avi Kivity1ec9b902012-01-02 12:47:48 +02003093static void watch_mem_write(void *opaque, target_phys_addr_t addr,
3094 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00003095{
Avi Kivity1ec9b902012-01-02 12:47:48 +02003096 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
3097 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04003098 case 1:
3099 stb_phys(addr, val);
3100 break;
3101 case 2:
3102 stw_phys(addr, val);
3103 break;
3104 case 4:
3105 stl_phys(addr, val);
3106 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02003107 default: abort();
3108 }
pbrook6658ffb2007-03-16 23:58:11 +00003109}
3110
Avi Kivity1ec9b902012-01-02 12:47:48 +02003111static const MemoryRegionOps watch_mem_ops = {
3112 .read = watch_mem_read,
3113 .write = watch_mem_write,
3114 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00003115};
pbrook6658ffb2007-03-16 23:58:11 +00003116
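/* Subpage dispatch: when one target page is shared by several regions
 * (e.g. two small device BARs within the same 4KB page), the page's phys
 * map entry points at a subpage_t whose sub_section[] table maps each
 * byte offset inside the page to the section that really covers it.
 * The read and write handlers below simply relocate the address and
 * forward the access to the underlying region. */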
Avi Kivity70c68e42012-01-02 12:32:48 +02003117static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
3118 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003119{
Avi Kivity70c68e42012-01-02 12:32:48 +02003120 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003121 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02003122 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00003123#if defined(DEBUG_SUBPAGE)
3124 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3125 mmio, len, addr, idx);
3126#endif
blueswir1db7b5422007-05-26 17:36:03 +00003127
Avi Kivity5312bd82012-02-12 18:32:55 +02003128 section = &phys_sections[mmio->sub_section[idx]];
3129 addr += mmio->base;
3130 addr -= section->offset_within_address_space;
3131 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02003132 return io_mem_read(section->mr, addr, len);
blueswir1db7b5422007-05-26 17:36:03 +00003133}
3134
Avi Kivity70c68e42012-01-02 12:32:48 +02003135static void subpage_write(void *opaque, target_phys_addr_t addr,
3136 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003137{
Avi Kivity70c68e42012-01-02 12:32:48 +02003138 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003139 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02003140 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00003141#if defined(DEBUG_SUBPAGE)
Avi Kivity70c68e42012-01-02 12:32:48 +02003142 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
3143 " idx %d value %"PRIx64"\n",
Richard Hendersonf6405242010-04-22 16:47:31 -07003144 __func__, mmio, len, addr, idx, value);
blueswir1db7b5422007-05-26 17:36:03 +00003145#endif
Richard Hendersonf6405242010-04-22 16:47:31 -07003146
Avi Kivity5312bd82012-02-12 18:32:55 +02003147 section = &phys_sections[mmio->sub_section[idx]];
3148 addr += mmio->base;
3149 addr -= section->offset_within_address_space;
3150 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02003151 io_mem_write(section->mr, addr, value, len);
blueswir1db7b5422007-05-26 17:36:03 +00003152}
3153
Avi Kivity70c68e42012-01-02 12:32:48 +02003154static const MemoryRegionOps subpage_ops = {
3155 .read = subpage_read,
3156 .write = subpage_write,
3157 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00003158};
3159
Avi Kivityde712f92012-01-02 12:41:07 +02003160static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
3161 unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01003162{
3163 ram_addr_t raddr = addr;
3164 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02003165 switch (size) {
3166 case 1: return ldub_p(ptr);
3167 case 2: return lduw_p(ptr);
3168 case 4: return ldl_p(ptr);
3169 default: abort();
3170 }
Andreas Färber56384e82011-11-30 16:26:21 +01003171}
3172
Avi Kivityde712f92012-01-02 12:41:07 +02003173static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
3174 uint64_t value, unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01003175{
3176 ram_addr_t raddr = addr;
3177 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02003178 switch (size) {
3179 case 1: return stb_p(ptr, value);
3180 case 2: return stw_p(ptr, value);
3181 case 4: return stl_p(ptr, value);
3182 default: abort();
3183 }
Andreas Färber56384e82011-11-30 16:26:21 +01003184}
3185
Avi Kivityde712f92012-01-02 12:41:07 +02003186static const MemoryRegionOps subpage_ram_ops = {
3187 .read = subpage_ram_read,
3188 .write = subpage_ram_write,
3189 .endianness = DEVICE_NATIVE_ENDIAN,
Andreas Färber56384e82011-11-30 16:26:21 +01003190};
3191
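/* Point the byte range [start, end] of a subpage at the given section
 * index.  Plain RAM sections are first rewrapped with io_mem_subpage_ram,
 * because RAM inside a subpage must be reached through this dispatch
 * table rather than being mapped directly into the TLB. */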
Anthony Liguoric227f092009-10-01 16:12:16 -05003192static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02003193 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00003194{
3195 int idx, eidx;
3196
3197 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3198 return -1;
3199 idx = SUBPAGE_IDX(start);
3200 eidx = SUBPAGE_IDX(end);
3201#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00003202 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
blueswir1db7b5422007-05-26 17:36:03 +00003203 __func__, mmio, start, end, idx, eidx, section);
3204#endif
Avi Kivity5312bd82012-02-12 18:32:55 +02003205 if (memory_region_is_ram(phys_sections[section].mr)) {
3206 MemoryRegionSection new_section = phys_sections[section];
3207 new_section.mr = &io_mem_subpage_ram;
3208 section = phys_section_add(&new_section);
Andreas Färber56384e82011-11-30 16:26:21 +01003209 }
blueswir1db7b5422007-05-26 17:36:03 +00003210 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02003211 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00003212 }
3213
3214 return 0;
3215}
3216
Avi Kivity0f0cb162012-02-13 17:14:32 +02003217static subpage_t *subpage_init(target_phys_addr_t base)
blueswir1db7b5422007-05-26 17:36:03 +00003218{
Anthony Liguoric227f092009-10-01 16:12:16 -05003219 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00003220
Anthony Liguori7267c092011-08-20 22:09:37 -05003221 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00003222
3223 mmio->base = base;
Avi Kivity70c68e42012-01-02 12:32:48 +02003224 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
3225 "subpage", TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02003226 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00003227#if defined(DEBUG_SUBPAGE)
aliguori1eec6142009-02-05 22:06:18 +00003228 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
3229 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00003230#endif
Avi Kivity0f0cb162012-02-13 17:14:32 +02003231 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
blueswir1db7b5422007-05-26 17:36:03 +00003232
3233 return mmio;
3234}
3235
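/* Wrap a bare MemoryRegion in a section spanning the whole address space
 * and register it, so the fixed services (unassigned, notdirty, ROM,
 * watch) get well-known phys_section_* indices each time the physical
 * map is rebuilt. */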
Avi Kivity5312bd82012-02-12 18:32:55 +02003236static uint16_t dummy_section(MemoryRegion *mr)
3237{
3238 MemoryRegionSection section = {
3239 .mr = mr,
3240 .offset_within_address_space = 0,
3241 .offset_within_region = 0,
3242 .size = UINT64_MAX,
3243 };
3244
3245 return phys_section_add(&section);
3246}
3247
Avi Kivity37ec01d2012-03-08 18:08:35 +02003248MemoryRegion *iotlb_to_region(target_phys_addr_t index)
Avi Kivityaa102232012-03-08 17:06:55 +02003249{
Avi Kivity37ec01d2012-03-08 18:08:35 +02003250 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02003251}
3252
Avi Kivitye9179ce2009-06-14 11:38:52 +03003253static void io_mem_init(void)
3254{
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003255 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003256 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
3257 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
3258 "unassigned", UINT64_MAX);
3259 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
3260 "notdirty", UINT64_MAX);
Avi Kivityde712f92012-01-02 12:41:07 +02003261 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
3262 "subpage-ram", UINT64_MAX);
Avi Kivity1ec9b902012-01-02 12:47:48 +02003263 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
3264 "watch", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003265}
3266
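/* Core memory listener: rebuilds the physical page map on every memory
 * topology change.  begin() throws away all mappings and re-creates the
 * dummy sections, region_add()/region_nop() repopulate the map, and
 * commit() flushes each CPU's TLB so no stale translations survive. */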
Avi Kivity50c1e142012-02-08 21:36:02 +02003267static void core_begin(MemoryListener *listener)
3268{
Avi Kivity54688b12012-02-09 17:34:32 +02003269 destroy_all_mappings();
Avi Kivity5312bd82012-02-12 18:32:55 +02003270 phys_sections_clear();
Avi Kivityc19e8802012-02-13 20:25:31 +02003271 phys_map.ptr = PHYS_MAP_NODE_NIL;
Avi Kivity5312bd82012-02-12 18:32:55 +02003272 phys_section_unassigned = dummy_section(&io_mem_unassigned);
Avi Kivityaa102232012-03-08 17:06:55 +02003273 phys_section_notdirty = dummy_section(&io_mem_notdirty);
3274 phys_section_rom = dummy_section(&io_mem_rom);
3275 phys_section_watch = dummy_section(&io_mem_watch);
Avi Kivity50c1e142012-02-08 21:36:02 +02003276}
3277
3278static void core_commit(MemoryListener *listener)
3279{
Andreas Färber9349b4f2012-03-14 01:38:32 +01003280 CPUArchState *env;
Avi Kivity117712c2012-02-12 21:23:17 +02003281
3282 /* since each CPU stores ram addresses in its TLB cache, we must
3283 reset the modified entries */
3284 /* XXX: slow ! */
3285 for(env = first_cpu; env != NULL; env = env->next_cpu) {
3286 tlb_flush(env, 1);
3287 }
Avi Kivity50c1e142012-02-08 21:36:02 +02003288}
3289
Avi Kivity93632742012-02-08 16:54:16 +02003290static void core_region_add(MemoryListener *listener,
3291 MemoryRegionSection *section)
3292{
Avi Kivity4855d412012-02-08 21:16:05 +02003293 cpu_register_physical_memory_log(section, section->readonly);
Avi Kivity93632742012-02-08 16:54:16 +02003294}
3295
3296static void core_region_del(MemoryListener *listener,
3297 MemoryRegionSection *section)
3298{
Avi Kivity93632742012-02-08 16:54:16 +02003299}
3300
Avi Kivity50c1e142012-02-08 21:36:02 +02003301static void core_region_nop(MemoryListener *listener,
3302 MemoryRegionSection *section)
3303{
Avi Kivity54688b12012-02-09 17:34:32 +02003304 cpu_register_physical_memory_log(section, section->readonly);
Avi Kivity50c1e142012-02-08 21:36:02 +02003305}
3306
Avi Kivity93632742012-02-08 16:54:16 +02003307static void core_log_start(MemoryListener *listener,
3308 MemoryRegionSection *section)
3309{
3310}
3311
3312static void core_log_stop(MemoryListener *listener,
3313 MemoryRegionSection *section)
3314{
3315}
3316
3317static void core_log_sync(MemoryListener *listener,
3318 MemoryRegionSection *section)
3319{
3320}
3321
3322static void core_log_global_start(MemoryListener *listener)
3323{
3324 cpu_physical_memory_set_dirty_tracking(1);
3325}
3326
3327static void core_log_global_stop(MemoryListener *listener)
3328{
3329 cpu_physical_memory_set_dirty_tracking(0);
3330}
3331
3332static void core_eventfd_add(MemoryListener *listener,
3333 MemoryRegionSection *section,
3334 bool match_data, uint64_t data, int fd)
3335{
3336}
3337
3338static void core_eventfd_del(MemoryListener *listener,
3339 MemoryRegionSection *section,
3340 bool match_data, uint64_t data, int fd)
3341{
3342}
3343
Avi Kivity50c1e142012-02-08 21:36:02 +02003344static void io_begin(MemoryListener *listener)
3345{
3346}
3347
3348static void io_commit(MemoryListener *listener)
3349{
3350}
3351
Avi Kivity4855d412012-02-08 21:16:05 +02003352static void io_region_add(MemoryListener *listener,
3353 MemoryRegionSection *section)
3354{
Avi Kivitya2d33522012-03-05 17:40:12 +02003355 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
3356
3357 mrio->mr = section->mr;
3358 mrio->offset = section->offset_within_region;
3359 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
Avi Kivity4855d412012-02-08 21:16:05 +02003360 section->offset_within_address_space, section->size);
Avi Kivitya2d33522012-03-05 17:40:12 +02003361 ioport_register(&mrio->iorange);
Avi Kivity4855d412012-02-08 21:16:05 +02003362}
3363
3364static void io_region_del(MemoryListener *listener,
3365 MemoryRegionSection *section)
3366{
3367 isa_unassign_ioport(section->offset_within_address_space, section->size);
3368}
3369
Avi Kivity50c1e142012-02-08 21:36:02 +02003370static void io_region_nop(MemoryListener *listener,
3371 MemoryRegionSection *section)
3372{
3373}
3374
Avi Kivity4855d412012-02-08 21:16:05 +02003375static void io_log_start(MemoryListener *listener,
3376 MemoryRegionSection *section)
3377{
3378}
3379
3380static void io_log_stop(MemoryListener *listener,
3381 MemoryRegionSection *section)
3382{
3383}
3384
3385static void io_log_sync(MemoryListener *listener,
3386 MemoryRegionSection *section)
3387{
3388}
3389
3390static void io_log_global_start(MemoryListener *listener)
3391{
3392}
3393
3394static void io_log_global_stop(MemoryListener *listener)
3395{
3396}
3397
3398static void io_eventfd_add(MemoryListener *listener,
3399 MemoryRegionSection *section,
3400 bool match_data, uint64_t data, int fd)
3401{
3402}
3403
3404static void io_eventfd_del(MemoryListener *listener,
3405 MemoryRegionSection *section,
3406 bool match_data, uint64_t data, int fd)
3407{
3408}
3409
Avi Kivity93632742012-02-08 16:54:16 +02003410static MemoryListener core_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02003411 .begin = core_begin,
3412 .commit = core_commit,
Avi Kivity93632742012-02-08 16:54:16 +02003413 .region_add = core_region_add,
3414 .region_del = core_region_del,
Avi Kivity50c1e142012-02-08 21:36:02 +02003415 .region_nop = core_region_nop,
Avi Kivity93632742012-02-08 16:54:16 +02003416 .log_start = core_log_start,
3417 .log_stop = core_log_stop,
3418 .log_sync = core_log_sync,
3419 .log_global_start = core_log_global_start,
3420 .log_global_stop = core_log_global_stop,
3421 .eventfd_add = core_eventfd_add,
3422 .eventfd_del = core_eventfd_del,
3423 .priority = 0,
3424};
3425
Avi Kivity4855d412012-02-08 21:16:05 +02003426static MemoryListener io_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02003427 .begin = io_begin,
3428 .commit = io_commit,
Avi Kivity4855d412012-02-08 21:16:05 +02003429 .region_add = io_region_add,
3430 .region_del = io_region_del,
Avi Kivity50c1e142012-02-08 21:36:02 +02003431 .region_nop = io_region_nop,
Avi Kivity4855d412012-02-08 21:16:05 +02003432 .log_start = io_log_start,
3433 .log_stop = io_log_stop,
3434 .log_sync = io_log_sync,
3435 .log_global_start = io_log_global_start,
3436 .log_global_stop = io_log_global_stop,
3437 .eventfd_add = io_eventfd_add,
3438 .eventfd_del = io_eventfd_del,
3439 .priority = 0,
3440};
3441
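/* Create the two root memory regions -- "system" for RAM and MMIO and
 * "io" for the 64K legacy port space -- and attach the listeners that
 * mirror them into the physical page map and the ioport table. */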
Avi Kivity62152b82011-07-26 14:26:14 +03003442static void memory_map_init(void)
3443{
Anthony Liguori7267c092011-08-20 22:09:37 -05003444 system_memory = g_malloc(sizeof(*system_memory));
Avi Kivity8417ceb2011-08-03 11:56:14 +03003445 memory_region_init(system_memory, "system", INT64_MAX);
Avi Kivity62152b82011-07-26 14:26:14 +03003446 set_system_memory_map(system_memory);
Avi Kivity309cb472011-08-08 16:09:03 +03003447
Anthony Liguori7267c092011-08-20 22:09:37 -05003448 system_io = g_malloc(sizeof(*system_io));
Avi Kivity309cb472011-08-08 16:09:03 +03003449 memory_region_init(system_io, "io", 65536);
3450 set_system_io_map(system_io);
Avi Kivity93632742012-02-08 16:54:16 +02003451
Avi Kivity4855d412012-02-08 21:16:05 +02003452 memory_listener_register(&core_memory_listener, system_memory);
3453 memory_listener_register(&io_memory_listener, system_io);
Avi Kivity62152b82011-07-26 14:26:14 +03003454}
3455
3456MemoryRegion *get_system_memory(void)
3457{
3458 return system_memory;
3459}
3460
Avi Kivity309cb472011-08-08 16:09:03 +03003461MemoryRegion *get_system_io(void)
3462{
3463 return system_io;
3464}
3465
pbrooke2eef172008-06-08 01:09:01 +00003466#endif /* !defined(CONFIG_USER_ONLY) */
3467
bellard13eb76e2004-01-24 15:23:36 +00003468/* physical memory access (slow version, mainly for debug) */
3469#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +01003470int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00003471 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003472{
3473 int l, flags;
3474 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00003475 void * p;
bellard13eb76e2004-01-24 15:23:36 +00003476
3477 while (len > 0) {
3478 page = addr & TARGET_PAGE_MASK;
3479 l = (page + TARGET_PAGE_SIZE) - addr;
3480 if (l > len)
3481 l = len;
3482 flags = page_get_flags(page);
3483 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00003484 return -1;
bellard13eb76e2004-01-24 15:23:36 +00003485 if (is_write) {
3486 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00003487 return -1;
bellard579a97f2007-11-11 14:26:47 +00003488 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003489 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00003490 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003491 memcpy(p, buf, l);
3492 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00003493 } else {
3494 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00003495 return -1;
bellard579a97f2007-11-11 14:26:47 +00003496 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003497 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00003498 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003499 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00003500 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00003501 }
3502 len -= l;
3503 buf += l;
3504 addr += l;
3505 }
Paul Brooka68fe892010-03-01 00:08:59 +00003506 return 0;
bellard13eb76e2004-01-24 15:23:36 +00003507}
bellard8df1cd02005-01-28 22:37:22 +00003508
bellard13eb76e2004-01-24 15:23:36 +00003509#else
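/* Copy 'len' bytes between a guest physical address range and 'buf',
 * page by page.  RAM pages are memcpy'd directly, with code invalidation
 * and dirty marking on writes; I/O pages are decomposed into the widest
 * naturally aligned 4-, 2- or 1-byte accesses and routed through
 * io_mem_read()/io_mem_write(). */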
Anthony Liguoric227f092009-10-01 16:12:16 -05003510void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
bellard13eb76e2004-01-24 15:23:36 +00003511 int len, int is_write)
3512{
Avi Kivity37ec01d2012-03-08 18:08:35 +02003513 int l;
bellard13eb76e2004-01-24 15:23:36 +00003514 uint8_t *ptr;
3515 uint32_t val;
Anthony Liguoric227f092009-10-01 16:12:16 -05003516 target_phys_addr_t page;
Avi Kivityf3705d52012-03-08 16:16:34 +02003517 MemoryRegionSection *section;
ths3b46e622007-09-17 08:09:54 +00003518
bellard13eb76e2004-01-24 15:23:36 +00003519 while (len > 0) {
3520 page = addr & TARGET_PAGE_MASK;
3521 l = (page + TARGET_PAGE_SIZE) - addr;
3522 if (l > len)
3523 l = len;
Avi Kivity06ef3522012-02-13 16:11:22 +02003524 section = phys_page_find(page >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003525
bellard13eb76e2004-01-24 15:23:36 +00003526 if (is_write) {
Avi Kivityf3705d52012-03-08 16:16:34 +02003527 if (!memory_region_is_ram(section->mr)) {
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003528 target_phys_addr_t addr1;
Blue Swirlcc5bea62012-04-14 14:56:48 +00003529 addr1 = memory_region_section_addr(section, addr);
bellard6a00d602005-11-21 23:25:50 +00003530 /* XXX: could force cpu_single_env to NULL to avoid
3531 potential bugs */
aurel326c2934d2009-02-18 21:37:17 +00003532 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003533 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003534 val = ldl_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003535 io_mem_write(section->mr, addr1, val, 4);
bellard13eb76e2004-01-24 15:23:36 +00003536 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003537 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003538 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003539 val = lduw_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003540 io_mem_write(section->mr, addr1, val, 2);
bellard13eb76e2004-01-24 15:23:36 +00003541 l = 2;
3542 } else {
bellard1c213d12005-09-03 10:49:04 +00003543 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003544 val = ldub_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003545 io_mem_write(section->mr, addr1, val, 1);
bellard13eb76e2004-01-24 15:23:36 +00003546 l = 1;
3547 }
Avi Kivityf3705d52012-03-08 16:16:34 +02003548 } else if (!section->readonly) {
Anthony PERARD8ca56922011-07-15 04:32:53 +00003549 ram_addr_t addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02003550 addr1 = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003551 + memory_region_section_addr(section, addr);
bellard13eb76e2004-01-24 15:23:36 +00003552 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003553 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00003554 memcpy(ptr, buf, l);
bellard3a7d9292005-08-21 09:26:42 +00003555 if (!cpu_physical_memory_is_dirty(addr1)) {
3556 /* invalidate code */
3557 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3558 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003559 cpu_physical_memory_set_dirty_flags(
3560 addr1, (0xff & ~CODE_DIRTY_FLAG));
bellard3a7d9292005-08-21 09:26:42 +00003561 }
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003562 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00003563 }
3564 } else {
Blue Swirlcc5bea62012-04-14 14:56:48 +00003565 if (!(memory_region_is_ram(section->mr) ||
3566 memory_region_is_romd(section->mr))) {
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003567 target_phys_addr_t addr1;
bellard13eb76e2004-01-24 15:23:36 +00003568 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00003569 addr1 = memory_region_section_addr(section, addr);
aurel326c2934d2009-02-18 21:37:17 +00003570 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003571 /* 32 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02003572 val = io_mem_read(section->mr, addr1, 4);
bellardc27004e2005-01-03 23:35:10 +00003573 stl_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003574 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003575 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003576 /* 16 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02003577 val = io_mem_read(section->mr, addr1, 2);
bellardc27004e2005-01-03 23:35:10 +00003578 stw_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003579 l = 2;
3580 } else {
bellard1c213d12005-09-03 10:49:04 +00003581 /* 8 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02003582 val = io_mem_read(section->mr, addr1, 1);
bellardc27004e2005-01-03 23:35:10 +00003583 stb_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003584 l = 1;
3585 }
3586 } else {
3587 /* RAM case */
Anthony PERARD0a1b3572012-03-19 15:54:34 +00003588 ptr = qemu_get_ram_ptr(section->mr->ram_addr
Blue Swirlcc5bea62012-04-14 14:56:48 +00003589 + memory_region_section_addr(section,
3590 addr));
Avi Kivityf3705d52012-03-08 16:16:34 +02003591 memcpy(buf, ptr, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003592 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00003593 }
3594 }
3595 len -= l;
3596 buf += l;
3597 addr += l;
3598 }
3599}
bellard8df1cd02005-01-28 22:37:22 +00003600
bellardd0ecd2a2006-04-23 17:14:48 +00003601/* used for ROM loading : can write in RAM and ROM */
Anthony Liguoric227f092009-10-01 16:12:16 -05003602void cpu_physical_memory_write_rom(target_phys_addr_t addr,
bellardd0ecd2a2006-04-23 17:14:48 +00003603 const uint8_t *buf, int len)
3604{
3605 int l;
3606 uint8_t *ptr;
Anthony Liguoric227f092009-10-01 16:12:16 -05003607 target_phys_addr_t page;
Avi Kivityf3705d52012-03-08 16:16:34 +02003608 MemoryRegionSection *section;
ths3b46e622007-09-17 08:09:54 +00003609
bellardd0ecd2a2006-04-23 17:14:48 +00003610 while (len > 0) {
3611 page = addr & TARGET_PAGE_MASK;
3612 l = (page + TARGET_PAGE_SIZE) - addr;
3613 if (l > len)
3614 l = len;
Avi Kivity06ef3522012-02-13 16:11:22 +02003615 section = phys_page_find(page >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003616
Blue Swirlcc5bea62012-04-14 14:56:48 +00003617 if (!(memory_region_is_ram(section->mr) ||
3618 memory_region_is_romd(section->mr))) {
bellardd0ecd2a2006-04-23 17:14:48 +00003619 /* do nothing */
3620 } else {
3621 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02003622 addr1 = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003623 + memory_region_section_addr(section, addr);
bellardd0ecd2a2006-04-23 17:14:48 +00003624 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003625 ptr = qemu_get_ram_ptr(addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00003626 memcpy(ptr, buf, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003627 qemu_put_ram_ptr(ptr);
bellardd0ecd2a2006-04-23 17:14:48 +00003628 }
3629 len -= l;
3630 buf += l;
3631 addr += l;
3632 }
3633}
3634
aliguori6d16c2f2009-01-22 16:59:11 +00003635typedef struct {
3636 void *buffer;
Anthony Liguoric227f092009-10-01 16:12:16 -05003637 target_phys_addr_t addr;
3638 target_phys_addr_t len;
aliguori6d16c2f2009-01-22 16:59:11 +00003639} BounceBuffer;
3640
3641static BounceBuffer bounce;
3642
aliguoriba223c22009-01-22 16:59:16 +00003643typedef struct MapClient {
3644 void *opaque;
3645 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00003646 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00003647} MapClient;
3648
Blue Swirl72cf2d42009-09-12 07:36:22 +00003649static QLIST_HEAD(map_client_list, MapClient) map_client_list
3650 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003651
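/* There is only one bounce buffer, so cpu_physical_memory_map() can fail
 * transiently.  A caller that hits this can register a callback here and
 * is notified (and automatically unregistered) when the buffer is freed
 * and a retry is likely to succeed. */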
3652void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3653{
Anthony Liguori7267c092011-08-20 22:09:37 -05003654 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00003655
3656 client->opaque = opaque;
3657 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003658 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00003659 return client;
3660}
3661
3662void cpu_unregister_map_client(void *_client)
3663{
3664 MapClient *client = (MapClient *)_client;
3665
Blue Swirl72cf2d42009-09-12 07:36:22 +00003666 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05003667 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00003668}
3669
3670static void cpu_notify_map_clients(void)
3671{
3672 MapClient *client;
3673
Blue Swirl72cf2d42009-09-12 07:36:22 +00003674 while (!QLIST_EMPTY(&map_client_list)) {
3675 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003676 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09003677 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00003678 }
3679}
3680
aliguori6d16c2f2009-01-22 16:59:11 +00003681/* Map a physical memory region into a host virtual address.
3682 * May map a subset of the requested range, given by and returned in *plen.
3683 * May return NULL if resources needed to perform the mapping are exhausted.
3684 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00003685 * Use cpu_register_map_client() to know when retrying the map operation is
3686 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00003687 */
Anthony Liguoric227f092009-10-01 16:12:16 -05003688void *cpu_physical_memory_map(target_phys_addr_t addr,
3689 target_phys_addr_t *plen,
aliguori6d16c2f2009-01-22 16:59:11 +00003690 int is_write)
3691{
Anthony Liguoric227f092009-10-01 16:12:16 -05003692 target_phys_addr_t len = *plen;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003693 target_phys_addr_t todo = 0;
aliguori6d16c2f2009-01-22 16:59:11 +00003694 int l;
Anthony Liguoric227f092009-10-01 16:12:16 -05003695 target_phys_addr_t page;
Avi Kivityf3705d52012-03-08 16:16:34 +02003696 MemoryRegionSection *section;
Anthony PERARDf15fbc42011-07-20 08:17:42 +00003697 ram_addr_t raddr = RAM_ADDR_MAX;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003698 ram_addr_t rlen;
3699 void *ret;
aliguori6d16c2f2009-01-22 16:59:11 +00003700
3701 while (len > 0) {
3702 page = addr & TARGET_PAGE_MASK;
3703 l = (page + TARGET_PAGE_SIZE) - addr;
3704 if (l > len)
3705 l = len;
Avi Kivity06ef3522012-02-13 16:11:22 +02003706 section = phys_page_find(page >> TARGET_PAGE_BITS);
aliguori6d16c2f2009-01-22 16:59:11 +00003707
Avi Kivityf3705d52012-03-08 16:16:34 +02003708 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003709 if (todo || bounce.buffer) {
aliguori6d16c2f2009-01-22 16:59:11 +00003710 break;
3711 }
3712 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3713 bounce.addr = addr;
3714 bounce.len = l;
3715 if (!is_write) {
Stefan Weil54f7b4a2011-04-10 18:23:39 +02003716 cpu_physical_memory_read(addr, bounce.buffer, l);
aliguori6d16c2f2009-01-22 16:59:11 +00003717 }
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003718
3719 *plen = l;
3720 return bounce.buffer;
aliguori6d16c2f2009-01-22 16:59:11 +00003721 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003722 if (!todo) {
Avi Kivityf3705d52012-03-08 16:16:34 +02003723 raddr = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003724 + memory_region_section_addr(section, addr);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003725 }
aliguori6d16c2f2009-01-22 16:59:11 +00003726
3727 len -= l;
3728 addr += l;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003729 todo += l;
aliguori6d16c2f2009-01-22 16:59:11 +00003730 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003731 rlen = todo;
3732 ret = qemu_ram_ptr_length(raddr, &rlen);
3733 *plen = rlen;
3734 return ret;
aliguori6d16c2f2009-01-22 16:59:11 +00003735}
3736
3737/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3738 * Will also mark the memory as dirty if is_write == 1. access_len gives
3739 * the amount of memory that was actually read or written by the caller.
3740 */
Anthony Liguoric227f092009-10-01 16:12:16 -05003741void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3742 int is_write, target_phys_addr_t access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00003743{
3744 if (buffer != bounce.buffer) {
3745 if (is_write) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03003746 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00003747 while (access_len) {
3748 unsigned l;
3749 l = TARGET_PAGE_SIZE;
3750 if (l > access_len)
3751 l = access_len;
3752 if (!cpu_physical_memory_is_dirty(addr1)) {
3753 /* invalidate code */
3754 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3755 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003756 cpu_physical_memory_set_dirty_flags(
3757 addr1, (0xff & ~CODE_DIRTY_FLAG));
aliguori6d16c2f2009-01-22 16:59:11 +00003758 }
3759 addr1 += l;
3760 access_len -= l;
3761 }
3762 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003763 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003764 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003765 }
aliguori6d16c2f2009-01-22 16:59:11 +00003766 return;
3767 }
3768 if (is_write) {
3769 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3770 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00003771 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00003772 bounce.buffer = NULL;
aliguoriba223c22009-01-22 16:59:16 +00003773 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00003774}
bellardd0ecd2a2006-04-23 17:14:48 +00003775
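/* Usage sketch (illustrative only, not from the original source): the
 * typical map/copy/unmap pattern for DMA-style access to guest memory.
 * The helper name and its error convention are hypothetical; the points
 * being shown are that the mapping may come back shorter than requested
 * and that unmap must be told how much was actually accessed. */
#if 0
static int dma_read_from_guest(target_phys_addr_t addr, uint8_t *dest,
                               target_phys_addr_t size)
{
    while (size > 0) {
        target_phys_addr_t plen = size;
        /* is_write == 0: we only read from the returned pointer */
        void *host = cpu_physical_memory_map(addr, &plen, 0);
        if (!host) {
            return -1;  /* e.g. the single bounce buffer is already in use */
        }
        memcpy(dest, host, plen);
        cpu_physical_memory_unmap(host, plen, 0, plen);
        addr += plen;
        dest += plen;
        size -= plen;
    }
    return 0;
}
#endif
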
bellard8df1cd02005-01-28 22:37:22 +00003776/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003777static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
3778 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003779{
bellard8df1cd02005-01-28 22:37:22 +00003780 uint8_t *ptr;
3781 uint32_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02003782 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00003783
Avi Kivity06ef3522012-02-13 16:11:22 +02003784 section = phys_page_find(addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003785
Blue Swirlcc5bea62012-04-14 14:56:48 +00003786 if (!(memory_region_is_ram(section->mr) ||
3787 memory_region_is_romd(section->mr))) {
bellard8df1cd02005-01-28 22:37:22 +00003788 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00003789 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003790 val = io_mem_read(section->mr, addr, 4);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003791#if defined(TARGET_WORDS_BIGENDIAN)
3792 if (endian == DEVICE_LITTLE_ENDIAN) {
3793 val = bswap32(val);
3794 }
3795#else
3796 if (endian == DEVICE_BIG_ENDIAN) {
3797 val = bswap32(val);
3798 }
3799#endif
bellard8df1cd02005-01-28 22:37:22 +00003800 } else {
3801 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02003802 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003803 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003804 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003805 switch (endian) {
3806 case DEVICE_LITTLE_ENDIAN:
3807 val = ldl_le_p(ptr);
3808 break;
3809 case DEVICE_BIG_ENDIAN:
3810 val = ldl_be_p(ptr);
3811 break;
3812 default:
3813 val = ldl_p(ptr);
3814 break;
3815 }
bellard8df1cd02005-01-28 22:37:22 +00003816 }
3817 return val;
3818}
3819
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003820uint32_t ldl_phys(target_phys_addr_t addr)
3821{
3822 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3823}
3824
3825uint32_t ldl_le_phys(target_phys_addr_t addr)
3826{
3827 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3828}
3829
3830uint32_t ldl_be_phys(target_phys_addr_t addr)
3831{
3832 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
3833}
3834
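/* Usage sketch (illustrative only, not from the original source): device
 * emulation normally picks the _le/_be accessor matching the device's
 * register layout instead of the guest-native ldl_phys(). The address is
 * hypothetical. */
#if 0
static uint32_t read_pci_style_register(void)
{
    /* PCI-style registers are little-endian regardless of the target
       CPU's endianness, so the explicit LE accessor is the right one. */
    return ldl_le_phys(0x10000000);
}
#endif
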
bellard84b7b8e2005-11-28 21:19:04 +00003835/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003836static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
3837 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00003838{
bellard84b7b8e2005-11-28 21:19:04 +00003839 uint8_t *ptr;
3840 uint64_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02003841 MemoryRegionSection *section;
bellard84b7b8e2005-11-28 21:19:04 +00003842
Avi Kivity06ef3522012-02-13 16:11:22 +02003843 section = phys_page_find(addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003844
Blue Swirlcc5bea62012-04-14 14:56:48 +00003845 if (!(memory_region_is_ram(section->mr) ||
3846 memory_region_is_romd(section->mr))) {
bellard84b7b8e2005-11-28 21:19:04 +00003847 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00003848 addr = memory_region_section_addr(section, addr);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003849
        /* Compose the 64-bit value from two 32-bit reads in target-native
           order, then byte-swap if the caller asked for the opposite
           endianness (same pattern as in ldl_phys_internal above). */
#ifdef TARGET_WORDS_BIGENDIAN
        val = io_mem_read(section->mr, addr, 4) << 32;
        val |= io_mem_read(section->mr, addr + 4, 4);
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        val = io_mem_read(section->mr, addr, 4);
        val |= io_mem_read(section->mr, addr + 4, 4) << 32;
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
3859 } else {
3860 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02003861 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003862 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003863 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003864 switch (endian) {
3865 case DEVICE_LITTLE_ENDIAN:
3866 val = ldq_le_p(ptr);
3867 break;
3868 case DEVICE_BIG_ENDIAN:
3869 val = ldq_be_p(ptr);
3870 break;
3871 default:
3872 val = ldq_p(ptr);
3873 break;
3874 }
bellard84b7b8e2005-11-28 21:19:04 +00003875 }
3876 return val;
3877}
3878
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003879uint64_t ldq_phys(target_phys_addr_t addr)
3880{
3881 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3882}
3883
3884uint64_t ldq_le_phys(target_phys_addr_t addr)
3885{
3886 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3887}
3888
3889uint64_t ldq_be_phys(target_phys_addr_t addr)
3890{
3891 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
3892}
3893
bellardaab33092005-10-30 20:48:42 +00003894/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05003895uint32_t ldub_phys(target_phys_addr_t addr)
bellardaab33092005-10-30 20:48:42 +00003896{
3897 uint8_t val;
3898 cpu_physical_memory_read(addr, &val, 1);
3899 return val;
3900}
3901
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003902/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003903static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
3904 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003905{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003906 uint8_t *ptr;
    uint32_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02003908 MemoryRegionSection *section;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003909
Avi Kivity06ef3522012-02-13 16:11:22 +02003910 section = phys_page_find(addr >> TARGET_PAGE_BITS);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003911
Blue Swirlcc5bea62012-04-14 14:56:48 +00003912 if (!(memory_region_is_ram(section->mr) ||
3913 memory_region_is_romd(section->mr))) {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003914 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00003915 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003916 val = io_mem_read(section->mr, addr, 2);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003917#if defined(TARGET_WORDS_BIGENDIAN)
3918 if (endian == DEVICE_LITTLE_ENDIAN) {
3919 val = bswap16(val);
3920 }
3921#else
3922 if (endian == DEVICE_BIG_ENDIAN) {
3923 val = bswap16(val);
3924 }
3925#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003926 } else {
3927 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02003928 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003929 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003930 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003931 switch (endian) {
3932 case DEVICE_LITTLE_ENDIAN:
3933 val = lduw_le_p(ptr);
3934 break;
3935 case DEVICE_BIG_ENDIAN:
3936 val = lduw_be_p(ptr);
3937 break;
3938 default:
3939 val = lduw_p(ptr);
3940 break;
3941 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003942 }
3943 return val;
bellardaab33092005-10-30 20:48:42 +00003944}
3945
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003946uint32_t lduw_phys(target_phys_addr_t addr)
3947{
3948 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3949}
3950
3951uint32_t lduw_le_phys(target_phys_addr_t addr)
3952{
3953 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3954}
3955
3956uint32_t lduw_be_phys(target_phys_addr_t addr)
3957{
3958 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
3959}
3960
/* warning: addr must be aligned. The RAM page is not marked as dirty
   and the code inside is not invalidated. This is useful when the dirty
   bits are used to track modified PTEs. */
Anthony Liguoric227f092009-10-01 16:12:16 -05003964void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
bellard8df1cd02005-01-28 22:37:22 +00003965{
bellard8df1cd02005-01-28 22:37:22 +00003966 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02003967 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00003968
Avi Kivity06ef3522012-02-13 16:11:22 +02003969 section = phys_page_find(addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003970
Avi Kivityf3705d52012-03-08 16:16:34 +02003971 if (!memory_region_is_ram(section->mr) || section->readonly) {
Blue Swirlcc5bea62012-04-14 14:56:48 +00003972 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003973 if (memory_region_is_ram(section->mr)) {
3974 section = &phys_sections[phys_section_rom];
3975 }
3976 io_mem_write(section->mr, addr, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00003977 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02003978 unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003979 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003980 + memory_region_section_addr(section, addr);
pbrook5579c7f2009-04-11 14:47:08 +00003981 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00003982 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00003983
3984 if (unlikely(in_migration)) {
3985 if (!cpu_physical_memory_is_dirty(addr1)) {
3986 /* invalidate code */
3987 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3988 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003989 cpu_physical_memory_set_dirty_flags(
3990 addr1, (0xff & ~CODE_DIRTY_FLAG));
aliguori74576192008-10-06 14:02:03 +00003991 }
3992 }
bellard8df1cd02005-01-28 22:37:22 +00003993 }
3994}
3995
Anthony Liguoric227f092009-10-01 16:12:16 -05003996void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
j_mayerbc98a7e2007-04-04 07:55:12 +00003997{
j_mayerbc98a7e2007-04-04 07:55:12 +00003998 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02003999 MemoryRegionSection *section;
j_mayerbc98a7e2007-04-04 07:55:12 +00004000
Avi Kivity06ef3522012-02-13 16:11:22 +02004001 section = phys_page_find(addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00004002
Avi Kivityf3705d52012-03-08 16:16:34 +02004003 if (!memory_region_is_ram(section->mr) || section->readonly) {
Blue Swirlcc5bea62012-04-14 14:56:48 +00004004 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02004005 if (memory_region_is_ram(section->mr)) {
4006 section = &phys_sections[phys_section_rom];
4007 }
j_mayerbc98a7e2007-04-04 07:55:12 +00004008#ifdef TARGET_WORDS_BIGENDIAN
Avi Kivity37ec01d2012-03-08 18:08:35 +02004009 io_mem_write(section->mr, addr, val >> 32, 4);
4010 io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
j_mayerbc98a7e2007-04-04 07:55:12 +00004011#else
Avi Kivity37ec01d2012-03-08 18:08:35 +02004012 io_mem_write(section->mr, addr, (uint32_t)val, 4);
4013 io_mem_write(section->mr, addr + 4, val >> 32, 4);
j_mayerbc98a7e2007-04-04 07:55:12 +00004014#endif
4015 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02004016 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02004017 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00004018 + memory_region_section_addr(section, addr));
j_mayerbc98a7e2007-04-04 07:55:12 +00004019 stq_p(ptr, val);
4020 }
4021}
4022
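/* Usage sketch (illustrative only, not from the original source): the
 * _notdirty stores let a softmmu page-table walker update guest PTEs
 * without setting the page's dirty bits, so the dirty bitmap can still be
 * used to detect PTE modifications. The bit value is hypothetical. */
#if 0
static void mark_pte_accessed(target_phys_addr_t pte_addr, uint32_t pte)
{
    /* Deliberately skips the dirty bookkeeping; note it also skips TB
       invalidation, so never use this for pages holding guest code. */
    stl_phys_notdirty(pte_addr, pte | 0x20 /* hypothetical ACCESSED bit */);
}
#endif
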
bellard8df1cd02005-01-28 22:37:22 +00004023/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004024static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
4025 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00004026{
bellard8df1cd02005-01-28 22:37:22 +00004027 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02004028 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00004029
Avi Kivity06ef3522012-02-13 16:11:22 +02004030 section = phys_page_find(addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00004031
Avi Kivityf3705d52012-03-08 16:16:34 +02004032 if (!memory_region_is_ram(section->mr) || section->readonly) {
Blue Swirlcc5bea62012-04-14 14:56:48 +00004033 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02004034 if (memory_region_is_ram(section->mr)) {
4035 section = &phys_sections[phys_section_rom];
4036 }
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004037#if defined(TARGET_WORDS_BIGENDIAN)
4038 if (endian == DEVICE_LITTLE_ENDIAN) {
4039 val = bswap32(val);
4040 }
4041#else
4042 if (endian == DEVICE_BIG_ENDIAN) {
4043 val = bswap32(val);
4044 }
4045#endif
Avi Kivity37ec01d2012-03-08 18:08:35 +02004046 io_mem_write(section->mr, addr, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00004047 } else {
4048 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02004049 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00004050 + memory_region_section_addr(section, addr);
bellard8df1cd02005-01-28 22:37:22 +00004051 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00004052 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004053 switch (endian) {
4054 case DEVICE_LITTLE_ENDIAN:
4055 stl_le_p(ptr, val);
4056 break;
4057 case DEVICE_BIG_ENDIAN:
4058 stl_be_p(ptr, val);
4059 break;
4060 default:
4061 stl_p(ptr, val);
4062 break;
4063 }
bellard3a7d9292005-08-21 09:26:42 +00004064 if (!cpu_physical_memory_is_dirty(addr1)) {
4065 /* invalidate code */
4066 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4067 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09004068 cpu_physical_memory_set_dirty_flags(addr1,
4069 (0xff & ~CODE_DIRTY_FLAG));
bellard3a7d9292005-08-21 09:26:42 +00004070 }
bellard8df1cd02005-01-28 22:37:22 +00004071 }
4072}
4073
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004074void stl_phys(target_phys_addr_t addr, uint32_t val)
4075{
4076 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4077}
4078
4079void stl_le_phys(target_phys_addr_t addr, uint32_t val)
4080{
4081 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4082}
4083
4084void stl_be_phys(target_phys_addr_t addr, uint32_t val)
4085{
4086 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4087}
4088
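/* Usage sketch (illustrative only, not from the original source): stores
 * through the st*_phys family keep the dirty bitmap and the TB cache
 * coherent, unlike raw writes through a pointer from qemu_get_ram_ptr(),
 * which bypass both. The address is hypothetical. */
#if 0
static void patch_guest_word(target_phys_addr_t pa, uint32_t insn)
{
    /* Marks the page dirty and invalidates any TBs translated from it. */
    stl_phys(pa, insn);
}
#endif
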
bellardaab33092005-10-30 20:48:42 +00004089/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05004090void stb_phys(target_phys_addr_t addr, uint32_t val)
bellardaab33092005-10-30 20:48:42 +00004091{
4092 uint8_t v = val;
4093 cpu_physical_memory_write(addr, &v, 1);
4094}
4095
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004096/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004097static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
4098 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00004099{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004100 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02004101 MemoryRegionSection *section;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004102
Avi Kivity06ef3522012-02-13 16:11:22 +02004103 section = phys_page_find(addr >> TARGET_PAGE_BITS);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004104
Avi Kivityf3705d52012-03-08 16:16:34 +02004105 if (!memory_region_is_ram(section->mr) || section->readonly) {
Blue Swirlcc5bea62012-04-14 14:56:48 +00004106 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02004107 if (memory_region_is_ram(section->mr)) {
4108 section = &phys_sections[phys_section_rom];
4109 }
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004110#if defined(TARGET_WORDS_BIGENDIAN)
4111 if (endian == DEVICE_LITTLE_ENDIAN) {
4112 val = bswap16(val);
4113 }
4114#else
4115 if (endian == DEVICE_BIG_ENDIAN) {
4116 val = bswap16(val);
4117 }
4118#endif
Avi Kivity37ec01d2012-03-08 18:08:35 +02004119 io_mem_write(section->mr, addr, val, 2);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004120 } else {
4121 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02004122 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00004123 + memory_region_section_addr(section, addr);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004124 /* RAM case */
4125 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004126 switch (endian) {
4127 case DEVICE_LITTLE_ENDIAN:
4128 stw_le_p(ptr, val);
4129 break;
4130 case DEVICE_BIG_ENDIAN:
4131 stw_be_p(ptr, val);
4132 break;
4133 default:
4134 stw_p(ptr, val);
4135 break;
4136 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004137 if (!cpu_physical_memory_is_dirty(addr1)) {
4138 /* invalidate code */
4139 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4140 /* set dirty bit */
4141 cpu_physical_memory_set_dirty_flags(addr1,
4142 (0xff & ~CODE_DIRTY_FLAG));
4143 }
4144 }
bellardaab33092005-10-30 20:48:42 +00004145}
4146
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004147void stw_phys(target_phys_addr_t addr, uint32_t val)
4148{
4149 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4150}
4151
4152void stw_le_phys(target_phys_addr_t addr, uint32_t val)
4153{
4154 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4155}
4156
4157void stw_be_phys(target_phys_addr_t addr, uint32_t val)
4158{
4159 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4160}
4161
bellardaab33092005-10-30 20:48:42 +00004162/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05004163void stq_phys(target_phys_addr_t addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00004164{
4165 val = tswap64(val);
Stefan Weil71d2b722011-03-26 21:06:56 +01004166 cpu_physical_memory_write(addr, &val, 8);
bellardaab33092005-10-30 20:48:42 +00004167}
4168
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004169void stq_le_phys(target_phys_addr_t addr, uint64_t val)
4170{
4171 val = cpu_to_le64(val);
4172 cpu_physical_memory_write(addr, &val, 8);
4173}
4174
4175void stq_be_phys(target_phys_addr_t addr, uint64_t val)
4176{
4177 val = cpu_to_be64(val);
4178 cpu_physical_memory_write(addr, &val, 8);
4179}
4180
aliguori5e2972f2009-03-28 17:51:36 +00004181/* virtual memory access for debug (includes writing to ROM) */
Andreas Färber9349b4f2012-03-14 01:38:32 +01004182int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00004183 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00004184{
4185 int l;
Anthony Liguoric227f092009-10-01 16:12:16 -05004186 target_phys_addr_t phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00004187 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00004188
4189 while (len > 0) {
4190 page = addr & TARGET_PAGE_MASK;
4191 phys_addr = cpu_get_phys_page_debug(env, page);
4192 /* if no physical page mapped, return an error */
4193 if (phys_addr == -1)
4194 return -1;
4195 l = (page + TARGET_PAGE_SIZE) - addr;
4196 if (l > len)
4197 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00004198 phys_addr += (addr & ~TARGET_PAGE_MASK);
aliguori5e2972f2009-03-28 17:51:36 +00004199 if (is_write)
4200 cpu_physical_memory_write_rom(phys_addr, buf, l);
4201 else
aliguori5e2972f2009-03-28 17:51:36 +00004202 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
bellard13eb76e2004-01-24 15:23:36 +00004203 len -= l;
4204 buf += l;
4205 addr += l;
4206 }
4207 return 0;
4208}
Paul Brooka68fe892010-03-01 00:08:59 +00004209#endif
bellard13eb76e2004-01-24 15:23:36 +00004210
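/* Usage sketch (illustrative only, not from the original source): how a
 * debugger front end (e.g. the gdbstub) might read a guest virtual address
 * with the helper above. The helper name is hypothetical. */
#if 0
static bool debug_peek_u32(CPUArchState *env, target_ulong vaddr,
                           uint32_t *out)
{
    uint8_t buf[4];
    /* Translation is done page by page inside cpu_memory_rw_debug();
       it fails only if some page in the range is unmapped. */
    if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0) {
        return false;
    }
    *out = ldl_p(buf);  /* interpret the bytes in target byte order */
    return true;
}
#endif
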
pbrook2e70f6e2008-06-29 01:03:05 +00004211/* in deterministic execution mode, instructions doing device I/Os
4212 must be at the end of the TB */
Blue Swirl20503962012-04-09 14:20:20 +00004213void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
pbrook2e70f6e2008-06-29 01:03:05 +00004214{
4215 TranslationBlock *tb;
4216 uint32_t n, cflags;
4217 target_ulong pc, cs_base;
4218 uint64_t flags;
4219
Blue Swirl20503962012-04-09 14:20:20 +00004220 tb = tb_find_pc(retaddr);
pbrook2e70f6e2008-06-29 01:03:05 +00004221 if (!tb) {
4222 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
Blue Swirl20503962012-04-09 14:20:20 +00004223 (void *)retaddr);
pbrook2e70f6e2008-06-29 01:03:05 +00004224 }
4225 n = env->icount_decr.u16.low + tb->icount;
Blue Swirl20503962012-04-09 14:20:20 +00004226 cpu_restore_state(tb, env, retaddr);
pbrook2e70f6e2008-06-29 01:03:05 +00004227 /* Calculate how many instructions had been executed before the fault
thsbf20dc02008-06-30 17:22:19 +00004228 occurred. */
pbrook2e70f6e2008-06-29 01:03:05 +00004229 n = n - env->icount_decr.u16.low;
4230 /* Generate a new TB ending on the I/O insn. */
4231 n++;
4232 /* On MIPS and SH, delay slot instructions can only be restarted if
4233 they were already the first instruction in the TB. If this is not
thsbf20dc02008-06-30 17:22:19 +00004234 the first instruction in a TB then re-execute the preceding
pbrook2e70f6e2008-06-29 01:03:05 +00004235 branch. */
4236#if defined(TARGET_MIPS)
4237 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4238 env->active_tc.PC -= 4;
4239 env->icount_decr.u16.low++;
4240 env->hflags &= ~MIPS_HFLAG_BMASK;
4241 }
4242#elif defined(TARGET_SH4)
4243 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4244 && n > 1) {
4245 env->pc -= 2;
4246 env->icount_decr.u16.low++;
4247 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4248 }
4249#endif
4250 /* This should never happen. */
4251 if (n > CF_COUNT_MASK)
4252 cpu_abort(env, "TB too big during recompile");
4253
4254 cflags = n | CF_LAST_IO;
4255 pc = tb->pc;
4256 cs_base = tb->cs_base;
4257 flags = tb->flags;
4258 tb_phys_invalidate(tb, -1);
4259 /* FIXME: In theory this could raise an exception. In practice
4260 we have already translated the block once so it's probably ok. */
4261 tb_gen_code(env, pc, cs_base, flags, cflags);
thsbf20dc02008-06-30 17:22:19 +00004262 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
pbrook2e70f6e2008-06-29 01:03:05 +00004263 the first in the TB) then we end up generating a whole new TB and
4264 repeating the fault, which is horribly inefficient.
4265 Better would be to execute just this insn uncached, or generate a
4266 second new TB. */
4267 cpu_resume_from_signal(env, NULL);
4268}
4269
Paul Brookb3755a92010-03-12 16:54:58 +00004270#if !defined(CONFIG_USER_ONLY)
4271
Stefan Weil055403b2010-10-22 23:03:32 +02004272void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
bellarde3db7222005-01-26 22:00:47 +00004273{
4274 int i, target_code_size, max_target_code_size;
4275 int direct_jmp_count, direct_jmp2_count, cross_page;
4276 TranslationBlock *tb;
ths3b46e622007-09-17 08:09:54 +00004277
bellarde3db7222005-01-26 22:00:47 +00004278 target_code_size = 0;
4279 max_target_code_size = 0;
4280 cross_page = 0;
4281 direct_jmp_count = 0;
4282 direct_jmp2_count = 0;
4283 for(i = 0; i < nb_tbs; i++) {
4284 tb = &tbs[i];
4285 target_code_size += tb->size;
4286 if (tb->size > max_target_code_size)
4287 max_target_code_size = tb->size;
4288 if (tb->page_addr[1] != -1)
4289 cross_page++;
4290 if (tb->tb_next_offset[0] != 0xffff) {
4291 direct_jmp_count++;
4292 if (tb->tb_next_offset[1] != 0xffff) {
4293 direct_jmp2_count++;
4294 }
4295 }
4296 }
    /* XXX: avoid using doubles? */
bellard57fec1f2008-02-01 10:50:11 +00004298 cpu_fprintf(f, "Translation buffer state:\n");
Stefan Weil055403b2010-10-22 23:03:32 +02004299 cpu_fprintf(f, "gen code size %td/%ld\n",
bellard26a5f132008-05-28 12:30:31 +00004300 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4301 cpu_fprintf(f, "TB count %d/%d\n",
4302 nb_tbs, code_gen_max_blocks);
ths5fafdf22007-09-16 21:08:06 +00004303 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
bellarde3db7222005-01-26 22:00:47 +00004304 nb_tbs ? target_code_size / nb_tbs : 0,
4305 max_target_code_size);
Stefan Weil055403b2010-10-22 23:03:32 +02004306 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
bellarde3db7222005-01-26 22:00:47 +00004307 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4308 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
ths5fafdf22007-09-16 21:08:06 +00004309 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4310 cross_page,
bellarde3db7222005-01-26 22:00:47 +00004311 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4312 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
ths5fafdf22007-09-16 21:08:06 +00004313 direct_jmp_count,
bellarde3db7222005-01-26 22:00:47 +00004314 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4315 direct_jmp2_count,
4316 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
bellard57fec1f2008-02-01 10:50:11 +00004317 cpu_fprintf(f, "\nStatistics:\n");
bellarde3db7222005-01-26 22:00:47 +00004318 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4319 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4320 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
bellardb67d9a52008-05-23 09:57:34 +00004321 tcg_dump_info(f, cpu_fprintf);
bellarde3db7222005-01-26 22:00:47 +00004322}
4323
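/* Usage sketch (illustrative only, not from the original source):
 * dump_exec_info() is normally driven from the monitor ("info jit"), but
 * any fprintf-compatible sink works, e.g. plain stderr: */
#if 0
static void log_jit_stats(void)
{
    dump_exec_info(stderr, fprintf);
}
#endif
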
Benjamin Herrenschmidt82afa582012-01-10 01:35:11 +00004324/*
4325 * A helper function for the _utterly broken_ virtio device model to find out if
4326 * it's running on a big endian machine. Don't do this at home kids!
4327 */
4328bool virtio_is_big_endian(void);
4329bool virtio_is_big_endian(void)
4330{
4331#if defined(TARGET_WORDS_BIGENDIAN)
4332 return true;
4333#else
4334 return false;
4335#endif
4336}
4337
bellard61382a52003-10-27 21:22:23 +00004338#endif