/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#include "cputlb.h"

#include "memory-internal.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
   have limited branch ranges (possibly also PPC) so place it in a
   section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32) && !defined(_WIN64)
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

#define P_L2_LEVELS \
    (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
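
/* For example, with L1_MAP_ADDR_SPACE_BITS == 64 and TARGET_PAGE_BITS == 12,
   52 address bits must be mapped.  52 % 10 == 2 falls below the threshold
   of 4, so the remainder is folded into the first level: V_L1_BITS == 12,
   V_L1_SIZE == 4096 and V_L1_SHIFT == 40, leaving four full 10-bit levels
   below the L1 table. */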

uintptr_t qemu_real_host_page_size;
uintptr_t qemu_host_page_size;
uintptr_t qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);

static MemoryRegion io_mem_watch;
#endif

/* statistics */
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

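/* Look up a PageDesc without allocating; returns NULL if the page has no
   descriptor yet. */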
static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

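/* Return the index of a fresh node with all entries initialized to empty
   non-leaf pointers (PHYS_MAP_NODE_NIL).  Space must have been reserved
   with phys_map_node_reserve(); the assertions catch allocation past the
   reserved array. */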
static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}

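/* Recursively populate the phys map with 'leaf' for the range starting at
   *index and covering *nb pages.  At each level, a sub-range aligned to
   this level's step and at least one full step wide is recorded directly
   as a leaf; anything smaller recurses one level down.  *index and *nb
   advance as entries are consumed. */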
static void phys_page_set_level(PhysPageEntry *lp, target_phys_addr_t *index,
                                target_phys_addr_t *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    target_phys_addr_t step = (target_phys_addr_t)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          target_phys_addr_t index, target_phys_addr_t nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

bellard92e873b2004-05-21 14:52:29 +0000462{
Avi Kivityac1970f2012-10-03 16:22:53 +0200463 PhysPageEntry lp = d->phys_map;
Avi Kivity31ab2b42012-02-13 16:44:19 +0200464 PhysPageEntry *p;
465 int i;
Avi Kivity31ab2b42012-02-13 16:44:19 +0200466 uint16_t s_index = phys_section_unassigned;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +0200467
Avi Kivity07f07b32012-02-13 20:45:32 +0200468 for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
Avi Kivityc19e8802012-02-13 20:25:31 +0200469 if (lp.ptr == PHYS_MAP_NODE_NIL) {
Avi Kivity31ab2b42012-02-13 16:44:19 +0200470 goto not_found;
471 }
Avi Kivityc19e8802012-02-13 20:25:31 +0200472 p = phys_map_nodes[lp.ptr];
Avi Kivity31ab2b42012-02-13 16:44:19 +0200473 lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
Avi Kivityf1f6e3b2011-11-20 17:52:22 +0200474 }
Avi Kivity31ab2b42012-02-13 16:44:19 +0200475
Avi Kivityc19e8802012-02-13 20:25:31 +0200476 s_index = lp.ptr;
Avi Kivity31ab2b42012-02-13 16:44:19 +0200477not_found:
Avi Kivityf3705d52012-03-08 16:16:34 +0200478 return &phys_sections[s_index];
479}
480
Blue Swirle5548612012-04-21 13:08:33 +0000481bool memory_region_is_unassigned(MemoryRegion *mr)
482{
483 return mr != &io_mem_ram && mr != &io_mem_rom
484 && mr != &io_mem_notdirty && !mr->rom_device
485 && mr != &io_mem_watch;
486}
487
pbrookc8a706f2008-06-02 16:16:42 +0000488#define mmap_lock() do { } while(0)
489#define mmap_unlock() do { } while(0)
bellard9fa3e852004-01-04 18:06:42 +0000490#endif
bellardfd6ce8f2003-05-14 19:00:11 +0000491
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode.  This will change once a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc__) && HOST_LONG_BITS == 64
        // Map the buffer below 2G, so we can use direct calls and branches
        start = (void *) 0x40000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Keep the buffer no bigger than 16MB to branch between blocks */
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc__) && HOST_LONG_BITS == 64
        // Map the buffer below 2G, so we can use direct calls and branches
        addr = (void *) 0x40000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    tcg_register_jit(code_gen_buffer, code_gen_buffer_size);
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now. */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUArchState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUArchState),
        VMSTATE_UINT32(interrupt_request, CPUArchState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUArchState *qemu_get_cpu(int cpu)
{
    CPUArchState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUArchState *env)
{
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUArchState *env1)
{
    CPUArchState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

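/* TB list pointers are tagged with their low two bits: in the per-page
   lists (first_tb/page_next) the tag selects which of the TB's two pages
   the chain continues through, and in the jump lists (jmp_first/jmp_next)
   it selects the jump slot, with tag 2 marking the head of the circular
   list (see the "fail safe" assignment in tb_phys_invalidate() below). */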
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
}

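/* Remove a TB from every structure that can reach it: the physical hash
   table, the per-page TB lists, each CPU's tb_jmp_cache, and the jump
   chains of any TBs patched to branch directly into it (those jumps are
   reset so execution returns to the main loop instead). */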
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUArchState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

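/* Set 'len' bits starting at bit 'start' in the bitmap 'tab'; bits are
   numbered LSB-first within each byte.  For example, set_bits(tab, 5, 9)
   ORs 0xe0 into tab[0] (bits 5..7) and 0x3f into tab[1] (bits 8..13). */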
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

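/* Build the SMC bitmap for a page: one bit per byte of the page, set
   wherever translated code was taken from.  tb_invalidate_phys_page_fast()
   uses it to skip writes that do not overlap any translated code. */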
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

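/* Translate a basic block for pc/cs_base/flags and register the resulting
   TB.  If the TB array or the code buffer is exhausted, all translations
   are flushed and the allocation is retried; the retry cannot fail. */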
TranslationBlock *tb_gen_code(CPUArchState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((uintptr_t)code_gen_ptr + code_gen_size +
                             CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
                              int is_cpu_write_access)
{
    while (start < end) {
        tb_invalidate_phys_page_range(start, end, is_cpu_write_access);
        start &= TARGET_PAGE_MASK;
        start += TARGET_PAGE_SIZE;
    }
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUArchState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip +
                 (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    uintptr_t pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUArchState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

 1276/* add the tb to the target page and protect it if necessary */
ths5fafdf22007-09-16 21:08:06 +00001277static inline void tb_alloc_page(TranslationBlock *tb,
Paul Brook41c1b1c2010-03-12 16:54:58 +00001278 unsigned int n, tb_page_addr_t page_addr)
bellardfd6ce8f2003-05-14 19:00:11 +00001279{
1280 PageDesc *p;
Juan Quintela4429ab42011-06-02 01:53:44 +00001281#ifndef CONFIG_USER_ONLY
1282 bool page_already_protected;
1283#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001284
bellard9fa3e852004-01-04 18:06:42 +00001285 tb->page_addr[n] = page_addr;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001286 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
bellard9fa3e852004-01-04 18:06:42 +00001287 tb->page_next[n] = p->first_tb;
Juan Quintela4429ab42011-06-02 01:53:44 +00001288#ifndef CONFIG_USER_ONLY
1289 page_already_protected = p->first_tb != NULL;
1290#endif
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001291 p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
bellard9fa3e852004-01-04 18:06:42 +00001292 invalidate_page_bitmap(p);
1293
bellard107db442004-06-22 18:48:46 +00001294#if defined(TARGET_HAS_SMC) || 1
bellardd720b932004-04-25 17:57:43 +00001295
bellard9fa3e852004-01-04 18:06:42 +00001296#if defined(CONFIG_USER_ONLY)
bellardfd6ce8f2003-05-14 19:00:11 +00001297 if (p->flags & PAGE_WRITE) {
pbrook53a59602006-03-25 19:31:22 +00001298 target_ulong addr;
1299 PageDesc *p2;
bellard9fa3e852004-01-04 18:06:42 +00001300 int prot;
1301
bellardfd6ce8f2003-05-14 19:00:11 +00001302    /* force the host page to be non-writable (writes will have a
1303 page fault + mprotect overhead) */
pbrook53a59602006-03-25 19:31:22 +00001304 page_addr &= qemu_host_page_mask;
bellardfd6ce8f2003-05-14 19:00:11 +00001305 prot = 0;
pbrook53a59602006-03-25 19:31:22 +00001306 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1307 addr += TARGET_PAGE_SIZE) {
1308
1309 p2 = page_find (addr >> TARGET_PAGE_BITS);
1310 if (!p2)
1311 continue;
1312 prot |= p2->flags;
1313 p2->flags &= ~PAGE_WRITE;
pbrook53a59602006-03-25 19:31:22 +00001314 }
ths5fafdf22007-09-16 21:08:06 +00001315 mprotect(g2h(page_addr), qemu_host_page_size,
bellardfd6ce8f2003-05-14 19:00:11 +00001316 (prot & PAGE_BITS) & ~PAGE_WRITE);
1317#ifdef DEBUG_TB_INVALIDATE
blueswir1ab3d1722007-11-04 07:31:40 +00001318 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
pbrook53a59602006-03-25 19:31:22 +00001319 page_addr);
bellardfd6ce8f2003-05-14 19:00:11 +00001320#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001321 }
bellard9fa3e852004-01-04 18:06:42 +00001322#else
1323 /* if some code is already present, then the pages are already
1324 protected. So we handle the case where only the first TB is
1325 allocated in a physical page */
Juan Quintela4429ab42011-06-02 01:53:44 +00001326 if (!page_already_protected) {
bellard6a00d602005-11-21 23:25:50 +00001327 tlb_protect_code(page_addr);
bellard9fa3e852004-01-04 18:06:42 +00001328 }
1329#endif
bellardd720b932004-04-25 17:57:43 +00001330
1331#endif /* TARGET_HAS_SMC */
bellardfd6ce8f2003-05-14 19:00:11 +00001332}
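
/* Illustrative sketch (not part of the original file): the lists built
   above keep the page index n (0 or 1) in the low two bits of each
   TranslationBlock pointer, relying on TBs being at least 4-byte
   aligned; the value 2 marks the end of the jmp_first circular list.
   These helper names are hypothetical. */
static inline TranslationBlock *tb_ptr_tag(TranslationBlock *tb, unsigned n)
{
    return (TranslationBlock *)((uintptr_t)tb | n); /* n < 4 */
}

static inline TranslationBlock *tb_ptr_untag(TranslationBlock *tagged,
                                             unsigned *n)
{
    *n = (uintptr_t)tagged & 3; /* recover the tag */
    return (TranslationBlock *)((uintptr_t)tagged & ~(uintptr_t)3);
}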
1333
bellard9fa3e852004-01-04 18:06:42 +00001334/* add a new TB and link it to the physical page tables. phys_page2 is
1335 (-1) to indicate that only one page contains the TB. */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001336void tb_link_page(TranslationBlock *tb,
1337 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
bellardd4e81642003-05-25 16:46:15 +00001338{
bellard9fa3e852004-01-04 18:06:42 +00001339 unsigned int h;
1340 TranslationBlock **ptb;
1341
pbrookc8a706f2008-06-02 16:16:42 +00001342 /* Grab the mmap lock to stop another thread invalidating this TB
1343 before we are done. */
1344 mmap_lock();
bellard9fa3e852004-01-04 18:06:42 +00001345 /* add in the physical hash table */
1346 h = tb_phys_hash_func(phys_pc);
1347 ptb = &tb_phys_hash[h];
1348 tb->phys_hash_next = *ptb;
1349 *ptb = tb;
bellardfd6ce8f2003-05-14 19:00:11 +00001350
1351 /* add in the page list */
bellard9fa3e852004-01-04 18:06:42 +00001352 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1353 if (phys_page2 != -1)
1354 tb_alloc_page(tb, 1, phys_page2);
1355 else
1356 tb->page_addr[1] = -1;
bellard9fa3e852004-01-04 18:06:42 +00001357
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001358 tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
bellardd4e81642003-05-25 16:46:15 +00001359 tb->jmp_next[0] = NULL;
1360 tb->jmp_next[1] = NULL;
1361
1362 /* init original jump addresses */
1363 if (tb->tb_next_offset[0] != 0xffff)
1364 tb_reset_jump(tb, 0);
1365 if (tb->tb_next_offset[1] != 0xffff)
1366 tb_reset_jump(tb, 1);
bellard8a40a182005-11-20 10:35:40 +00001367
1368#ifdef DEBUG_TB_CHECK
1369 tb_page_check();
1370#endif
pbrookc8a706f2008-06-02 16:16:42 +00001371 mmap_unlock();
bellardfd6ce8f2003-05-14 19:00:11 +00001372}
1373
bellarda513fe12003-05-27 23:29:48 +00001374/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1375 tb[1].tc_ptr. Return NULL if not found */
Stefan Weil6375e092012-04-06 22:26:15 +02001376TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
bellarda513fe12003-05-27 23:29:48 +00001377{
1378 int m_min, m_max, m;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001379 uintptr_t v;
bellarda513fe12003-05-27 23:29:48 +00001380 TranslationBlock *tb;
1381
1382 if (nb_tbs <= 0)
1383 return NULL;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001384 if (tc_ptr < (uintptr_t)code_gen_buffer ||
1385 tc_ptr >= (uintptr_t)code_gen_ptr) {
bellarda513fe12003-05-27 23:29:48 +00001386 return NULL;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001387 }
bellarda513fe12003-05-27 23:29:48 +00001388 /* binary search (cf Knuth) */
1389 m_min = 0;
1390 m_max = nb_tbs - 1;
1391 while (m_min <= m_max) {
1392 m = (m_min + m_max) >> 1;
1393 tb = &tbs[m];
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001394 v = (uintptr_t)tb->tc_ptr;
bellarda513fe12003-05-27 23:29:48 +00001395 if (v == tc_ptr)
1396 return tb;
1397 else if (tc_ptr < v) {
1398 m_max = m - 1;
1399 } else {
1400 m_min = m + 1;
1401 }
ths5fafdf22007-09-16 21:08:06 +00001402 }
bellarda513fe12003-05-27 23:29:48 +00001403 return &tbs[m_max];
1404}
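
/* Usage sketch (hypothetical, not part of the original file): the SMC
   path above pairs tb_find_pc() with cpu_restore_state().  The lookup
   works because tbs[] entries are allocated in ascending tc_ptr order
   within code_gen_buffer. */
static void example_restore_from_host_pc(CPUArchState *env, uintptr_t host_pc)
{
    TranslationBlock *tb = tb_find_pc(host_pc);

    if (tb) {
        cpu_restore_state(tb, env, host_pc);
    }
    /* tb == NULL means host_pc does not point into generated code */
}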
bellard75012672003-06-21 13:11:07 +00001405
bellardea041c02003-06-25 16:16:50 +00001406static void tb_reset_jump_recursive(TranslationBlock *tb);
1407
1408static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1409{
1410 TranslationBlock *tb1, *tb_next, **ptb;
1411 unsigned int n1;
1412
1413 tb1 = tb->jmp_next[n];
1414 if (tb1 != NULL) {
1415 /* find head of list */
1416 for(;;) {
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001417 n1 = (uintptr_t)tb1 & 3;
1418 tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
bellardea041c02003-06-25 16:16:50 +00001419 if (n1 == 2)
1420 break;
1421 tb1 = tb1->jmp_next[n1];
1422 }
 1423        /* we are now sure that tb jumps to tb1 */
1424 tb_next = tb1;
1425
1426 /* remove tb from the jmp_first list */
1427 ptb = &tb_next->jmp_first;
1428 for(;;) {
1429 tb1 = *ptb;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001430 n1 = (uintptr_t)tb1 & 3;
1431 tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
bellardea041c02003-06-25 16:16:50 +00001432 if (n1 == n && tb1 == tb)
1433 break;
1434 ptb = &tb1->jmp_next[n1];
1435 }
1436 *ptb = tb->jmp_next[n];
1437 tb->jmp_next[n] = NULL;
ths3b46e622007-09-17 08:09:54 +00001438
bellardea041c02003-06-25 16:16:50 +00001439 /* suppress the jump to next tb in generated code */
1440 tb_reset_jump(tb, n);
1441
bellard01243112004-01-04 15:48:17 +00001442 /* suppress jumps in the tb on which we could have jumped */
bellardea041c02003-06-25 16:16:50 +00001443 tb_reset_jump_recursive(tb_next);
1444 }
1445}
1446
1447static void tb_reset_jump_recursive(TranslationBlock *tb)
1448{
1449 tb_reset_jump_recursive2(tb, 0);
1450 tb_reset_jump_recursive2(tb, 1);
1451}
1452
bellard1fddef42005-04-17 19:16:13 +00001453#if defined(TARGET_HAS_ICE)
Paul Brook94df27f2010-02-28 23:47:45 +00001454#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +01001455static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
Paul Brook94df27f2010-02-28 23:47:45 +00001456{
1457 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1458}
1459#else
Max Filippov1e7855a2012-04-10 02:48:17 +04001460void tb_invalidate_phys_addr(target_phys_addr_t addr)
bellardd720b932004-04-25 17:57:43 +00001461{
Anthony Liguoric227f092009-10-01 16:12:16 -05001462 ram_addr_t ram_addr;
Avi Kivityf3705d52012-03-08 16:16:34 +02001463 MemoryRegionSection *section;
bellardd720b932004-04-25 17:57:43 +00001464
Avi Kivityac1970f2012-10-03 16:22:53 +02001465 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
Avi Kivityf3705d52012-03-08 16:16:34 +02001466 if (!(memory_region_is_ram(section->mr)
1467 || (section->mr->rom_device && section->mr->readable))) {
Avi Kivity06ef3522012-02-13 16:11:22 +02001468 return;
1469 }
Avi Kivityf3705d52012-03-08 16:16:34 +02001470 ram_addr = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00001471 + memory_region_section_addr(section, addr);
pbrook706cd4b2006-04-08 17:36:21 +00001472 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
bellardd720b932004-04-25 17:57:43 +00001473}
Max Filippov1e7855a2012-04-10 02:48:17 +04001474
1475static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
1476{
Max Filippov9d70c4b2012-05-27 20:21:08 +04001477 tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
1478 (pc & ~TARGET_PAGE_MASK));
Max Filippov1e7855a2012-04-10 02:48:17 +04001479}
bellardc27004e2005-01-03 23:35:10 +00001480#endif
Paul Brook94df27f2010-02-28 23:47:45 +00001481#endif /* TARGET_HAS_ICE */
bellardd720b932004-04-25 17:57:43 +00001482
Paul Brookc527ee82010-03-01 03:31:14 +00001483#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +01001484void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
1486{
1487}
1488
Andreas Färber9349b4f2012-03-14 01:38:32 +01001489int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
Paul Brookc527ee82010-03-01 03:31:14 +00001490 int flags, CPUWatchpoint **watchpoint)
1491{
1492 return -ENOSYS;
1493}
1494#else
pbrook6658ffb2007-03-16 23:58:11 +00001495/* Add a watchpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001496int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
aliguoria1d1bb32008-11-18 20:07:32 +00001497 int flags, CPUWatchpoint **watchpoint)
pbrook6658ffb2007-03-16 23:58:11 +00001498{
aliguorib4051332008-11-18 20:14:20 +00001499 target_ulong len_mask = ~(len - 1);
aliguoric0ce9982008-11-25 22:13:57 +00001500 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001501
aliguorib4051332008-11-18 20:14:20 +00001502 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
Max Filippov0dc23822012-01-29 03:15:23 +04001503 if ((len & (len - 1)) || (addr & ~len_mask) ||
1504 len == 0 || len > TARGET_PAGE_SIZE) {
aliguorib4051332008-11-18 20:14:20 +00001505 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1506 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1507 return -EINVAL;
1508 }
Anthony Liguori7267c092011-08-20 22:09:37 -05001509 wp = g_malloc(sizeof(*wp));
pbrook6658ffb2007-03-16 23:58:11 +00001510
aliguoria1d1bb32008-11-18 20:07:32 +00001511 wp->vaddr = addr;
aliguorib4051332008-11-18 20:14:20 +00001512 wp->len_mask = len_mask;
aliguoria1d1bb32008-11-18 20:07:32 +00001513 wp->flags = flags;
1514
aliguori2dc9f412008-11-18 20:56:59 +00001515 /* keep all GDB-injected watchpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001516 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001517 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001518 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001519 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001520
pbrook6658ffb2007-03-16 23:58:11 +00001521 tlb_flush_page(env, addr);
aliguoria1d1bb32008-11-18 20:07:32 +00001522
1523 if (watchpoint)
1524 *watchpoint = wp;
1525 return 0;
pbrook6658ffb2007-03-16 23:58:11 +00001526}
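
/* Illustrative sketch (not part of the original file): what the
   power-of-two/alignment check above accepts and rejects.  The guest
   addresses are arbitrary examples. */
static void example_watchpoint_checks(CPUArchState *env)
{
    CPUWatchpoint *wp;

    cpu_watchpoint_insert(env, 0x1000, 4, BP_GDB, &wp); /* ok: aligned, pow2 */
    cpu_watchpoint_insert(env, 0x1000, 3, BP_GDB, &wp); /* -EINVAL: len not pow2 */
    cpu_watchpoint_insert(env, 0x1002, 4, BP_GDB, &wp); /* -EINVAL: addr & ~len_mask */
}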
1527
aliguoria1d1bb32008-11-18 20:07:32 +00001528/* Remove a specific watchpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001529int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
aliguoria1d1bb32008-11-18 20:07:32 +00001530 int flags)
pbrook6658ffb2007-03-16 23:58:11 +00001531{
aliguorib4051332008-11-18 20:14:20 +00001532 target_ulong len_mask = ~(len - 1);
aliguoria1d1bb32008-11-18 20:07:32 +00001533 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001534
Blue Swirl72cf2d42009-09-12 07:36:22 +00001535 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001536 if (addr == wp->vaddr && len_mask == wp->len_mask
aliguori6e140f22008-11-18 20:37:55 +00001537 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
aliguoria1d1bb32008-11-18 20:07:32 +00001538 cpu_watchpoint_remove_by_ref(env, wp);
pbrook6658ffb2007-03-16 23:58:11 +00001539 return 0;
1540 }
1541 }
aliguoria1d1bb32008-11-18 20:07:32 +00001542 return -ENOENT;
pbrook6658ffb2007-03-16 23:58:11 +00001543}
1544
aliguoria1d1bb32008-11-18 20:07:32 +00001545/* Remove a specific watchpoint by reference. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001546void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
aliguoria1d1bb32008-11-18 20:07:32 +00001547{
Blue Swirl72cf2d42009-09-12 07:36:22 +00001548 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
edgar_igl7d03f822008-05-17 18:58:29 +00001549
aliguoria1d1bb32008-11-18 20:07:32 +00001550 tlb_flush_page(env, watchpoint->vaddr);
1551
Anthony Liguori7267c092011-08-20 22:09:37 -05001552 g_free(watchpoint);
edgar_igl7d03f822008-05-17 18:58:29 +00001553}
1554
aliguoria1d1bb32008-11-18 20:07:32 +00001555/* Remove all matching watchpoints. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001556void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
aliguoria1d1bb32008-11-18 20:07:32 +00001557{
aliguoric0ce9982008-11-25 22:13:57 +00001558 CPUWatchpoint *wp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001559
Blue Swirl72cf2d42009-09-12 07:36:22 +00001560 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001561 if (wp->flags & mask)
1562 cpu_watchpoint_remove_by_ref(env, wp);
aliguoric0ce9982008-11-25 22:13:57 +00001563 }
aliguoria1d1bb32008-11-18 20:07:32 +00001564}
Paul Brookc527ee82010-03-01 03:31:14 +00001565#endif
aliguoria1d1bb32008-11-18 20:07:32 +00001566
1567/* Add a breakpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001568int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
aliguoria1d1bb32008-11-18 20:07:32 +00001569 CPUBreakpoint **breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001570{
bellard1fddef42005-04-17 19:16:13 +00001571#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001572 CPUBreakpoint *bp;
ths3b46e622007-09-17 08:09:54 +00001573
Anthony Liguori7267c092011-08-20 22:09:37 -05001574 bp = g_malloc(sizeof(*bp));
aliguoria1d1bb32008-11-18 20:07:32 +00001575
1576 bp->pc = pc;
1577 bp->flags = flags;
1578
aliguori2dc9f412008-11-18 20:56:59 +00001579 /* keep all GDB-injected breakpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001580 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001581 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001582 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001583 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001584
1585 breakpoint_invalidate(env, pc);
1586
1587 if (breakpoint)
1588 *breakpoint = bp;
1589 return 0;
1590#else
1591 return -ENOSYS;
1592#endif
1593}
1594
1595/* Remove a specific breakpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001596int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
aliguoria1d1bb32008-11-18 20:07:32 +00001597{
1598#if defined(TARGET_HAS_ICE)
1599 CPUBreakpoint *bp;
1600
Blue Swirl72cf2d42009-09-12 07:36:22 +00001601 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00001602 if (bp->pc == pc && bp->flags == flags) {
1603 cpu_breakpoint_remove_by_ref(env, bp);
bellard4c3a88a2003-07-26 12:06:08 +00001604 return 0;
aliguoria1d1bb32008-11-18 20:07:32 +00001605 }
bellard4c3a88a2003-07-26 12:06:08 +00001606 }
aliguoria1d1bb32008-11-18 20:07:32 +00001607 return -ENOENT;
bellard4c3a88a2003-07-26 12:06:08 +00001608#else
aliguoria1d1bb32008-11-18 20:07:32 +00001609 return -ENOSYS;
bellard4c3a88a2003-07-26 12:06:08 +00001610#endif
1611}
1612
aliguoria1d1bb32008-11-18 20:07:32 +00001613/* Remove a specific breakpoint by reference. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001614void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001615{
bellard1fddef42005-04-17 19:16:13 +00001616#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001617 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
bellardd720b932004-04-25 17:57:43 +00001618
aliguoria1d1bb32008-11-18 20:07:32 +00001619 breakpoint_invalidate(env, breakpoint->pc);
1620
Anthony Liguori7267c092011-08-20 22:09:37 -05001621 g_free(breakpoint);
aliguoria1d1bb32008-11-18 20:07:32 +00001622#endif
1623}
1624
1625/* Remove all matching breakpoints. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001626void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
aliguoria1d1bb32008-11-18 20:07:32 +00001627{
1628#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001629 CPUBreakpoint *bp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001630
Blue Swirl72cf2d42009-09-12 07:36:22 +00001631 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001632 if (bp->flags & mask)
1633 cpu_breakpoint_remove_by_ref(env, bp);
aliguoric0ce9982008-11-25 22:13:57 +00001634 }
bellard4c3a88a2003-07-26 12:06:08 +00001635#endif
1636}
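
/* Usage sketch (not part of the original file): a typical gdbstub-style
   flow over the breakpoint API above.  The guest PC is an arbitrary
   example. */
static void example_breakpoint_flow(CPUArchState *env)
{
    CPUBreakpoint *bp;

    if (cpu_breakpoint_insert(env, 0x8000, BP_GDB, &bp) == 0) {
        /* ... run until EXCP_DEBUG, inspect state ... */
        cpu_breakpoint_remove_by_ref(env, bp);
    }
    /* or drop every GDB-injected breakpoint in one call */
    cpu_breakpoint_remove_all(env, BP_GDB);
}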
1637
bellardc33a3462003-07-29 20:50:33 +00001638/* enable or disable single step mode. EXCP_DEBUG is returned by the
1639 CPU loop after each instruction */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001640void cpu_single_step(CPUArchState *env, int enabled)
bellardc33a3462003-07-29 20:50:33 +00001641{
bellard1fddef42005-04-17 19:16:13 +00001642#if defined(TARGET_HAS_ICE)
bellardc33a3462003-07-29 20:50:33 +00001643 if (env->singlestep_enabled != enabled) {
1644 env->singlestep_enabled = enabled;
aliguorie22a25c2009-03-12 20:12:48 +00001645 if (kvm_enabled())
1646 kvm_update_guest_debug(env, 0);
1647 else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01001648 /* must flush all the translated code to avoid inconsistencies */
aliguorie22a25c2009-03-12 20:12:48 +00001649 /* XXX: only flush what is necessary */
1650 tb_flush(env);
1651 }
bellardc33a3462003-07-29 20:50:33 +00001652 }
1653#endif
1654}
1655
Andreas Färber9349b4f2012-03-14 01:38:32 +01001656static void cpu_unlink_tb(CPUArchState *env)
bellardea041c02003-06-25 16:16:50 +00001657{
pbrookd5975362008-06-07 20:50:51 +00001658 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1659 problem and hope the cpu will stop of its own accord. For userspace
1660 emulation this often isn't actually as bad as it sounds. Often
1661 signals are used primarily to interrupt blocking syscalls. */
aurel323098dba2009-03-07 21:28:24 +00001662 TranslationBlock *tb;
Anthony Liguoric227f092009-10-01 16:12:16 -05001663 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
aurel323098dba2009-03-07 21:28:24 +00001664
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001665 spin_lock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001666 tb = env->current_tb;
1667 /* if the cpu is currently executing code, we must unlink it and
 1668       all the potentially executing TBs */
Riku Voipiof76cfe52009-12-04 15:16:30 +02001669 if (tb) {
aurel323098dba2009-03-07 21:28:24 +00001670 env->current_tb = NULL;
1671 tb_reset_jump_recursive(tb);
aurel323098dba2009-03-07 21:28:24 +00001672 }
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001673 spin_unlock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001674}
1675
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001676#ifndef CONFIG_USER_ONLY
aurel323098dba2009-03-07 21:28:24 +00001677/* mask must never be zero, except for A20 change call */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001678static void tcg_handle_interrupt(CPUArchState *env, int mask)
aurel323098dba2009-03-07 21:28:24 +00001679{
1680 int old_mask;
1681
1682 old_mask = env->interrupt_request;
1683 env->interrupt_request |= mask;
1684
aliguori8edac962009-04-24 18:03:45 +00001685 /*
1686 * If called from iothread context, wake the target cpu in
 1687     * case it's halted.
1688 */
Jan Kiszkab7680cb2011-03-12 17:43:51 +01001689 if (!qemu_cpu_is_self(env)) {
aliguori8edac962009-04-24 18:03:45 +00001690 qemu_cpu_kick(env);
1691 return;
1692 }
aliguori8edac962009-04-24 18:03:45 +00001693
pbrook2e70f6e2008-06-29 01:03:05 +00001694 if (use_icount) {
pbrook266910c2008-07-09 15:31:50 +00001695 env->icount_decr.u16.high = 0xffff;
pbrook2e70f6e2008-06-29 01:03:05 +00001696 if (!can_do_io(env)
aurel32be214e62009-03-06 21:48:00 +00001697 && (mask & ~old_mask) != 0) {
pbrook2e70f6e2008-06-29 01:03:05 +00001698 cpu_abort(env, "Raised interrupt while not in I/O function");
1699 }
pbrook2e70f6e2008-06-29 01:03:05 +00001700 } else {
aurel323098dba2009-03-07 21:28:24 +00001701 cpu_unlink_tb(env);
bellardea041c02003-06-25 16:16:50 +00001702 }
1703}
1704
Jan Kiszkaec6959d2011-04-13 01:32:56 +02001705CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1706
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001707#else /* CONFIG_USER_ONLY */
1708
Andreas Färber9349b4f2012-03-14 01:38:32 +01001709void cpu_interrupt(CPUArchState *env, int mask)
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001710{
1711 env->interrupt_request |= mask;
1712 cpu_unlink_tb(env);
1713}
1714#endif /* CONFIG_USER_ONLY */
1715
Andreas Färber9349b4f2012-03-14 01:38:32 +01001716void cpu_reset_interrupt(CPUArchState *env, int mask)
bellardb54ad042004-05-20 13:42:52 +00001717{
1718 env->interrupt_request &= ~mask;
1719}
1720
Andreas Färber9349b4f2012-03-14 01:38:32 +01001721void cpu_exit(CPUArchState *env)
aurel323098dba2009-03-07 21:28:24 +00001722{
1723 env->exit_request = 1;
1724 cpu_unlink_tb(env);
1725}
1726
Andreas Färber9349b4f2012-03-14 01:38:32 +01001727void cpu_abort(CPUArchState *env, const char *fmt, ...)
bellard75012672003-06-21 13:11:07 +00001728{
1729 va_list ap;
pbrook493ae1f2007-11-23 16:53:59 +00001730 va_list ap2;
bellard75012672003-06-21 13:11:07 +00001731
1732 va_start(ap, fmt);
pbrook493ae1f2007-11-23 16:53:59 +00001733 va_copy(ap2, ap);
bellard75012672003-06-21 13:11:07 +00001734 fprintf(stderr, "qemu: fatal: ");
1735 vfprintf(stderr, fmt, ap);
1736 fprintf(stderr, "\n");
Peter Maydell6fd2a022012-10-05 15:04:43 +01001737 cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
aliguori93fcfe32009-01-15 22:34:14 +00001738 if (qemu_log_enabled()) {
1739 qemu_log("qemu: fatal: ");
1740 qemu_log_vprintf(fmt, ap2);
1741 qemu_log("\n");
Peter Maydell6fd2a022012-10-05 15:04:43 +01001742 log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
aliguori31b1a7b2009-01-15 22:35:09 +00001743 qemu_log_flush();
aliguori93fcfe32009-01-15 22:34:14 +00001744 qemu_log_close();
balrog924edca2007-06-10 14:07:13 +00001745 }
pbrook493ae1f2007-11-23 16:53:59 +00001746 va_end(ap2);
j_mayerf9373292007-09-29 12:18:20 +00001747 va_end(ap);
Riku Voipiofd052bf2010-01-25 14:30:49 +02001748#if defined(CONFIG_USER_ONLY)
1749 {
1750 struct sigaction act;
1751 sigfillset(&act.sa_mask);
1752 act.sa_handler = SIG_DFL;
1753 sigaction(SIGABRT, &act, NULL);
1754 }
1755#endif
bellard75012672003-06-21 13:11:07 +00001756 abort();
1757}
1758
Andreas Färber9349b4f2012-03-14 01:38:32 +01001759CPUArchState *cpu_copy(CPUArchState *env)
thsc5be9f02007-02-28 20:20:53 +00001760{
Andreas Färber9349b4f2012-03-14 01:38:32 +01001761 CPUArchState *new_env = cpu_init(env->cpu_model_str);
1762 CPUArchState *next_cpu = new_env->next_cpu;
thsc5be9f02007-02-28 20:20:53 +00001763 int cpu_index = new_env->cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001764#if defined(TARGET_HAS_ICE)
1765 CPUBreakpoint *bp;
1766 CPUWatchpoint *wp;
1767#endif
1768
Andreas Färber9349b4f2012-03-14 01:38:32 +01001769 memcpy(new_env, env, sizeof(CPUArchState));
aliguori5a38f082009-01-15 20:16:51 +00001770
1771 /* Preserve chaining and index. */
thsc5be9f02007-02-28 20:20:53 +00001772 new_env->next_cpu = next_cpu;
1773 new_env->cpu_index = cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001774
1775 /* Clone all break/watchpoints.
1776 Note: Once we support ptrace with hw-debug register access, make sure
1777 BP_CPU break/watchpoints are handled correctly on clone. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00001778    QTAILQ_INIT(&new_env->breakpoints);
 1779    QTAILQ_INIT(&new_env->watchpoints);
aliguori5a38f082009-01-15 20:16:51 +00001780#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001781 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001782 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1783 }
Blue Swirl72cf2d42009-09-12 07:36:22 +00001784 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001785 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1786 wp->flags, NULL);
1787 }
1788#endif
1789
thsc5be9f02007-02-28 20:20:53 +00001790 return new_env;
1791}
1792
bellard01243112004-01-04 15:48:17 +00001793#if !defined(CONFIG_USER_ONLY)
Blue Swirl0cac1b62012-04-09 16:50:52 +00001794void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
edgar_igl5c751e92008-05-06 08:44:21 +00001795{
1796 unsigned int i;
1797
1798 /* Discard jump cache entries for any tb which might potentially
1799 overlap the flushed page. */
1800 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1801 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001802 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001803
1804 i = tb_jmp_cache_hash_page(addr);
1805 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001806 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001807}
1808
Juan Quintelad24981d2012-05-22 00:42:40 +02001809static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
1810 uintptr_t length)
bellard1ccde1c2004-02-06 19:46:14 +00001811{
Juan Quintelad24981d2012-05-22 00:42:40 +02001812 uintptr_t start1;
bellardf23db162005-08-21 19:12:28 +00001813
bellard1ccde1c2004-02-06 19:46:14 +00001814 /* we modify the TLB cache so that the dirty bit will be set again
1815 when accessing the range */
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001816 start1 = (uintptr_t)qemu_safe_ram_ptr(start);
Stefan Weila57d23e2011-04-30 22:49:26 +02001817 /* Check that we don't span multiple blocks - this breaks the
pbrook5579c7f2009-04-11 14:47:08 +00001818 address comparisons below. */
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001819 if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
pbrook5579c7f2009-04-11 14:47:08 +00001820 != (end - 1) - start) {
1821 abort();
1822 }
Blue Swirle5548612012-04-21 13:08:33 +00001823 cpu_tlb_reset_dirty_all(start1, length);
Juan Quintelad24981d2012-05-22 00:42:40 +02001824
1825}
1826
1827/* Note: start and end must be within the same ram block. */
1828void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1829 int dirty_flags)
1830{
1831 uintptr_t length;
1832
1833 start &= TARGET_PAGE_MASK;
1834 end = TARGET_PAGE_ALIGN(end);
1835
1836 length = end - start;
1837 if (length == 0)
1838 return;
1839 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
1840
1841 if (tcg_enabled()) {
1842 tlb_reset_dirty_range_all(start, end, length);
1843 }
bellard1ccde1c2004-02-06 19:46:14 +00001844}
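
/* Illustrative sketch (not part of the original file): the alignment
   above with concrete numbers, assuming 4 KiB target pages.  The range
   [0x1234, 0x1240) widens to the whole page [0x1000, 0x2000), since
   dirty tracking and the TLB work at page granularity. */
static void example_dirty_range_alignment(void)
{
    ram_addr_t start = 0x1234;
    ram_addr_t end = 0x1240;

    start &= TARGET_PAGE_MASK;    /* 0x1000 */
    end = TARGET_PAGE_ALIGN(end); /* 0x2000 */
    (void)start; (void)end;
}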
1845
aliguori74576192008-10-06 14:02:03 +00001846int cpu_physical_memory_set_dirty_tracking(int enable)
1847{
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001848 int ret = 0;
aliguori74576192008-10-06 14:02:03 +00001849 in_migration = enable;
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001850 return ret;
aliguori74576192008-10-06 14:02:03 +00001851}
1852
Blue Swirle5548612012-04-21 13:08:33 +00001853target_phys_addr_t memory_region_section_get_iotlb(CPUArchState *env,
1854 MemoryRegionSection *section,
1855 target_ulong vaddr,
1856 target_phys_addr_t paddr,
1857 int prot,
1858 target_ulong *address)
1859{
1860 target_phys_addr_t iotlb;
1861 CPUWatchpoint *wp;
1862
Blue Swirlcc5bea62012-04-14 14:56:48 +00001863 if (memory_region_is_ram(section->mr)) {
Blue Swirle5548612012-04-21 13:08:33 +00001864 /* Normal RAM. */
1865 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00001866 + memory_region_section_addr(section, paddr);
Blue Swirle5548612012-04-21 13:08:33 +00001867 if (!section->readonly) {
1868 iotlb |= phys_section_notdirty;
1869 } else {
1870 iotlb |= phys_section_rom;
1871 }
1872 } else {
1873 /* IO handlers are currently passed a physical address.
1874 It would be nice to pass an offset from the base address
1875 of that region. This would avoid having to special case RAM,
1876 and avoid full address decoding in every device.
1877 We can't use the high bits of pd for this because
1878 IO_MEM_ROMD uses these as a ram address. */
1879 iotlb = section - phys_sections;
Blue Swirlcc5bea62012-04-14 14:56:48 +00001880 iotlb += memory_region_section_addr(section, paddr);
Blue Swirle5548612012-04-21 13:08:33 +00001881 }
1882
1883 /* Make accesses to pages with watchpoints go via the
1884 watchpoint trap routines. */
1885 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1886 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
1887 /* Avoid trapping reads of pages with a write breakpoint. */
1888 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
1889 iotlb = phys_section_watch + paddr;
1890 *address |= TLB_MMIO;
1891 break;
1892 }
1893 }
1894 }
1895
1896 return iotlb;
1897}
1898
bellard01243112004-01-04 15:48:17 +00001899#else
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03001900/*
1901 * Walks guest process memory "regions" one by one
1902 * and calls callback function 'fn' for each region.
1903 */
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001904
1905struct walk_memory_regions_data
bellard9fa3e852004-01-04 18:06:42 +00001906{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001907 walk_memory_regions_fn fn;
1908 void *priv;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001909 uintptr_t start;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001910 int prot;
1911};
bellard9fa3e852004-01-04 18:06:42 +00001912
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001913static int walk_memory_regions_end(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00001914 abi_ulong end, int new_prot)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001915{
1916 if (data->start != -1ul) {
1917 int rc = data->fn(data->priv, data->start, end, data->prot);
1918 if (rc != 0) {
1919 return rc;
bellard9fa3e852004-01-04 18:06:42 +00001920 }
bellard33417e72003-08-10 21:47:01 +00001921 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001922
1923 data->start = (new_prot ? end : -1ul);
1924 data->prot = new_prot;
1925
1926 return 0;
1927}
1928
1929static int walk_memory_regions_1(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00001930 abi_ulong base, int level, void **lp)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001931{
Paul Brookb480d9b2010-03-12 23:23:29 +00001932 abi_ulong pa;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001933 int i, rc;
1934
1935 if (*lp == NULL) {
1936 return walk_memory_regions_end(data, base, 0);
1937 }
1938
1939 if (level == 0) {
1940 PageDesc *pd = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00001941 for (i = 0; i < L2_SIZE; ++i) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001942 int prot = pd[i].flags;
1943
1944 pa = base | (i << TARGET_PAGE_BITS);
1945 if (prot != data->prot) {
1946 rc = walk_memory_regions_end(data, pa, prot);
1947 if (rc != 0) {
1948 return rc;
1949 }
1950 }
1951 }
1952 } else {
1953 void **pp = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00001954 for (i = 0; i < L2_SIZE; ++i) {
Paul Brookb480d9b2010-03-12 23:23:29 +00001955 pa = base | ((abi_ulong)i <<
1956 (TARGET_PAGE_BITS + L2_BITS * level));
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001957 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
1958 if (rc != 0) {
1959 return rc;
1960 }
1961 }
1962 }
1963
1964 return 0;
1965}
1966
1967int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
1968{
1969 struct walk_memory_regions_data data;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001970 uintptr_t i;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001971
1972 data.fn = fn;
1973 data.priv = priv;
1974 data.start = -1ul;
1975 data.prot = 0;
1976
1977 for (i = 0; i < V_L1_SIZE; i++) {
Paul Brookb480d9b2010-03-12 23:23:29 +00001978 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001979 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
1980 if (rc != 0) {
1981 return rc;
1982 }
1983 }
1984
1985 return walk_memory_regions_end(&data, 0, 0);
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03001986}
1987
Paul Brookb480d9b2010-03-12 23:23:29 +00001988static int dump_region(void *priv, abi_ulong start,
1989 abi_ulong end, unsigned long prot)
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03001990{
1991 FILE *f = (FILE *)priv;
1992
Paul Brookb480d9b2010-03-12 23:23:29 +00001993 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
1994 " "TARGET_ABI_FMT_lx" %c%c%c\n",
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03001995 start, end, end - start,
1996 ((prot & PAGE_READ) ? 'r' : '-'),
1997 ((prot & PAGE_WRITE) ? 'w' : '-'),
1998 ((prot & PAGE_EXEC) ? 'x' : '-'));
1999
2000 return (0);
2001}
2002
2003/* dump memory mappings */
2004void page_dump(FILE *f)
2005{
2006 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2007 "start", "end", "size", "prot");
2008 walk_memory_regions(f, dump_region);
bellard33417e72003-08-10 21:47:01 +00002009}
2010
pbrook53a59602006-03-25 19:31:22 +00002011int page_get_flags(target_ulong address)
bellard33417e72003-08-10 21:47:01 +00002012{
bellard9fa3e852004-01-04 18:06:42 +00002013 PageDesc *p;
2014
2015 p = page_find(address >> TARGET_PAGE_BITS);
bellard33417e72003-08-10 21:47:01 +00002016 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00002017 return 0;
2018 return p->flags;
bellard33417e72003-08-10 21:47:01 +00002019}
2020
Richard Henderson376a7902010-03-10 15:57:04 -08002021/* Modify the flags of a page and invalidate the code if necessary.
 2022   The flag PAGE_WRITE_ORG is set automatically depending
2023 on PAGE_WRITE. The mmap_lock should already be held. */
pbrook53a59602006-03-25 19:31:22 +00002024void page_set_flags(target_ulong start, target_ulong end, int flags)
bellard9fa3e852004-01-04 18:06:42 +00002025{
Richard Henderson376a7902010-03-10 15:57:04 -08002026 target_ulong addr, len;
bellard9fa3e852004-01-04 18:06:42 +00002027
Richard Henderson376a7902010-03-10 15:57:04 -08002028 /* This function should never be called with addresses outside the
2029 guest address space. If this assert fires, it probably indicates
2030 a missing call to h2g_valid. */
Paul Brookb480d9b2010-03-12 23:23:29 +00002031#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2032 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002033#endif
2034 assert(start < end);
2035
bellard9fa3e852004-01-04 18:06:42 +00002036 start = start & TARGET_PAGE_MASK;
2037 end = TARGET_PAGE_ALIGN(end);
Richard Henderson376a7902010-03-10 15:57:04 -08002038
2039 if (flags & PAGE_WRITE) {
bellard9fa3e852004-01-04 18:06:42 +00002040 flags |= PAGE_WRITE_ORG;
Richard Henderson376a7902010-03-10 15:57:04 -08002041 }
2042
2043 for (addr = start, len = end - start;
2044 len != 0;
2045 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2046 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2047
2048 /* If the write protection bit is set, then we invalidate
2049 the code inside. */
ths5fafdf22007-09-16 21:08:06 +00002050 if (!(p->flags & PAGE_WRITE) &&
bellard9fa3e852004-01-04 18:06:42 +00002051 (flags & PAGE_WRITE) &&
2052 p->first_tb) {
bellardd720b932004-04-25 17:57:43 +00002053 tb_invalidate_phys_page(addr, 0, NULL);
bellard9fa3e852004-01-04 18:06:42 +00002054 }
2055 p->flags = flags;
2056 }
bellard9fa3e852004-01-04 18:06:42 +00002057}
2058
ths3d97b402007-11-02 19:02:07 +00002059int page_check_range(target_ulong start, target_ulong len, int flags)
2060{
2061 PageDesc *p;
2062 target_ulong end;
2063 target_ulong addr;
2064
Richard Henderson376a7902010-03-10 15:57:04 -08002065 /* This function should never be called with addresses outside the
2066 guest address space. If this assert fires, it probably indicates
2067 a missing call to h2g_valid. */
Blue Swirl338e9e62010-03-13 09:48:08 +00002068#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2069 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002070#endif
2071
Richard Henderson3e0650a2010-03-29 10:54:42 -07002072 if (len == 0) {
2073 return 0;
2074 }
Richard Henderson376a7902010-03-10 15:57:04 -08002075 if (start + len - 1 < start) {
2076 /* We've wrapped around. */
balrog55f280c2008-10-28 10:24:11 +00002077 return -1;
Richard Henderson376a7902010-03-10 15:57:04 -08002078 }
balrog55f280c2008-10-28 10:24:11 +00002079
ths3d97b402007-11-02 19:02:07 +00002080    end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
2081 start = start & TARGET_PAGE_MASK;
2082
Richard Henderson376a7902010-03-10 15:57:04 -08002083 for (addr = start, len = end - start;
2084 len != 0;
2085 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
ths3d97b402007-11-02 19:02:07 +00002086 p = page_find(addr >> TARGET_PAGE_BITS);
2087 if( !p )
2088 return -1;
2089 if( !(p->flags & PAGE_VALID) )
2090 return -1;
2091
bellarddae32702007-11-14 10:51:00 +00002092 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
ths3d97b402007-11-02 19:02:07 +00002093 return -1;
bellarddae32702007-11-14 10:51:00 +00002094 if (flags & PAGE_WRITE) {
2095 if (!(p->flags & PAGE_WRITE_ORG))
2096 return -1;
 2097            /* unprotect the page if it was made read-only because it
2098 contains translated code */
2099 if (!(p->flags & PAGE_WRITE)) {
2100 if (!page_unprotect(addr, 0, NULL))
2101 return -1;
2102 }
2103 return 0;
2104 }
ths3d97b402007-11-02 19:02:07 +00002105 }
2106 return 0;
2107}
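
/* Usage sketch (not part of the original file): how a syscall emulation
   path might validate a guest buffer with page_check_range() before
   touching it. */
static int example_validate_guest_buffer(target_ulong addr, target_ulong len)
{
    if (page_check_range(addr, len, PAGE_READ | PAGE_WRITE) < 0) {
        return -1; /* unmapped, or lacking the required permissions */
    }
    return 0; /* write-protected code pages were unprotected on the way */
}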
2108
bellard9fa3e852004-01-04 18:06:42 +00002109/* called from signal handler: invalidate the code and unprotect the
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002110 page. Return TRUE if the fault was successfully handled. */
Stefan Weil6375e092012-04-06 22:26:15 +02002111int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00002112{
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002113 unsigned int prot;
2114 PageDesc *p;
pbrook53a59602006-03-25 19:31:22 +00002115 target_ulong host_start, host_end, addr;
bellard9fa3e852004-01-04 18:06:42 +00002116
pbrookc8a706f2008-06-02 16:16:42 +00002117 /* Technically this isn't safe inside a signal handler. However we
2118 know this only ever happens in a synchronous SEGV handler, so in
2119 practice it seems to be ok. */
2120 mmap_lock();
2121
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002122 p = page_find(address >> TARGET_PAGE_BITS);
2123 if (!p) {
pbrookc8a706f2008-06-02 16:16:42 +00002124 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002125 return 0;
pbrookc8a706f2008-06-02 16:16:42 +00002126 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002127
bellard9fa3e852004-01-04 18:06:42 +00002128 /* if the page was really writable, then we change its
2129 protection back to writable */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002130 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2131 host_start = address & qemu_host_page_mask;
2132 host_end = host_start + qemu_host_page_size;
2133
2134 prot = 0;
2135 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2136 p = page_find(addr >> TARGET_PAGE_BITS);
2137 p->flags |= PAGE_WRITE;
2138 prot |= p->flags;
2139
bellard9fa3e852004-01-04 18:06:42 +00002140 /* and since the content will be modified, we must invalidate
2141 the corresponding translated code. */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002142 tb_invalidate_phys_page(addr, pc, puc);
bellard9fa3e852004-01-04 18:06:42 +00002143#ifdef DEBUG_TB_CHECK
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002144 tb_invalidate_check(addr);
bellard9fa3e852004-01-04 18:06:42 +00002145#endif
bellard9fa3e852004-01-04 18:06:42 +00002146 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002147 mprotect((void *)g2h(host_start), qemu_host_page_size,
2148 prot & PAGE_BITS);
2149
2150 mmap_unlock();
2151 return 1;
bellard9fa3e852004-01-04 18:06:42 +00002152 }
pbrookc8a706f2008-06-02 16:16:42 +00002153 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002154 return 0;
2155}
bellard9fa3e852004-01-04 18:06:42 +00002156#endif /* defined(CONFIG_USER_ONLY) */
2157
pbrooke2eef172008-06-08 01:09:01 +00002158#if !defined(CONFIG_USER_ONLY)
pbrook8da3ff12008-12-01 18:59:50 +00002159
Paul Brookc04b2b72010-03-01 03:31:14 +00002160#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2161typedef struct subpage_t {
Avi Kivity70c68e42012-01-02 12:32:48 +02002162 MemoryRegion iomem;
Paul Brookc04b2b72010-03-01 03:31:14 +00002163 target_phys_addr_t base;
Avi Kivity5312bd82012-02-12 18:32:55 +02002164 uint16_t sub_section[TARGET_PAGE_SIZE];
Paul Brookc04b2b72010-03-01 03:31:14 +00002165} subpage_t;
2166
Anthony Liguoric227f092009-10-01 16:12:16 -05002167static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002168 uint16_t section);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002169static subpage_t *subpage_init(target_phys_addr_t base);

Avi Kivity5312bd82012-02-12 18:32:55 +02002170static void destroy_page_desc(uint16_t section_index)
Avi Kivity54688b12012-02-09 17:34:32 +02002171{
Avi Kivity5312bd82012-02-12 18:32:55 +02002172 MemoryRegionSection *section = &phys_sections[section_index];
2173 MemoryRegion *mr = section->mr;
Avi Kivity54688b12012-02-09 17:34:32 +02002174
2175 if (mr->subpage) {
2176 subpage_t *subpage = container_of(mr, subpage_t, iomem);
2177 memory_region_destroy(&subpage->iomem);
2178 g_free(subpage);
2179 }
2180}
2181
Avi Kivity4346ae32012-02-10 17:00:01 +02002182static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
Avi Kivity54688b12012-02-09 17:34:32 +02002183{
2184 unsigned i;
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002185 PhysPageEntry *p;
Avi Kivity54688b12012-02-09 17:34:32 +02002186
Avi Kivityc19e8802012-02-13 20:25:31 +02002187 if (lp->ptr == PHYS_MAP_NODE_NIL) {
Avi Kivity54688b12012-02-09 17:34:32 +02002188 return;
2189 }
2190
Avi Kivityc19e8802012-02-13 20:25:31 +02002191 p = phys_map_nodes[lp->ptr];
Avi Kivity4346ae32012-02-10 17:00:01 +02002192 for (i = 0; i < L2_SIZE; ++i) {
Avi Kivity07f07b32012-02-13 20:45:32 +02002193 if (!p[i].is_leaf) {
Avi Kivity54688b12012-02-09 17:34:32 +02002194 destroy_l2_mapping(&p[i], level - 1);
Avi Kivity4346ae32012-02-10 17:00:01 +02002195 } else {
Avi Kivityc19e8802012-02-13 20:25:31 +02002196 destroy_page_desc(p[i].ptr);
Avi Kivity54688b12012-02-09 17:34:32 +02002197 }
Avi Kivity54688b12012-02-09 17:34:32 +02002198 }
Avi Kivity07f07b32012-02-13 20:45:32 +02002199 lp->is_leaf = 0;
Avi Kivityc19e8802012-02-13 20:25:31 +02002200 lp->ptr = PHYS_MAP_NODE_NIL;
Avi Kivity54688b12012-02-09 17:34:32 +02002201}
2202
Avi Kivityac1970f2012-10-03 16:22:53 +02002203static void destroy_all_mappings(AddressSpaceDispatch *d)
Avi Kivity54688b12012-02-09 17:34:32 +02002204{
Avi Kivityac1970f2012-10-03 16:22:53 +02002205 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002206 phys_map_nodes_reset();
Avi Kivity54688b12012-02-09 17:34:32 +02002207}
2208
Avi Kivity5312bd82012-02-12 18:32:55 +02002209static uint16_t phys_section_add(MemoryRegionSection *section)
2210{
2211 if (phys_sections_nb == phys_sections_nb_alloc) {
2212 phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
2213 phys_sections = g_renew(MemoryRegionSection, phys_sections,
2214 phys_sections_nb_alloc);
2215 }
2216 phys_sections[phys_sections_nb] = *section;
2217 return phys_sections_nb++;
2218}
2219
2220static void phys_sections_clear(void)
2221{
2222 phys_sections_nb = 0;
2223}
2224
Avi Kivityac1970f2012-10-03 16:22:53 +02002225static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02002226{
2227 subpage_t *subpage;
2228 target_phys_addr_t base = section->offset_within_address_space
2229 & TARGET_PAGE_MASK;
Avi Kivityac1970f2012-10-03 16:22:53 +02002230 MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002231 MemoryRegionSection subsection = {
2232 .offset_within_address_space = base,
2233 .size = TARGET_PAGE_SIZE,
2234 };
Avi Kivity0f0cb162012-02-13 17:14:32 +02002235 target_phys_addr_t start, end;
2236
Avi Kivityf3705d52012-03-08 16:16:34 +02002237 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002238
Avi Kivityf3705d52012-03-08 16:16:34 +02002239 if (!(existing->mr->subpage)) {
Avi Kivity0f0cb162012-02-13 17:14:32 +02002240 subpage = subpage_init(base);
2241 subsection.mr = &subpage->iomem;
Avi Kivityac1970f2012-10-03 16:22:53 +02002242 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
Avi Kivity29990972012-02-13 20:21:20 +02002243 phys_section_add(&subsection));
Avi Kivity0f0cb162012-02-13 17:14:32 +02002244 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02002245 subpage = container_of(existing->mr, subpage_t, iomem);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002246 }
2247 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
Tyler Halladb2a9b2012-07-25 18:45:03 -04002248 end = start + section->size - 1;
Avi Kivity0f0cb162012-02-13 17:14:32 +02002249 subpage_register(subpage, start, end, phys_section_add(section));
2250}
2251
Avi Kivityac1970f2012-10-03 16:22:53 +02002253static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
bellard33417e72003-08-10 21:47:01 +00002254{
Avi Kivitydd811242012-01-02 12:17:03 +02002255 target_phys_addr_t start_addr = section->offset_within_address_space;
2256 ram_addr_t size = section->size;
Avi Kivity29990972012-02-13 20:21:20 +02002257 target_phys_addr_t addr;
Avi Kivity5312bd82012-02-12 18:32:55 +02002258 uint16_t section_index = phys_section_add(section);
Avi Kivitydd811242012-01-02 12:17:03 +02002259
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002260 assert(size);
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002261
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002262 addr = start_addr;
Avi Kivityac1970f2012-10-03 16:22:53 +02002263 phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
Avi Kivity29990972012-02-13 20:21:20 +02002264 section_index);
bellard33417e72003-08-10 21:47:01 +00002265}
2266
Avi Kivityac1970f2012-10-03 16:22:53 +02002267static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02002268{
Avi Kivityac1970f2012-10-03 16:22:53 +02002269 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002270 MemoryRegionSection now = *section, remain = *section;
2271
2272 if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
2273 || (now.size < TARGET_PAGE_SIZE)) {
2274 now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
2275 - now.offset_within_address_space,
2276 now.size);
Avi Kivityac1970f2012-10-03 16:22:53 +02002277 register_subpage(d, &now);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002278 remain.size -= now.size;
2279 remain.offset_within_address_space += now.size;
2280 remain.offset_within_region += now.size;
2281 }
Tyler Hall69b67642012-07-25 18:45:04 -04002282 while (remain.size >= TARGET_PAGE_SIZE) {
2283 now = remain;
2284 if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
2285 now.size = TARGET_PAGE_SIZE;
Avi Kivityac1970f2012-10-03 16:22:53 +02002286 register_subpage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04002287 } else {
2288 now.size &= TARGET_PAGE_MASK;
Avi Kivityac1970f2012-10-03 16:22:53 +02002289 register_multipage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04002290 }
Avi Kivity0f0cb162012-02-13 17:14:32 +02002291 remain.size -= now.size;
2292 remain.offset_within_address_space += now.size;
2293 remain.offset_within_region += now.size;
2294 }
2295 now = remain;
2296 if (now.size) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002297 register_subpage(d, &now);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002298 }
2299}
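
/* Worked example (hypothetical numbers, not part of the original file):
   how mem_add() above splits an unaligned section, assuming 4 KiB
   target pages.  A section at address-space offset 0x1800 with size
   0x3400 becomes three registrations; a page-sized piece whose
   offset_within_region is misaligned also goes through
   register_subpage(). */
static void example_section_split(void)
{
    target_phys_addr_t ofs = 0x1800;  /* section start */
    ram_addr_t size = 0x3400;         /* section ends at 0x4c00 */

    target_phys_addr_t head = TARGET_PAGE_ALIGN(ofs) - ofs; /* 0x0800: subpage */
    ram_addr_t body = (size - head) & TARGET_PAGE_MASK;     /* 0x2000: multipage */
    ram_addr_t tail = size - head - body;                   /* 0x0c00: subpage */

    (void)body; (void)tail;
}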
2300
Sheng Yang62a27442010-01-26 19:21:16 +08002301void qemu_flush_coalesced_mmio_buffer(void)
2302{
2303 if (kvm_enabled())
2304 kvm_flush_coalesced_mmio_buffer();
2305}
2306
Marcelo Tosattic9027602010-03-01 20:25:08 -03002307#if defined(__linux__) && !defined(TARGET_S390X)
2308
2309#include <sys/vfs.h>
2310
2311#define HUGETLBFS_MAGIC 0x958458f6
2312
2313static long gethugepagesize(const char *path)
2314{
2315 struct statfs fs;
2316 int ret;
2317
2318 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002319 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002320 } while (ret != 0 && errno == EINTR);
2321
2322 if (ret != 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002323 perror(path);
2324 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002325 }
2326
2327 if (fs.f_type != HUGETLBFS_MAGIC)
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002328 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002329
2330 return fs.f_bsize;
2331}
2332
Alex Williamson04b16652010-07-02 11:13:17 -06002333static void *file_ram_alloc(RAMBlock *block,
2334 ram_addr_t memory,
2335 const char *path)
Marcelo Tosattic9027602010-03-01 20:25:08 -03002336{
2337 char *filename;
2338 void *area;
2339 int fd;
2340#ifdef MAP_POPULATE
2341 int flags;
2342#endif
2343 unsigned long hpagesize;
2344
2345 hpagesize = gethugepagesize(path);
2346 if (!hpagesize) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002347 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002348 }
2349
2350 if (memory < hpagesize) {
2351 return NULL;
2352 }
2353
2354 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2355 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2356 return NULL;
2357 }
2358
2359 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002360 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002361 }
2362
2363 fd = mkstemp(filename);
2364 if (fd < 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002365 perror("unable to create backing store for hugepages");
2366 free(filename);
2367 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002368 }
2369 unlink(filename);
2370 free(filename);
2371
 2372    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);
2373
2374 /*
2375 * ftruncate is not supported by hugetlbfs in older
2376 * hosts, so don't bother bailing out on errors.
2377 * If anything goes wrong with it under other filesystems,
2378 * mmap will fail.
2379 */
2380 if (ftruncate(fd, memory))
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002381 perror("ftruncate");
Marcelo Tosattic9027602010-03-01 20:25:08 -03002382
2383#ifdef MAP_POPULATE
2384 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2385 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2386 * to sidestep this quirk.
2387 */
2388 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2389 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2390#else
2391 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2392#endif
2393 if (area == MAP_FAILED) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002394 perror("file_ram_alloc: can't mmap RAM pages");
2395 close(fd);
2396 return (NULL);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002397 }
Alex Williamson04b16652010-07-02 11:13:17 -06002398 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002399 return area;
2400}
2401#endif
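
/* Illustrative sketch (not part of the original file): the rounding used
   by file_ram_alloc() above, isolated.  It pads a size up to a whole
   number of huge pages; the mask trick requires hpagesize to be a power
   of two.  E.g. 5 MiB with 2 MiB huge pages rounds to 6 MiB. */
static ram_addr_t example_round_to_hugepage(ram_addr_t memory,
                                            unsigned long hpagesize)
{
    return (memory + hpagesize - 1) & ~(hpagesize - 1);
}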
2402
Alex Williamsond17b5282010-06-25 11:08:38 -06002403static ram_addr_t find_ram_offset(ram_addr_t size)
2404{
Alex Williamson04b16652010-07-02 11:13:17 -06002405 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06002406 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002407
2408 if (QLIST_EMPTY(&ram_list.blocks))
2409 return 0;
2410
2411 QLIST_FOREACH(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002412 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002413
2414 end = block->offset + block->length;
2415
2416 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2417 if (next_block->offset >= end) {
2418 next = MIN(next, next_block->offset);
2419 }
2420 }
2421 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06002422 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06002423 mingap = next - end;
2424 }
2425 }
Alex Williamson3e837b22011-10-31 08:54:09 -06002426
2427 if (offset == RAM_ADDR_MAX) {
2428 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2429 (uint64_t)size);
2430 abort();
2431 }
2432
Alex Williamson04b16652010-07-02 11:13:17 -06002433 return offset;
2434}
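
/* Worked example (hypothetical block layout, not part of the original
   file): with blocks [0, 0x100000) and [0x300000, 0x400000) registered,
   the best-fit search above returns 0x100000 for a 0x100000-byte
   request: the 0x200000-byte gap between the blocks fits and is the
   smallest gap that does. */
static ram_addr_t example_gap_search(void)
{
    return find_ram_offset(0x100000); /* == 0x100000 in the layout above */
}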
2435
2436static ram_addr_t last_ram_offset(void)
2437{
Alex Williamsond17b5282010-06-25 11:08:38 -06002438 RAMBlock *block;
2439 ram_addr_t last = 0;
2440
2441 QLIST_FOREACH(block, &ram_list.blocks, next)
2442 last = MAX(last, block->offset + block->length);
2443
2444 return last;
2445}
2446
Jason Baronddb97f12012-08-02 15:44:16 -04002447static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
2448{
2449 int ret;
2450 QemuOpts *machine_opts;
2451
 2452    /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
2453 machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
2454 if (machine_opts &&
2455 !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
2456 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
2457 if (ret) {
2458 perror("qemu_madvise");
2459 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
2460 "but dump_guest_core=off specified\n");
2461 }
2462 }
2463}
2464
Avi Kivityc5705a72011-12-20 15:59:12 +02002465void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
Cam Macdonell84b89d72010-07-26 18:10:57 -06002466{
2467 RAMBlock *new_block, *block;
2468
Avi Kivityc5705a72011-12-20 15:59:12 +02002469 new_block = NULL;
2470 QLIST_FOREACH(block, &ram_list.blocks, next) {
2471 if (block->offset == addr) {
2472 new_block = block;
2473 break;
2474 }
2475 }
2476 assert(new_block);
2477 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002478
Anthony Liguori09e5ab62012-02-03 12:28:43 -06002479 if (dev) {
2480 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002481 if (id) {
2482 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05002483 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002484 }
2485 }
2486 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2487
2488 QLIST_FOREACH(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02002489 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06002490 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2491 new_block->idstr);
2492 abort();
2493 }
2494 }
Avi Kivityc5705a72011-12-20 15:59:12 +02002495}
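/* Example (illustrative, hypothetical qdev path): for a region named
 * "ivshmem.bar2" on a device whose qdev path is "/pci0/ivshmem", the
 * resulting idstr is "/pci0/ivshmem/ivshmem.bar2"; without a device it is
 * just "ivshmem.bar2".  Duplicate idstrs abort because migration matches
 * RAM blocks between source and destination by idstr.
 */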
2496
Luiz Capitulino8490fc72012-09-05 16:50:16 -03002497static int memory_try_enable_merging(void *addr, size_t len)
2498{
2499 QemuOpts *opts;
2500
2501 opts = qemu_opts_find(qemu_find_opts("machine"), 0);
2502 if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
2503 /* disabled by the user */
2504 return 0;
2505 }
2506
2507 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
2508}
2509
Avi Kivityc5705a72011-12-20 15:59:12 +02002510ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2511 MemoryRegion *mr)
2512{
2513 RAMBlock *new_block;
2514
2515 size = TARGET_PAGE_ALIGN(size);
2516 new_block = g_malloc0(sizeof(*new_block));
Cam Macdonell84b89d72010-07-26 18:10:57 -06002517
Avi Kivity7c637362011-12-21 13:09:49 +02002518 new_block->mr = mr;
Jun Nakajima432d2682010-08-31 16:41:25 +01002519 new_block->offset = find_ram_offset(size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002520 if (host) {
2521 new_block->host = host;
Huang Yingcd19cfa2011-03-02 08:56:19 +01002522 new_block->flags |= RAM_PREALLOC_MASK;
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002523 } else {
2524 if (mem_path) {
2525#if defined (__linux__) && !defined(TARGET_S390X)
2526 new_block->host = file_ram_alloc(new_block, size, mem_path);
2527 if (!new_block->host) {
2528 new_block->host = qemu_vmalloc(size);
Luiz Capitulino8490fc72012-09-05 16:50:16 -03002529 memory_try_enable_merging(new_block->host, size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002530 }
2531#else
2532 fprintf(stderr, "-mem-path option unsupported\n");
2533 exit(1);
2534#endif
2535 } else {
Jan Kiszka868bb332011-06-21 22:59:09 +02002536 if (xen_enabled()) {
Avi Kivityfce537d2011-12-18 15:48:55 +02002537 xen_ram_alloc(new_block->offset, size, mr);
Christian Borntraegerfdec9912012-06-15 05:10:30 +00002538 } else if (kvm_enabled()) {
2539 /* some s390/kvm configurations have special constraints */
2540 new_block->host = kvm_vmalloc(size);
Jun Nakajima432d2682010-08-31 16:41:25 +01002541 } else {
2542 new_block->host = qemu_vmalloc(size);
2543 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03002544 memory_try_enable_merging(new_block->host, size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002545 }
2546 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06002547 new_block->length = size;
2548
2549 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2550
Anthony Liguori7267c092011-08-20 22:09:37 -05002551 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
Cam Macdonell84b89d72010-07-26 18:10:57 -06002552 last_ram_offset() >> TARGET_PAGE_BITS);
Igor Mitsyanko5fda0432012-08-10 18:45:11 +04002553 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2554 0, size >> TARGET_PAGE_BITS);
Juan Quintela1720aee2012-06-22 13:14:17 +02002555 cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002556
Jason Baronddb97f12012-08-02 15:44:16 -04002557 qemu_ram_setup_dump(new_block->host, size);
2558
Cam Macdonell84b89d72010-07-26 18:10:57 -06002559 if (kvm_enabled())
2560 kvm_setup_guest_memory(new_block->host, size);
2561
2562 return new_block->offset;
2563}
2564
Avi Kivityc5705a72011-12-20 15:59:12 +02002565ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
pbrook94a6b542009-04-11 17:15:54 +00002566{
Avi Kivityc5705a72011-12-20 15:59:12 +02002567 return qemu_ram_alloc_from_ptr(size, NULL, mr);
pbrook94a6b542009-04-11 17:15:54 +00002568}
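/* Usage sketch (illustrative only; names are hypothetical): this is roughly
 * the sequence memory_region_init_ram() and the vmstate RAM registration
 * helpers perform when a board model brings up a RAM-backed region.
 */
#if 0
static void example_vram_alloc(MemoryRegion *mr)
{
    const ram_addr_t size = 8 * 1024 * 1024;

    memory_region_init(mr, "example.vram", size);
    /* Back the region with host memory; returns the block's ram_addr_t. */
    mr->ram_addr = qemu_ram_alloc(size, mr);
    /* Name the block so migration can match it on the destination. */
    qemu_ram_set_idstr(mr->ram_addr, "example.vram", NULL);
}
#endif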
bellarde9a1ab12007-02-08 23:08:38 +00002569
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002570void qemu_ram_free_from_ptr(ram_addr_t addr)
2571{
2572 RAMBlock *block;
2573
2574 QLIST_FOREACH(block, &ram_list.blocks, next) {
2575 if (addr == block->offset) {
2576 QLIST_REMOVE(block, next);
Anthony Liguori7267c092011-08-20 22:09:37 -05002577 g_free(block);
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002578 return;
2579 }
2580 }
2581}
2582
Anthony Liguoric227f092009-10-01 16:12:16 -05002583void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00002584{
Alex Williamson04b16652010-07-02 11:13:17 -06002585 RAMBlock *block;
2586
2587 QLIST_FOREACH(block, &ram_list.blocks, next) {
2588 if (addr == block->offset) {
2589 QLIST_REMOVE(block, next);
Huang Yingcd19cfa2011-03-02 08:56:19 +01002590 if (block->flags & RAM_PREALLOC_MASK) {
2591 ;
2592 } else if (mem_path) {
Alex Williamson04b16652010-07-02 11:13:17 -06002593#if defined (__linux__) && !defined(TARGET_S390X)
2594 if (block->fd) {
2595 munmap(block->host, block->length);
2596 close(block->fd);
2597 } else {
2598 qemu_vfree(block->host);
2599 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01002600#else
2601 abort();
Alex Williamson04b16652010-07-02 11:13:17 -06002602#endif
2603 } else {
2604#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2605 munmap(block->host, block->length);
2606#else
Jan Kiszka868bb332011-06-21 22:59:09 +02002607 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002608 xen_invalidate_map_cache_entry(block->host);
Jun Nakajima432d2682010-08-31 16:41:25 +01002609 } else {
2610 qemu_vfree(block->host);
2611 }
Alex Williamson04b16652010-07-02 11:13:17 -06002612#endif
2613 }
Anthony Liguori7267c092011-08-20 22:09:37 -05002614 g_free(block);
Alex Williamson04b16652010-07-02 11:13:17 -06002615 return;
2616 }
2617 }
2618
bellarde9a1ab12007-02-08 23:08:38 +00002619}
2620
Huang Yingcd19cfa2011-03-02 08:56:19 +01002621#ifndef _WIN32
2622void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2623{
2624 RAMBlock *block;
2625 ram_addr_t offset;
2626 int flags;
2627 void *area, *vaddr;
2628
2629 QLIST_FOREACH(block, &ram_list.blocks, next) {
2630 offset = addr - block->offset;
2631 if (offset < block->length) {
2632 vaddr = block->host + offset;
2633 if (block->flags & RAM_PREALLOC_MASK) {
2634 ;
2635 } else {
2636 flags = MAP_FIXED;
2637 munmap(vaddr, length);
2638 if (mem_path) {
2639#if defined(__linux__) && !defined(TARGET_S390X)
2640 if (block->fd) {
2641#ifdef MAP_POPULATE
2642 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
2643 MAP_PRIVATE;
2644#else
2645 flags |= MAP_PRIVATE;
2646#endif
2647 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2648 flags, block->fd, offset);
2649 } else {
2650 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2651 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2652 flags, -1, 0);
2653 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01002654#else
2655 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01002656#endif
2657 } else {
2658#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2659 flags |= MAP_SHARED | MAP_ANONYMOUS;
2660 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
2661 flags, -1, 0);
2662#else
2663 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2664 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2665 flags, -1, 0);
2666#endif
2667 }
2668 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002669 fprintf(stderr, "Could not remap addr: "
2670 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01002671 length, addr);
2672 exit(1);
2673 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03002674 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04002675 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01002676 }
2677 return;
2678 }
2679 }
2680}
2681#endif /* !_WIN32 */
2682
pbrookdc828ca2009-04-09 22:21:07 +00002683/* Return a host pointer to ram allocated with qemu_ram_alloc.
pbrook5579c7f2009-04-11 14:47:08 +00002684 With the exception of the softmmu code in this file, this should
2685 only be used for local memory (e.g. video ram) that the device owns,
2686 and knows it isn't going to access beyond the end of the block.
2687
2688 It should not be used for general purpose DMA.
2689 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2690 */
Anthony Liguoric227f092009-10-01 16:12:16 -05002691void *qemu_get_ram_ptr(ram_addr_t addr)
pbrookdc828ca2009-04-09 22:21:07 +00002692{
pbrook94a6b542009-04-11 17:15:54 +00002693 RAMBlock *block;
2694
Alex Williamsonf471a172010-06-11 11:11:42 -06002695 QLIST_FOREACH(block, &ram_list.blocks, next) {
2696 if (addr - block->offset < block->length) {
            /* Move this entry to the start of the list.  */
2698 if (block != QLIST_FIRST(&ram_list.blocks)) {
2699 QLIST_REMOVE(block, next);
2700 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
2701 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002702 if (xen_enabled()) {
                /* We need to check whether the requested address is in RAM
                 * because we don't want to map the guest's entire memory in
                 * QEMU.  In that case, just map up to the end of the page.
                 */
2707 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002708 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01002709 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002710 block->host =
2711 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01002712 }
2713 }
Alex Williamsonf471a172010-06-11 11:11:42 -06002714 return block->host + (addr - block->offset);
2715 }
pbrook94a6b542009-04-11 17:15:54 +00002716 }
Alex Williamsonf471a172010-06-11 11:11:42 -06002717
2718 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2719 abort();
2720
2721 return NULL;
pbrookdc828ca2009-04-09 22:21:07 +00002722}
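/* Usage sketch (illustrative, hypothetical names): a display device that
 * owns its RAM block may cache the host pointer for scanout,
 *
 *     uint8_t *vram = qemu_get_ram_ptr(s->vram_offset);
 *     memset(vram, 0, s->vram_size);
 *
 * but, per the comment above, general-purpose DMA must go through
 * cpu_physical_memory_map()/cpu_physical_memory_rw() instead.
 */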
2723
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02002724/* Return a host pointer to ram allocated with qemu_ram_alloc.
2725 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
2726 */
2727void *qemu_safe_ram_ptr(ram_addr_t addr)
2728{
2729 RAMBlock *block;
2730
2731 QLIST_FOREACH(block, &ram_list.blocks, next) {
2732 if (addr - block->offset < block->length) {
Jan Kiszka868bb332011-06-21 22:59:09 +02002733 if (xen_enabled()) {
                /* We need to check whether the requested address is in RAM
                 * because we don't want to map the guest's entire memory in
                 * QEMU.  In that case, just map up to the end of the page.
                 */
2738 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002739 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01002740 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002741 block->host =
2742 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01002743 }
2744 }
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02002745 return block->host + (addr - block->offset);
2746 }
2747 }
2748
2749 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2750 abort();
2751
2752 return NULL;
2753}
2754
/* Return a host pointer to the guest's RAM.  Similar to qemu_get_ram_ptr
 * but takes a size argument. */
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002757void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002758{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002759 if (*size == 0) {
2760 return NULL;
2761 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002762 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002763 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02002764 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002765 RAMBlock *block;
2766
2767 QLIST_FOREACH(block, &ram_list.blocks, next) {
2768 if (addr - block->offset < block->length) {
2769 if (addr - block->offset + *size > block->length)
2770 *size = block->length - addr + block->offset;
2771 return block->host + (addr - block->offset);
2772 }
2773 }
2774
2775 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2776 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002777 }
2778}
2779
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002780void qemu_put_ram_ptr(void *addr)
2781{
2782 trace_qemu_put_ram_ptr(addr);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002783}
2784
Marcelo Tosattie8902612010-10-11 15:31:19 -03002785int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00002786{
pbrook94a6b542009-04-11 17:15:54 +00002787 RAMBlock *block;
2788 uint8_t *host = ptr;
2789
Jan Kiszka868bb332011-06-21 22:59:09 +02002790 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002791 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Stefano Stabellini712c2b42011-05-19 18:35:46 +01002792 return 0;
2793 }
2794
Alex Williamsonf471a172010-06-11 11:11:42 -06002795 QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped.  */
2797 if (block->host == NULL) {
2798 continue;
2799 }
Alex Williamsonf471a172010-06-11 11:11:42 -06002800 if (host - block->host < block->length) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03002801 *ram_addr = block->offset + (host - block->host);
2802 return 0;
Alex Williamsonf471a172010-06-11 11:11:42 -06002803 }
pbrook94a6b542009-04-11 17:15:54 +00002804 }
Jun Nakajima432d2682010-08-31 16:41:25 +01002805
Marcelo Tosattie8902612010-10-11 15:31:19 -03002806 return -1;
2807}
Alex Williamsonf471a172010-06-11 11:11:42 -06002808
Marcelo Tosattie8902612010-10-11 15:31:19 -03002809/* Some of the softmmu routines need to translate from a host pointer
2810 (typically a TLB entry) back to a ram offset. */
2811ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
2812{
2813 ram_addr_t ram_addr;
Alex Williamsonf471a172010-06-11 11:11:42 -06002814
Marcelo Tosattie8902612010-10-11 15:31:19 -03002815 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
2816 fprintf(stderr, "Bad ram pointer %p\n", ptr);
2817 abort();
2818 }
2819 return ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00002820}
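/* Round-trip sketch (illustrative; "offset" is hypothetical): the two
 * translations are inverses for any address inside a registered block.
 */
#if 0
    void *host = qemu_get_ram_ptr(offset);                   /* offset -> host */
    assert(qemu_ram_addr_from_host_nofail(host) == offset);  /* host -> offset */
#endif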
2821
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002822static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
2823 unsigned size)
bellard33417e72003-08-10 21:47:01 +00002824{
pbrook67d3b952006-12-18 05:03:52 +00002825#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00002826 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
pbrook67d3b952006-12-18 05:03:52 +00002827#endif
Richard Henderson5b450402011-04-18 16:13:12 -07002828#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002829 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00002830#endif
2831 return 0;
2832}
2833
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002834static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
2835 uint64_t val, unsigned size)
blueswir1e18231a2008-10-06 18:46:28 +00002836{
2837#ifdef DEBUG_UNASSIGNED
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002838 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
blueswir1e18231a2008-10-06 18:46:28 +00002839#endif
Richard Henderson5b450402011-04-18 16:13:12 -07002840#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002841 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00002842#endif
2843}
2844
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002845static const MemoryRegionOps unassigned_mem_ops = {
2846 .read = unassigned_mem_read,
2847 .write = unassigned_mem_write,
2848 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00002849};
2850
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002851static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
2852 unsigned size)
2853{
2854 abort();
2855}
2856
2857static void error_mem_write(void *opaque, target_phys_addr_t addr,
2858 uint64_t value, unsigned size)
2859{
2860 abort();
2861}
2862
2863static const MemoryRegionOps error_mem_ops = {
2864 .read = error_mem_read,
2865 .write = error_mem_write,
2866 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00002867};
2868
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002869static const MemoryRegionOps rom_mem_ops = {
2870 .read = error_mem_read,
2871 .write = unassigned_mem_write,
2872 .endianness = DEVICE_NATIVE_ENDIAN,
2873};
2874
2875static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
2876 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00002877{
bellard3a7d9292005-08-21 09:26:42 +00002878 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002879 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00002880 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2881#if !defined(CONFIG_USER_ONLY)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002882 tb_invalidate_phys_page_fast(ram_addr, size);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002883 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00002884#endif
2885 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002886 switch (size) {
2887 case 1:
2888 stb_p(qemu_get_ram_ptr(ram_addr), val);
2889 break;
2890 case 2:
2891 stw_p(qemu_get_ram_ptr(ram_addr), val);
2892 break;
2893 case 4:
2894 stl_p(qemu_get_ram_ptr(ram_addr), val);
2895 break;
2896 default:
2897 abort();
2898 }
bellardf23db162005-08-21 19:12:28 +00002899 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002900 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00002901 /* we remove the notdirty callback only if the code has been
2902 flushed */
2903 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00002904 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00002905}
2906
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002907static const MemoryRegionOps notdirty_mem_ops = {
2908 .read = error_mem_read,
2909 .write = notdirty_mem_write,
2910 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00002911};
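/* Flow sketch: a write to a page that still holds translated code is routed
 * through io_mem_notdirty.  notdirty_mem_write() invalidates any affected
 * TBs, performs the store, and updates the dirty flags; once the page is
 * fully dirty (0xff) the TLB entry is switched back to a plain RAM mapping,
 * so subsequent writes take the fast path again.
 */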
2912
pbrook0f459d12008-06-09 00:20:13 +00002913/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00002914static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00002915{
Andreas Färber9349b4f2012-03-14 01:38:32 +01002916 CPUArchState *env = cpu_single_env;
aliguori06d55cc2008-11-18 20:24:06 +00002917 target_ulong pc, cs_base;
2918 TranslationBlock *tb;
pbrook0f459d12008-06-09 00:20:13 +00002919 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00002920 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00002921 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00002922
aliguori06d55cc2008-11-18 20:24:06 +00002923 if (env->watchpoint_hit) {
2924 /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
2926 * current instruction. */
2927 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2928 return;
2929 }
pbrook2e70f6e2008-06-29 01:03:05 +00002930 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002931 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00002932 if ((vaddr == (wp->vaddr & len_mask) ||
2933 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00002934 wp->flags |= BP_WATCHPOINT_HIT;
2935 if (!env->watchpoint_hit) {
2936 env->watchpoint_hit = wp;
2937 tb = tb_find_pc(env->mem_io_pc);
2938 if (!tb) {
2939 cpu_abort(env, "check_watchpoint: could not find TB for "
2940 "pc=%p", (void *)env->mem_io_pc);
2941 }
Stefan Weil618ba8e2011-04-18 06:39:53 +00002942 cpu_restore_state(tb, env, env->mem_io_pc);
aliguori6e140f22008-11-18 20:37:55 +00002943 tb_phys_invalidate(tb, -1);
2944 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2945 env->exception_index = EXCP_DEBUG;
Max Filippov488d6572012-01-29 02:24:39 +04002946 cpu_loop_exit(env);
aliguori6e140f22008-11-18 20:37:55 +00002947 } else {
2948 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2949 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
Max Filippov488d6572012-01-29 02:24:39 +04002950 cpu_resume_from_signal(env, NULL);
aliguori6e140f22008-11-18 20:37:55 +00002951 }
aliguori06d55cc2008-11-18 20:24:06 +00002952 }
aliguori6e140f22008-11-18 20:37:55 +00002953 } else {
2954 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00002955 }
2956 }
2957}
2958
pbrook6658ffb2007-03-16 23:58:11 +00002959/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2960 so these check for a hit then pass through to the normal out-of-line
2961 phys routines. */
Avi Kivity1ec9b902012-01-02 12:47:48 +02002962static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
2963 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00002964{
Avi Kivity1ec9b902012-01-02 12:47:48 +02002965 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
2966 switch (size) {
2967 case 1: return ldub_phys(addr);
2968 case 2: return lduw_phys(addr);
2969 case 4: return ldl_phys(addr);
2970 default: abort();
2971 }
pbrook6658ffb2007-03-16 23:58:11 +00002972}
2973
Avi Kivity1ec9b902012-01-02 12:47:48 +02002974static void watch_mem_write(void *opaque, target_phys_addr_t addr,
2975 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00002976{
Avi Kivity1ec9b902012-01-02 12:47:48 +02002977 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
2978 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04002979 case 1:
2980 stb_phys(addr, val);
2981 break;
2982 case 2:
2983 stw_phys(addr, val);
2984 break;
2985 case 4:
2986 stl_phys(addr, val);
2987 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02002988 default: abort();
2989 }
pbrook6658ffb2007-03-16 23:58:11 +00002990}
2991
Avi Kivity1ec9b902012-01-02 12:47:48 +02002992static const MemoryRegionOps watch_mem_ops = {
2993 .read = watch_mem_read,
2994 .write = watch_mem_write,
2995 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00002996};
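/* Flow sketch: arming a watchpoint retargets the page's TLB entry at
 * io_mem_watch, so a guest access becomes watch_mem_read()/watch_mem_write()
 * -> check_watchpoint(), which either raises EXCP_DEBUG or falls through to
 * the ordinary ld*_phys()/st*_phys() accessors.
 */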
pbrook6658ffb2007-03-16 23:58:11 +00002997
Avi Kivity70c68e42012-01-02 12:32:48 +02002998static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
2999 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003000{
Avi Kivity70c68e42012-01-02 12:32:48 +02003001 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003002 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02003003 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00003004#if defined(DEBUG_SUBPAGE)
3005 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3006 mmio, len, addr, idx);
3007#endif
blueswir1db7b5422007-05-26 17:36:03 +00003008
Avi Kivity5312bd82012-02-12 18:32:55 +02003009 section = &phys_sections[mmio->sub_section[idx]];
3010 addr += mmio->base;
3011 addr -= section->offset_within_address_space;
3012 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02003013 return io_mem_read(section->mr, addr, len);
blueswir1db7b5422007-05-26 17:36:03 +00003014}
3015
Avi Kivity70c68e42012-01-02 12:32:48 +02003016static void subpage_write(void *opaque, target_phys_addr_t addr,
3017 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003018{
Avi Kivity70c68e42012-01-02 12:32:48 +02003019 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003020 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02003021 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00003022#if defined(DEBUG_SUBPAGE)
Avi Kivity70c68e42012-01-02 12:32:48 +02003023 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
3024 " idx %d value %"PRIx64"\n",
Richard Hendersonf6405242010-04-22 16:47:31 -07003025 __func__, mmio, len, addr, idx, value);
blueswir1db7b5422007-05-26 17:36:03 +00003026#endif
Richard Hendersonf6405242010-04-22 16:47:31 -07003027
Avi Kivity5312bd82012-02-12 18:32:55 +02003028 section = &phys_sections[mmio->sub_section[idx]];
3029 addr += mmio->base;
3030 addr -= section->offset_within_address_space;
3031 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02003032 io_mem_write(section->mr, addr, value, len);
blueswir1db7b5422007-05-26 17:36:03 +00003033}
3034
Avi Kivity70c68e42012-01-02 12:32:48 +02003035static const MemoryRegionOps subpage_ops = {
3036 .read = subpage_read,
3037 .write = subpage_write,
3038 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00003039};
3040
Avi Kivityde712f92012-01-02 12:41:07 +02003041static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
3042 unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01003043{
3044 ram_addr_t raddr = addr;
3045 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02003046 switch (size) {
3047 case 1: return ldub_p(ptr);
3048 case 2: return lduw_p(ptr);
3049 case 4: return ldl_p(ptr);
3050 default: abort();
3051 }
Andreas Färber56384e82011-11-30 16:26:21 +01003052}
3053
Avi Kivityde712f92012-01-02 12:41:07 +02003054static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
3055 uint64_t value, unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01003056{
3057 ram_addr_t raddr = addr;
3058 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02003059 switch (size) {
3060 case 1: return stb_p(ptr, value);
3061 case 2: return stw_p(ptr, value);
3062 case 4: return stl_p(ptr, value);
3063 default: abort();
3064 }
Andreas Färber56384e82011-11-30 16:26:21 +01003065}
3066
Avi Kivityde712f92012-01-02 12:41:07 +02003067static const MemoryRegionOps subpage_ram_ops = {
3068 .read = subpage_ram_read,
3069 .write = subpage_ram_write,
3070 .endianness = DEVICE_NATIVE_ENDIAN,
Andreas Färber56384e82011-11-30 16:26:21 +01003071};
3072
Anthony Liguoric227f092009-10-01 16:12:16 -05003073static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02003074 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00003075{
3076 int idx, eidx;
3077
3078 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3079 return -1;
3080 idx = SUBPAGE_IDX(start);
3081 eidx = SUBPAGE_IDX(end);
3082#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
3085#endif
Avi Kivity5312bd82012-02-12 18:32:55 +02003086 if (memory_region_is_ram(phys_sections[section].mr)) {
3087 MemoryRegionSection new_section = phys_sections[section];
3088 new_section.mr = &io_mem_subpage_ram;
3089 section = phys_section_add(&new_section);
Andreas Färber56384e82011-11-30 16:26:21 +01003090 }
blueswir1db7b5422007-05-26 17:36:03 +00003091 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02003092 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00003093 }
3094
3095 return 0;
3096}
3097
Avi Kivity0f0cb162012-02-13 17:14:32 +02003098static subpage_t *subpage_init(target_phys_addr_t base)
blueswir1db7b5422007-05-26 17:36:03 +00003099{
Anthony Liguoric227f092009-10-01 16:12:16 -05003100 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00003101
Anthony Liguori7267c092011-08-20 22:09:37 -05003102 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00003103
3104 mmio->base = base;
Avi Kivity70c68e42012-01-02 12:32:48 +02003105 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
3106 "subpage", TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02003107 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00003108#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00003111#endif
Avi Kivity0f0cb162012-02-13 17:14:32 +02003112 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
blueswir1db7b5422007-05-26 17:36:03 +00003113
3114 return mmio;
3115}
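/* Worked example (illustrative, hypothetical layout): suppose a 0x100-byte
 * device occupies offsets 0x800..0x8ff of a 4 KiB page.  subpage_init()
 * first marks the whole page unassigned, then the registration code adds
 *
 *     subpage_register(mmio, 0x800, 0x8ff, device_section);
 *
 * so accesses to 0x000..0x7ff and 0x900..0xfff still hit the unassigned
 * section while 0x800..0x8ff dispatch to the device's MemoryRegionSection.
 */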
3116
Avi Kivity5312bd82012-02-12 18:32:55 +02003117static uint16_t dummy_section(MemoryRegion *mr)
3118{
3119 MemoryRegionSection section = {
3120 .mr = mr,
3121 .offset_within_address_space = 0,
3122 .offset_within_region = 0,
3123 .size = UINT64_MAX,
3124 };
3125
3126 return phys_section_add(&section);
3127}
3128
Avi Kivity37ec01d2012-03-08 18:08:35 +02003129MemoryRegion *iotlb_to_region(target_phys_addr_t index)
Avi Kivityaa102232012-03-08 17:06:55 +02003130{
Avi Kivity37ec01d2012-03-08 18:08:35 +02003131 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02003132}
3133
Avi Kivitye9179ce2009-06-14 11:38:52 +03003134static void io_mem_init(void)
3135{
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003136 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003137 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
3138 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
3139 "unassigned", UINT64_MAX);
3140 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
3141 "notdirty", UINT64_MAX);
Avi Kivityde712f92012-01-02 12:41:07 +02003142 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
3143 "subpage-ram", UINT64_MAX);
Avi Kivity1ec9b902012-01-02 12:47:48 +02003144 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
3145 "watch", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003146}
3147
Avi Kivityac1970f2012-10-03 16:22:53 +02003148static void mem_begin(MemoryListener *listener)
3149{
3150 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
3151
3152 destroy_all_mappings(d);
3153 d->phys_map.ptr = PHYS_MAP_NODE_NIL;
3154}
3155
Avi Kivity50c1e142012-02-08 21:36:02 +02003156static void core_begin(MemoryListener *listener)
3157{
Avi Kivity5312bd82012-02-12 18:32:55 +02003158 phys_sections_clear();
3159 phys_section_unassigned = dummy_section(&io_mem_unassigned);
Avi Kivityaa102232012-03-08 17:06:55 +02003160 phys_section_notdirty = dummy_section(&io_mem_notdirty);
3161 phys_section_rom = dummy_section(&io_mem_rom);
3162 phys_section_watch = dummy_section(&io_mem_watch);
Avi Kivity50c1e142012-02-08 21:36:02 +02003163}
3164
Avi Kivity1d711482012-10-02 18:54:45 +02003165static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02003166{
Andreas Färber9349b4f2012-03-14 01:38:32 +01003167 CPUArchState *env;
Avi Kivity117712c2012-02-12 21:23:17 +02003168
3169 /* since each CPU stores ram addresses in its TLB cache, we must
3170 reset the modified entries */
3171 /* XXX: slow ! */
3172 for(env = first_cpu; env != NULL; env = env->next_cpu) {
3173 tlb_flush(env, 1);
3174 }
Avi Kivity50c1e142012-02-08 21:36:02 +02003175}
3176
Avi Kivity93632742012-02-08 16:54:16 +02003177static void core_log_global_start(MemoryListener *listener)
3178{
3179 cpu_physical_memory_set_dirty_tracking(1);
3180}
3181
3182static void core_log_global_stop(MemoryListener *listener)
3183{
3184 cpu_physical_memory_set_dirty_tracking(0);
3185}
3186
Avi Kivity4855d412012-02-08 21:16:05 +02003187static void io_region_add(MemoryListener *listener,
3188 MemoryRegionSection *section)
3189{
Avi Kivitya2d33522012-03-05 17:40:12 +02003190 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
3191
3192 mrio->mr = section->mr;
3193 mrio->offset = section->offset_within_region;
3194 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
Avi Kivity4855d412012-02-08 21:16:05 +02003195 section->offset_within_address_space, section->size);
Avi Kivitya2d33522012-03-05 17:40:12 +02003196 ioport_register(&mrio->iorange);
Avi Kivity4855d412012-02-08 21:16:05 +02003197}
3198
3199static void io_region_del(MemoryListener *listener,
3200 MemoryRegionSection *section)
3201{
3202 isa_unassign_ioport(section->offset_within_address_space, section->size);
3203}
3204
Avi Kivity93632742012-02-08 16:54:16 +02003205static MemoryListener core_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02003206 .begin = core_begin,
Avi Kivity93632742012-02-08 16:54:16 +02003207 .log_global_start = core_log_global_start,
3208 .log_global_stop = core_log_global_stop,
Avi Kivityac1970f2012-10-03 16:22:53 +02003209 .priority = 1,
Avi Kivity93632742012-02-08 16:54:16 +02003210};
3211
Avi Kivity4855d412012-02-08 21:16:05 +02003212static MemoryListener io_memory_listener = {
3213 .region_add = io_region_add,
3214 .region_del = io_region_del,
Avi Kivity4855d412012-02-08 21:16:05 +02003215 .priority = 0,
3216};
3217
Avi Kivity1d711482012-10-02 18:54:45 +02003218static MemoryListener tcg_memory_listener = {
3219 .commit = tcg_commit,
3220};
3221
Avi Kivityac1970f2012-10-03 16:22:53 +02003222void address_space_init_dispatch(AddressSpace *as)
3223{
3224 AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
3225
3226 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
3227 d->listener = (MemoryListener) {
3228 .begin = mem_begin,
3229 .region_add = mem_add,
3230 .region_nop = mem_add,
3231 .priority = 0,
3232 };
3233 as->dispatch = d;
3234 memory_listener_register(&d->listener, as);
3235}
3236
Avi Kivity62152b82011-07-26 14:26:14 +03003237static void memory_map_init(void)
3238{
Anthony Liguori7267c092011-08-20 22:09:37 -05003239 system_memory = g_malloc(sizeof(*system_memory));
Avi Kivity8417ceb2011-08-03 11:56:14 +03003240 memory_region_init(system_memory, "system", INT64_MAX);
Avi Kivity2673a5d2012-10-02 18:49:28 +02003241 address_space_init(&address_space_memory, system_memory);
3242 address_space_memory.name = "memory";
Avi Kivity309cb472011-08-08 16:09:03 +03003243
Anthony Liguori7267c092011-08-20 22:09:37 -05003244 system_io = g_malloc(sizeof(*system_io));
Avi Kivity309cb472011-08-08 16:09:03 +03003245 memory_region_init(system_io, "io", 65536);
Avi Kivity2673a5d2012-10-02 18:49:28 +02003246 address_space_init(&address_space_io, system_io);
3247 address_space_io.name = "I/O";
Avi Kivity93632742012-02-08 16:54:16 +02003248
Avi Kivityf6790af2012-10-02 20:13:51 +02003249 memory_listener_register(&core_memory_listener, &address_space_memory);
3250 memory_listener_register(&io_memory_listener, &address_space_io);
3251 memory_listener_register(&tcg_memory_listener, &address_space_memory);
Avi Kivity62152b82011-07-26 14:26:14 +03003252}
3253
3254MemoryRegion *get_system_memory(void)
3255{
3256 return system_memory;
3257}
3258
Avi Kivity309cb472011-08-08 16:09:03 +03003259MemoryRegion *get_system_io(void)
3260{
3261 return system_io;
3262}
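/* Usage sketch (illustrative; the function and names are hypothetical): a
 * board model maps its RAM into the system address space roughly like this.
 */
#if 0
static void example_board_ram_init(ram_addr_t ram_size)
{
    MemoryRegion *ram = g_malloc(sizeof(*ram));

    memory_region_init_ram(ram, "example.ram", ram_size);
    memory_region_add_subregion(get_system_memory(), 0, ram);
}
#endif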
3263
pbrooke2eef172008-06-08 01:09:01 +00003264#endif /* !defined(CONFIG_USER_ONLY) */
3265
bellard13eb76e2004-01-24 15:23:36 +00003266/* physical memory access (slow version, mainly for debug) */
3267#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +01003268int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00003269 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003270{
3271 int l, flags;
3272 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00003273 void * p;
bellard13eb76e2004-01-24 15:23:36 +00003274
3275 while (len > 0) {
3276 page = addr & TARGET_PAGE_MASK;
3277 l = (page + TARGET_PAGE_SIZE) - addr;
3278 if (l > len)
3279 l = len;
3280 flags = page_get_flags(page);
3281 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00003282 return -1;
bellard13eb76e2004-01-24 15:23:36 +00003283 if (is_write) {
3284 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00003285 return -1;
bellard579a97f2007-11-11 14:26:47 +00003286 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003287 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00003288 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003289 memcpy(p, buf, l);
3290 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00003291 } else {
3292 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00003293 return -1;
bellard579a97f2007-11-11 14:26:47 +00003294 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003295 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00003296 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003297 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00003298 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00003299 }
3300 len -= l;
3301 buf += l;
3302 addr += l;
3303 }
Paul Brooka68fe892010-03-01 00:08:59 +00003304 return 0;
bellard13eb76e2004-01-24 15:23:36 +00003305}
bellard8df1cd02005-01-28 22:37:22 +00003306
bellard13eb76e2004-01-24 15:23:36 +00003307#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00003308
3309static void invalidate_and_set_dirty(target_phys_addr_t addr,
3310 target_phys_addr_t length)
3311{
3312 if (!cpu_physical_memory_is_dirty(addr)) {
3313 /* invalidate code */
3314 tb_invalidate_phys_page_range(addr, addr + length, 0);
3315 /* set dirty bit */
3316 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
3317 }
Anthony PERARDe2269392012-10-03 13:49:22 +00003318 xen_modified_memory(addr, length);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00003319}
3320
Avi Kivityac1970f2012-10-03 16:22:53 +02003321void address_space_rw(AddressSpace *as, target_phys_addr_t addr, uint8_t *buf,
3322 int len, bool is_write)
bellard13eb76e2004-01-24 15:23:36 +00003323{
Avi Kivityac1970f2012-10-03 16:22:53 +02003324 AddressSpaceDispatch *d = as->dispatch;
Avi Kivity37ec01d2012-03-08 18:08:35 +02003325 int l;
bellard13eb76e2004-01-24 15:23:36 +00003326 uint8_t *ptr;
3327 uint32_t val;
Anthony Liguoric227f092009-10-01 16:12:16 -05003328 target_phys_addr_t page;
Avi Kivityf3705d52012-03-08 16:16:34 +02003329 MemoryRegionSection *section;
ths3b46e622007-09-17 08:09:54 +00003330
bellard13eb76e2004-01-24 15:23:36 +00003331 while (len > 0) {
3332 page = addr & TARGET_PAGE_MASK;
3333 l = (page + TARGET_PAGE_SIZE) - addr;
3334 if (l > len)
3335 l = len;
Avi Kivityac1970f2012-10-03 16:22:53 +02003336 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003337
bellard13eb76e2004-01-24 15:23:36 +00003338 if (is_write) {
Avi Kivityf3705d52012-03-08 16:16:34 +02003339 if (!memory_region_is_ram(section->mr)) {
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003340 target_phys_addr_t addr1;
Blue Swirlcc5bea62012-04-14 14:56:48 +00003341 addr1 = memory_region_section_addr(section, addr);
bellard6a00d602005-11-21 23:25:50 +00003342 /* XXX: could force cpu_single_env to NULL to avoid
3343 potential bugs */
aurel326c2934d2009-02-18 21:37:17 +00003344 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003345 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003346 val = ldl_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003347 io_mem_write(section->mr, addr1, val, 4);
bellard13eb76e2004-01-24 15:23:36 +00003348 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003349 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003350 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003351 val = lduw_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003352 io_mem_write(section->mr, addr1, val, 2);
bellard13eb76e2004-01-24 15:23:36 +00003353 l = 2;
3354 } else {
bellard1c213d12005-09-03 10:49:04 +00003355 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003356 val = ldub_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003357 io_mem_write(section->mr, addr1, val, 1);
bellard13eb76e2004-01-24 15:23:36 +00003358 l = 1;
3359 }
Avi Kivityf3705d52012-03-08 16:16:34 +02003360 } else if (!section->readonly) {
Anthony PERARD8ca56922011-07-15 04:32:53 +00003361 ram_addr_t addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02003362 addr1 = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003363 + memory_region_section_addr(section, addr);
bellard13eb76e2004-01-24 15:23:36 +00003364 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003365 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00003366 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00003367 invalidate_and_set_dirty(addr1, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003368 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00003369 }
3370 } else {
Blue Swirlcc5bea62012-04-14 14:56:48 +00003371 if (!(memory_region_is_ram(section->mr) ||
3372 memory_region_is_romd(section->mr))) {
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003373 target_phys_addr_t addr1;
bellard13eb76e2004-01-24 15:23:36 +00003374 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00003375 addr1 = memory_region_section_addr(section, addr);
aurel326c2934d2009-02-18 21:37:17 +00003376 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003377 /* 32 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02003378 val = io_mem_read(section->mr, addr1, 4);
bellardc27004e2005-01-03 23:35:10 +00003379 stl_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003380 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003381 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003382 /* 16 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02003383 val = io_mem_read(section->mr, addr1, 2);
bellardc27004e2005-01-03 23:35:10 +00003384 stw_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003385 l = 2;
3386 } else {
bellard1c213d12005-09-03 10:49:04 +00003387 /* 8 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02003388 val = io_mem_read(section->mr, addr1, 1);
bellardc27004e2005-01-03 23:35:10 +00003389 stb_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003390 l = 1;
3391 }
3392 } else {
3393 /* RAM case */
Anthony PERARD0a1b3572012-03-19 15:54:34 +00003394 ptr = qemu_get_ram_ptr(section->mr->ram_addr
Blue Swirlcc5bea62012-04-14 14:56:48 +00003395 + memory_region_section_addr(section,
3396 addr));
Avi Kivityf3705d52012-03-08 16:16:34 +02003397 memcpy(buf, ptr, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003398 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00003399 }
3400 }
3401 len -= l;
3402 buf += l;
3403 addr += l;
3404 }
3405}
bellard8df1cd02005-01-28 22:37:22 +00003406
Avi Kivityac1970f2012-10-03 16:22:53 +02003407void address_space_write(AddressSpace *as, target_phys_addr_t addr,
3408 const uint8_t *buf, int len)
3409{
3410 address_space_rw(as, addr, (uint8_t *)buf, len, true);
3411}
3412
3413/**
3414 * address_space_read: read from an address space.
3415 *
3416 * @as: #AddressSpace to be accessed
3417 * @addr: address within that address space
 * @buf: buffer into which the data is transferred
 * @len: length of the transfer in bytes
 */
3420void address_space_read(AddressSpace *as, target_phys_addr_t addr, uint8_t *buf, int len)
3421{
3422 address_space_rw(as, addr, buf, len, false);
3423}
3424
3425
3426void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3427 int len, int is_write)
3428{
3429 return address_space_rw(&address_space_memory, addr, buf, len, is_write);
3430}
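/* Usage sketch (illustrative, hypothetical address): accessing a 32-bit
 * value that a guest placed at a known physical address.
 */
#if 0
    uint32_t magic;

    cpu_physical_memory_read(0x1000, &magic, sizeof(magic));
    cpu_physical_memory_write(0x1000, &magic, sizeof(magic));
#endif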
3431
bellardd0ecd2a2006-04-23 17:14:48 +00003432/* used for ROM loading : can write in RAM and ROM */
Anthony Liguoric227f092009-10-01 16:12:16 -05003433void cpu_physical_memory_write_rom(target_phys_addr_t addr,
bellardd0ecd2a2006-04-23 17:14:48 +00003434 const uint8_t *buf, int len)
3435{
Avi Kivityac1970f2012-10-03 16:22:53 +02003436 AddressSpaceDispatch *d = address_space_memory.dispatch;
bellardd0ecd2a2006-04-23 17:14:48 +00003437 int l;
3438 uint8_t *ptr;
Anthony Liguoric227f092009-10-01 16:12:16 -05003439 target_phys_addr_t page;
Avi Kivityf3705d52012-03-08 16:16:34 +02003440 MemoryRegionSection *section;
ths3b46e622007-09-17 08:09:54 +00003441
bellardd0ecd2a2006-04-23 17:14:48 +00003442 while (len > 0) {
3443 page = addr & TARGET_PAGE_MASK;
3444 l = (page + TARGET_PAGE_SIZE) - addr;
3445 if (l > len)
3446 l = len;
Avi Kivityac1970f2012-10-03 16:22:53 +02003447 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003448
Blue Swirlcc5bea62012-04-14 14:56:48 +00003449 if (!(memory_region_is_ram(section->mr) ||
3450 memory_region_is_romd(section->mr))) {
bellardd0ecd2a2006-04-23 17:14:48 +00003451 /* do nothing */
3452 } else {
3453 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02003454 addr1 = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003455 + memory_region_section_addr(section, addr);
bellardd0ecd2a2006-04-23 17:14:48 +00003456 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003457 ptr = qemu_get_ram_ptr(addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00003458 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00003459 invalidate_and_set_dirty(addr1, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003460 qemu_put_ram_ptr(ptr);
bellardd0ecd2a2006-04-23 17:14:48 +00003461 }
3462 len -= l;
3463 buf += l;
3464 addr += l;
3465 }
3466}
3467
aliguori6d16c2f2009-01-22 16:59:11 +00003468typedef struct {
3469 void *buffer;
Anthony Liguoric227f092009-10-01 16:12:16 -05003470 target_phys_addr_t addr;
3471 target_phys_addr_t len;
aliguori6d16c2f2009-01-22 16:59:11 +00003472} BounceBuffer;
3473
3474static BounceBuffer bounce;
3475
aliguoriba223c22009-01-22 16:59:16 +00003476typedef struct MapClient {
3477 void *opaque;
3478 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00003479 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00003480} MapClient;
3481
Blue Swirl72cf2d42009-09-12 07:36:22 +00003482static QLIST_HEAD(map_client_list, MapClient) map_client_list
3483 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003484
3485void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3486{
Anthony Liguori7267c092011-08-20 22:09:37 -05003487 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00003488
3489 client->opaque = opaque;
3490 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003491 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00003492 return client;
3493}
3494
3495void cpu_unregister_map_client(void *_client)
3496{
3497 MapClient *client = (MapClient *)_client;
3498
Blue Swirl72cf2d42009-09-12 07:36:22 +00003499 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05003500 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00003501}
3502
3503static void cpu_notify_map_clients(void)
3504{
3505 MapClient *client;
3506
Blue Swirl72cf2d42009-09-12 07:36:22 +00003507 while (!QLIST_EMPTY(&map_client_list)) {
3508 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003509 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09003510 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00003511 }
3512}
3513
aliguori6d16c2f2009-01-22 16:59:11 +00003514/* Map a physical memory region into a host virtual address.
3515 * May map a subset of the requested range, given by and returned in *plen.
3516 * May return NULL if resources needed to perform the mapping are exhausted.
3517 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00003518 * Use cpu_register_map_client() to know when retrying the map operation is
3519 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00003520 */
Avi Kivityac1970f2012-10-03 16:22:53 +02003521void *address_space_map(AddressSpace *as,
3522 target_phys_addr_t addr,
3523 target_phys_addr_t *plen,
3524 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00003525{
Avi Kivityac1970f2012-10-03 16:22:53 +02003526 AddressSpaceDispatch *d = as->dispatch;
Anthony Liguoric227f092009-10-01 16:12:16 -05003527 target_phys_addr_t len = *plen;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003528 target_phys_addr_t todo = 0;
aliguori6d16c2f2009-01-22 16:59:11 +00003529 int l;
Anthony Liguoric227f092009-10-01 16:12:16 -05003530 target_phys_addr_t page;
Avi Kivityf3705d52012-03-08 16:16:34 +02003531 MemoryRegionSection *section;
Anthony PERARDf15fbc42011-07-20 08:17:42 +00003532 ram_addr_t raddr = RAM_ADDR_MAX;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003533 ram_addr_t rlen;
3534 void *ret;
aliguori6d16c2f2009-01-22 16:59:11 +00003535
3536 while (len > 0) {
3537 page = addr & TARGET_PAGE_MASK;
3538 l = (page + TARGET_PAGE_SIZE) - addr;
3539 if (l > len)
3540 l = len;
Avi Kivityac1970f2012-10-03 16:22:53 +02003541 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
aliguori6d16c2f2009-01-22 16:59:11 +00003542
Avi Kivityf3705d52012-03-08 16:16:34 +02003543 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003544 if (todo || bounce.buffer) {
aliguori6d16c2f2009-01-22 16:59:11 +00003545 break;
3546 }
3547 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3548 bounce.addr = addr;
3549 bounce.len = l;
3550 if (!is_write) {
Avi Kivityac1970f2012-10-03 16:22:53 +02003551 address_space_read(as, addr, bounce.buffer, l);
aliguori6d16c2f2009-01-22 16:59:11 +00003552 }
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003553
3554 *plen = l;
3555 return bounce.buffer;
aliguori6d16c2f2009-01-22 16:59:11 +00003556 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003557 if (!todo) {
Avi Kivityf3705d52012-03-08 16:16:34 +02003558 raddr = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003559 + memory_region_section_addr(section, addr);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003560 }
aliguori6d16c2f2009-01-22 16:59:11 +00003561
3562 len -= l;
3563 addr += l;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003564 todo += l;
aliguori6d16c2f2009-01-22 16:59:11 +00003565 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003566 rlen = todo;
3567 ret = qemu_ram_ptr_length(raddr, &rlen);
3568 *plen = rlen;
3569 return ret;
aliguori6d16c2f2009-01-22 16:59:11 +00003570}
3571
Avi Kivityac1970f2012-10-03 16:22:53 +02003572/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00003573 * Will also mark the memory as dirty if is_write == 1. access_len gives
3574 * the amount of memory that was actually read or written by the caller.
3575 */
Avi Kivityac1970f2012-10-03 16:22:53 +02003576void address_space_unmap(AddressSpace *as, void *buffer, target_phys_addr_t len,
3577 int is_write, target_phys_addr_t access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00003578{
3579 if (buffer != bounce.buffer) {
3580 if (is_write) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03003581 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00003582 while (access_len) {
3583 unsigned l;
3584 l = TARGET_PAGE_SIZE;
3585 if (l > access_len)
3586 l = access_len;
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00003587 invalidate_and_set_dirty(addr1, l);
aliguori6d16c2f2009-01-22 16:59:11 +00003588 addr1 += l;
3589 access_len -= l;
3590 }
3591 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003592 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003593 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003594 }
aliguori6d16c2f2009-01-22 16:59:11 +00003595 return;
3596 }
3597 if (is_write) {
Avi Kivityac1970f2012-10-03 16:22:53 +02003598 address_space_write(as, bounce.addr, bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00003599 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00003600 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00003601 bounce.buffer = NULL;
aliguoriba223c22009-01-22 16:59:16 +00003602 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00003603}
bellardd0ecd2a2006-04-23 17:14:48 +00003604
Avi Kivityac1970f2012-10-03 16:22:53 +02003605void *cpu_physical_memory_map(target_phys_addr_t addr,
3606 target_phys_addr_t *plen,
3607 int is_write)
3608{
3609 return address_space_map(&address_space_memory, addr, plen, is_write);
3610}
3611
3612void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3613 int is_write, target_phys_addr_t access_len)
3614{
3615 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
3616}
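/* DMA usage sketch (illustrative; the function is hypothetical): map, copy,
 * unmap.  A real device would retry via cpu_register_map_client() when the
 * mapping comes back short or NULL (bounce buffer busy); this sketch just
 * gives up.
 */
#if 0
static int example_dma_write(target_phys_addr_t addr, const uint8_t *data,
                             target_phys_addr_t len)
{
    target_phys_addr_t plen = len;
    void *host = cpu_physical_memory_map(addr, &plen, 1);

    if (!host || plen < len) {
        if (host) {
            cpu_physical_memory_unmap(host, plen, 1, 0);
        }
        return -1;
    }
    memcpy(host, data, len);
    cpu_physical_memory_unmap(host, plen, 1, len);
    return 0;
}
#endif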
3617
bellard8df1cd02005-01-28 22:37:22 +00003618/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003619static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
3620 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003621{
bellard8df1cd02005-01-28 22:37:22 +00003622 uint8_t *ptr;
3623 uint32_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02003624 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00003625
Avi Kivityac1970f2012-10-03 16:22:53 +02003626 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003627
Blue Swirlcc5bea62012-04-14 14:56:48 +00003628 if (!(memory_region_is_ram(section->mr) ||
3629 memory_region_is_romd(section->mr))) {
bellard8df1cd02005-01-28 22:37:22 +00003630 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00003631 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003632 val = io_mem_read(section->mr, addr, 4);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003633#if defined(TARGET_WORDS_BIGENDIAN)
3634 if (endian == DEVICE_LITTLE_ENDIAN) {
3635 val = bswap32(val);
3636 }
3637#else
3638 if (endian == DEVICE_BIG_ENDIAN) {
3639 val = bswap32(val);
3640 }
3641#endif
bellard8df1cd02005-01-28 22:37:22 +00003642 } else {
3643 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02003644 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003645 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003646 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003647 switch (endian) {
3648 case DEVICE_LITTLE_ENDIAN:
3649 val = ldl_le_p(ptr);
3650 break;
3651 case DEVICE_BIG_ENDIAN:
3652 val = ldl_be_p(ptr);
3653 break;
3654 default:
3655 val = ldl_p(ptr);
3656 break;
3657 }
bellard8df1cd02005-01-28 22:37:22 +00003658 }
3659 return val;
3660}
3661
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003662uint32_t ldl_phys(target_phys_addr_t addr)
3663{
3664 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3665}
3666
3667uint32_t ldl_le_phys(target_phys_addr_t addr)
3668{
3669 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3670}
3671
3672uint32_t ldl_be_phys(target_phys_addr_t addr)
3673{
3674 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
3675}
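/* Example (illustrative): if guest RAM at "addr" holds the bytes
 * { 0x44, 0x33, 0x22, 0x11 }, then ldl_le_phys(addr) yields 0x11223344,
 * ldl_be_phys(addr) yields 0x44332211, and ldl_phys(addr) follows the
 * target's native byte order.
 */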
3676
bellard84b7b8e2005-11-28 21:19:04 +00003677/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003678static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
3679 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00003680{
bellard84b7b8e2005-11-28 21:19:04 +00003681 uint8_t *ptr;
3682 uint64_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02003683 MemoryRegionSection *section;
bellard84b7b8e2005-11-28 21:19:04 +00003684
Avi Kivityac1970f2012-10-03 16:22:53 +02003685 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003686
Blue Swirlcc5bea62012-04-14 14:56:48 +00003687 if (!(memory_region_is_ram(section->mr) ||
3688 memory_region_is_romd(section->mr))) {
bellard84b7b8e2005-11-28 21:19:04 +00003689 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00003690 addr = memory_region_section_addr(section, addr);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003691
3692 /* XXX This is broken when device endian != cpu endian.
3693 Fix and add "endian" variable check */
bellard84b7b8e2005-11-28 21:19:04 +00003694#ifdef TARGET_WORDS_BIGENDIAN
Avi Kivity37ec01d2012-03-08 18:08:35 +02003695 val = io_mem_read(section->mr, addr, 4) << 32;
3696 val |= io_mem_read(section->mr, addr + 4, 4);
bellard84b7b8e2005-11-28 21:19:04 +00003697#else
Avi Kivity37ec01d2012-03-08 18:08:35 +02003698 val = io_mem_read(section->mr, addr, 4);
3699 val |= io_mem_read(section->mr, addr + 4, 4) << 32;
bellard84b7b8e2005-11-28 21:19:04 +00003700#endif
3701 } else {
3702 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02003703 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003704 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003705 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003706 switch (endian) {
3707 case DEVICE_LITTLE_ENDIAN:
3708 val = ldq_le_p(ptr);
3709 break;
3710 case DEVICE_BIG_ENDIAN:
3711 val = ldq_be_p(ptr);
3712 break;
3713 default:
3714 val = ldq_p(ptr);
3715 break;
3716 }
bellard84b7b8e2005-11-28 21:19:04 +00003717 }
3718 return val;
3719}
3720
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003721uint64_t ldq_phys(target_phys_addr_t addr)
3722{
3723 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3724}
3725
3726uint64_t ldq_le_phys(target_phys_addr_t addr)
3727{
3728 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3729}
3730
3731uint64_t ldq_be_phys(target_phys_addr_t addr)
3732{
3733 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
3734}
3735
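/* Illustrative sketch, not part of exec.c: the 64-bit accessors read a whole
 * guest table entry in one call; note that the I/O path above is still
 * flagged XXX for endianness. The helper name, entry size and layout are
 * hypothetical. */
static inline uint64_t example_read_entry(target_phys_addr_t base, int idx)
{
    return ldq_le_phys(base + (target_phys_addr_t)idx * 8);
}
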
bellardaab33092005-10-30 20:48:42 +00003736/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05003737uint32_t ldub_phys(target_phys_addr_t addr)
bellardaab33092005-10-30 20:48:42 +00003738{
3739 uint8_t val;
3740 cpu_physical_memory_read(addr, &val, 1);
3741 return val;
3742}
3743
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003744/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003745static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
3746 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003747{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003748 uint8_t *ptr;
3749 uint64_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02003750 MemoryRegionSection *section;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003751
Avi Kivityac1970f2012-10-03 16:22:53 +02003752 section = phys_page_find(address_space_memory.dispatch,
                                             addr >> TARGET_PAGE_BITS);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003753
Blue Swirlcc5bea62012-04-14 14:56:48 +00003754 if (!(memory_region_is_ram(section->mr) ||
3755 memory_region_is_romd(section->mr))) {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003756 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00003757 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003758 val = io_mem_read(section->mr, addr, 2);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003759#if defined(TARGET_WORDS_BIGENDIAN)
3760 if (endian == DEVICE_LITTLE_ENDIAN) {
3761 val = bswap16(val);
3762 }
3763#else
3764 if (endian == DEVICE_BIG_ENDIAN) {
3765 val = bswap16(val);
3766 }
3767#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003768 } else {
3769 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02003770 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003771 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003772 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003773 switch (endian) {
3774 case DEVICE_LITTLE_ENDIAN:
3775 val = lduw_le_p(ptr);
3776 break;
3777 case DEVICE_BIG_ENDIAN:
3778 val = lduw_be_p(ptr);
3779 break;
3780 default:
3781 val = lduw_p(ptr);
3782 break;
3783 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003784 }
3785 return val;
bellardaab33092005-10-30 20:48:42 +00003786}
3787
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003788uint32_t lduw_phys(target_phys_addr_t addr)
3789{
3790 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3791}
3792
3793uint32_t lduw_le_phys(target_phys_addr_t addr)
3794{
3795 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3796}
3797
3798uint32_t lduw_be_phys(target_phys_addr_t addr)
3799{
3800 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
3801}
3802
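/* Illustrative sketch, not part of exec.c: because the bswap16 dispatch
 * above normalizes to host order, a caller never needs to swap by hand. The
 * helper name and register address are hypothetical. */
static inline uint16_t example_read_be16(target_phys_addr_t pa)
{
    return (uint16_t)lduw_be_phys(pa);
}
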
bellard8df1cd02005-01-28 22:37:22 +00003803/* warning: addr must be aligned. The RAM page is not marked as dirty
 3804 and the code inside is not invalidated. This is useful when the dirty
 3805 bits themselves are used to track modified PTEs */
Anthony Liguoric227f092009-10-01 16:12:16 -05003806void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
bellard8df1cd02005-01-28 22:37:22 +00003807{
bellard8df1cd02005-01-28 22:37:22 +00003808 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02003809 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00003810
Avi Kivityac1970f2012-10-03 16:22:53 +02003811 section = phys_page_find(address_space_memory.dispatch,
                                             addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003812
Avi Kivityf3705d52012-03-08 16:16:34 +02003813 if (!memory_region_is_ram(section->mr) || section->readonly) {
Blue Swirlcc5bea62012-04-14 14:56:48 +00003814 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003815 if (memory_region_is_ram(section->mr)) {
3816 section = &phys_sections[phys_section_rom];
3817 }
3818 io_mem_write(section->mr, addr, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00003819 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02003820 unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003821 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003822 + memory_region_section_addr(section, addr);
pbrook5579c7f2009-04-11 14:47:08 +00003823 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00003824 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00003825
3826 if (unlikely(in_migration)) {
3827 if (!cpu_physical_memory_is_dirty(addr1)) {
3828 /* invalidate code */
3829 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3830 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003831 cpu_physical_memory_set_dirty_flags(
3832 addr1, (0xff & ~CODE_DIRTY_FLAG));
aliguori74576192008-10-06 14:02:03 +00003833 }
3834 }
bellard8df1cd02005-01-28 22:37:22 +00003835 }
3836}
3837
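/* Illustrative sketch, not part of exec.c: the _notdirty store suits MMU
 * helpers that set accessed/dirty bits in a guest PTE, where dirtying the
 * page would defeat the very tracking being implemented. The 0x20 accessed
 * bit mirrors the x86 PG_ACCESSED_MASK but is hypothetical here. */
static inline void example_set_pte_accessed(target_phys_addr_t pte_pa,
                                            uint32_t pte)
{
    stl_phys_notdirty(pte_pa, pte | 0x20);
}
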
Anthony Liguoric227f092009-10-01 16:12:16 -05003838void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
j_mayerbc98a7e2007-04-04 07:55:12 +00003839{
j_mayerbc98a7e2007-04-04 07:55:12 +00003840 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02003841 MemoryRegionSection *section;
j_mayerbc98a7e2007-04-04 07:55:12 +00003842
Avi Kivityac1970f2012-10-03 16:22:53 +02003843 section = phys_page_find(address_space_memory.dispatch,
                                             addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003844
Avi Kivityf3705d52012-03-08 16:16:34 +02003845 if (!memory_region_is_ram(section->mr) || section->readonly) {
Blue Swirlcc5bea62012-04-14 14:56:48 +00003846 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003847 if (memory_region_is_ram(section->mr)) {
3848 section = &phys_sections[phys_section_rom];
3849 }
j_mayerbc98a7e2007-04-04 07:55:12 +00003850#ifdef TARGET_WORDS_BIGENDIAN
Avi Kivity37ec01d2012-03-08 18:08:35 +02003851 io_mem_write(section->mr, addr, val >> 32, 4);
3852 io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
j_mayerbc98a7e2007-04-04 07:55:12 +00003853#else
Avi Kivity37ec01d2012-03-08 18:08:35 +02003854 io_mem_write(section->mr, addr, (uint32_t)val, 4);
3855 io_mem_write(section->mr, addr + 4, val >> 32, 4);
j_mayerbc98a7e2007-04-04 07:55:12 +00003856#endif
3857 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02003858 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003859 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003860 + memory_region_section_addr(section, addr));
j_mayerbc98a7e2007-04-04 07:55:12 +00003861 stq_p(ptr, val);
3862 }
3863}
3864
bellard8df1cd02005-01-28 22:37:22 +00003865/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003866static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
3867 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003868{
bellard8df1cd02005-01-28 22:37:22 +00003869 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02003870 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00003871
Avi Kivityac1970f2012-10-03 16:22:53 +02003872 section = phys_page_find(address_space_memory.dispatch,
                                             addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003873
Avi Kivityf3705d52012-03-08 16:16:34 +02003874 if (!memory_region_is_ram(section->mr) || section->readonly) {
Blue Swirlcc5bea62012-04-14 14:56:48 +00003875 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003876 if (memory_region_is_ram(section->mr)) {
3877 section = &phys_sections[phys_section_rom];
3878 }
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003879#if defined(TARGET_WORDS_BIGENDIAN)
3880 if (endian == DEVICE_LITTLE_ENDIAN) {
3881 val = bswap32(val);
3882 }
3883#else
3884 if (endian == DEVICE_BIG_ENDIAN) {
3885 val = bswap32(val);
3886 }
3887#endif
Avi Kivity37ec01d2012-03-08 18:08:35 +02003888 io_mem_write(section->mr, addr, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00003889 } else {
3890 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02003891 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003892 + memory_region_section_addr(section, addr);
bellard8df1cd02005-01-28 22:37:22 +00003893 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003894 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003895 switch (endian) {
3896 case DEVICE_LITTLE_ENDIAN:
3897 stl_le_p(ptr, val);
3898 break;
3899 case DEVICE_BIG_ENDIAN:
3900 stl_be_p(ptr, val);
3901 break;
3902 default:
3903 stl_p(ptr, val);
3904 break;
3905 }
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00003906 invalidate_and_set_dirty(addr1, 4);
bellard8df1cd02005-01-28 22:37:22 +00003907 }
3908}
3909
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003910void stl_phys(target_phys_addr_t addr, uint32_t val)
3911{
3912 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
3913}
3914
3915void stl_le_phys(target_phys_addr_t addr, uint32_t val)
3916{
3917 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
3918}
3919
3920void stl_be_phys(target_phys_addr_t addr, uint32_t val)
3921{
3922 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
3923}
3924
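/* Illustrative sketch, not part of exec.c: unlike stl_phys_notdirty, the
 * stores above invalidate overlapping TBs and set the dirty bits (via
 * invalidate_and_set_dirty), so they are the safe default for device models
 * writing into guest RAM. The helper name and address are hypothetical. */
static inline void example_post_status(target_phys_addr_t status_pa)
{
    stl_le_phys(status_pa, 1);   /* guest polls this little-endian flag */
}
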
bellardaab33092005-10-30 20:48:42 +00003925/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05003926void stb_phys(target_phys_addr_t addr, uint32_t val)
bellardaab33092005-10-30 20:48:42 +00003927{
3928 uint8_t v = val;
3929 cpu_physical_memory_write(addr, &v, 1);
3930}
3931
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003932/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003933static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
3934 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003935{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003936 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02003937 MemoryRegionSection *section;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003938
Avi Kivityac1970f2012-10-03 16:22:53 +02003939 section = phys_page_find(address_space_memory.dispatch,
                                             addr >> TARGET_PAGE_BITS);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003940
Avi Kivityf3705d52012-03-08 16:16:34 +02003941 if (!memory_region_is_ram(section->mr) || section->readonly) {
Blue Swirlcc5bea62012-04-14 14:56:48 +00003942 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003943 if (memory_region_is_ram(section->mr)) {
3944 section = &phys_sections[phys_section_rom];
3945 }
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003946#if defined(TARGET_WORDS_BIGENDIAN)
3947 if (endian == DEVICE_LITTLE_ENDIAN) {
3948 val = bswap16(val);
3949 }
3950#else
3951 if (endian == DEVICE_BIG_ENDIAN) {
3952 val = bswap16(val);
3953 }
3954#endif
Avi Kivity37ec01d2012-03-08 18:08:35 +02003955 io_mem_write(section->mr, addr, val, 2);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003956 } else {
3957 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02003958 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003959 + memory_region_section_addr(section, addr);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003960 /* RAM case */
3961 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003962 switch (endian) {
3963 case DEVICE_LITTLE_ENDIAN:
3964 stw_le_p(ptr, val);
3965 break;
3966 case DEVICE_BIG_ENDIAN:
3967 stw_be_p(ptr, val);
3968 break;
3969 default:
3970 stw_p(ptr, val);
3971 break;
3972 }
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00003973 invalidate_and_set_dirty(addr1, 2);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003974 }
bellardaab33092005-10-30 20:48:42 +00003975}
3976
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003977void stw_phys(target_phys_addr_t addr, uint32_t val)
3978{
3979 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
3980}
3981
3982void stw_le_phys(target_phys_addr_t addr, uint32_t val)
3983{
3984 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
3985}
3986
3987void stw_be_phys(target_phys_addr_t addr, uint32_t val)
3988{
3989 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
3990}
3991
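/* Illustrative sketch, not part of exec.c: 16-bit stores follow the same
 * dispatch, and each call resolves its own MemoryRegionSection, so mixing
 * widths on one structure is fine. The helper name and offset are
 * hypothetical. */
static inline void example_write_index(target_phys_addr_t base, uint16_t idx)
{
    stw_le_phys(base + 2, idx);
}
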
bellardaab33092005-10-30 20:48:42 +00003992/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05003993void stq_phys(target_phys_addr_t addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00003994{
3995 val = tswap64(val);
Stefan Weil71d2b722011-03-26 21:06:56 +01003996 cpu_physical_memory_write(addr, &val, 8);
bellardaab33092005-10-30 20:48:42 +00003997}
3998
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003999void stq_le_phys(target_phys_addr_t addr, uint64_t val)
4000{
4001 val = cpu_to_le64(val);
4002 cpu_physical_memory_write(addr, &val, 8);
4003}
4004
4005void stq_be_phys(target_phys_addr_t addr, uint64_t val)
4006{
4007 val = cpu_to_be64(val);
4008 cpu_physical_memory_write(addr, &val, 8);
4009}
4010
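/* Illustrative sketch, not part of exec.c: these 64-bit stores go through
 * cpu_physical_memory_write, so unlike the aligned inline helpers above they
 * tolerate unaligned addresses. The helper name and destination address are
 * hypothetical. */
static inline void example_store_timestamp(target_phys_addr_t pa, uint64_t ns)
{
    stq_be_phys(pa, ns);
}
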
aliguori5e2972f2009-03-28 17:51:36 +00004011/* virtual memory access for debug (includes writing to ROM) */
Andreas Färber9349b4f2012-03-14 01:38:32 +01004012int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00004013 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00004014{
4015 int l;
Anthony Liguoric227f092009-10-01 16:12:16 -05004016 target_phys_addr_t phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00004017 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00004018
4019 while (len > 0) {
4020 page = addr & TARGET_PAGE_MASK;
4021 phys_addr = cpu_get_phys_page_debug(env, page);
4022 /* if no physical page mapped, return an error */
4023 if (phys_addr == -1)
4024 return -1;
4025 l = (page + TARGET_PAGE_SIZE) - addr;
4026 if (l > len)
4027 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00004028 phys_addr += (addr & ~TARGET_PAGE_MASK);
aliguori5e2972f2009-03-28 17:51:36 +00004029 if (is_write)
4030 cpu_physical_memory_write_rom(phys_addr, buf, l);
4031 else
aliguori5e2972f2009-03-28 17:51:36 +00004032 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
bellard13eb76e2004-01-24 15:23:36 +00004033 len -= l;
4034 buf += l;
4035 addr += l;
4036 }
4037 return 0;
4038}
Paul Brooka68fe892010-03-01 00:08:59 +00004039#endif
bellard13eb76e2004-01-24 15:23:36 +00004040
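/* Illustrative sketch, not part of exec.c: this is how a debugger front end
 * (e.g. the gdb stub) reads guest-virtual memory; a negative return means no
 * physical page is mapped. The helper name is hypothetical. */
static inline int example_debug_peek32(CPUArchState *env, target_ulong vaddr,
                                       uint32_t *out)
{
    uint8_t buf[4];
    if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0) {
        return -1;
    }
    *out = ldl_p(buf);   /* interpret in target byte order */
    return 0;
}
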
pbrook2e70f6e2008-06-29 01:03:05 +00004041/* In deterministic execution mode, instructions that perform device I/O
 4042 must be at the end of the TB. */
Blue Swirl20503962012-04-09 14:20:20 +00004043void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
pbrook2e70f6e2008-06-29 01:03:05 +00004044{
4045 TranslationBlock *tb;
4046 uint32_t n, cflags;
4047 target_ulong pc, cs_base;
4048 uint64_t flags;
4049
Blue Swirl20503962012-04-09 14:20:20 +00004050 tb = tb_find_pc(retaddr);
pbrook2e70f6e2008-06-29 01:03:05 +00004051 if (!tb) {
4052 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
Blue Swirl20503962012-04-09 14:20:20 +00004053 (void *)retaddr);
pbrook2e70f6e2008-06-29 01:03:05 +00004054 }
4055 n = env->icount_decr.u16.low + tb->icount;
Blue Swirl20503962012-04-09 14:20:20 +00004056 cpu_restore_state(tb, env, retaddr);
pbrook2e70f6e2008-06-29 01:03:05 +00004057 /* Calculate how many instructions had been executed before the fault
thsbf20dc02008-06-30 17:22:19 +00004058 occurred. */
pbrook2e70f6e2008-06-29 01:03:05 +00004059 n = n - env->icount_decr.u16.low;
4060 /* Generate a new TB ending on the I/O insn. */
4061 n++;
4062 /* On MIPS and SH, delay slot instructions can only be restarted if
4063 they were already the first instruction in the TB. If this is not
thsbf20dc02008-06-30 17:22:19 +00004064 the first instruction in a TB then re-execute the preceding
pbrook2e70f6e2008-06-29 01:03:05 +00004065 branch. */
4066#if defined(TARGET_MIPS)
4067 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4068 env->active_tc.PC -= 4;
4069 env->icount_decr.u16.low++;
4070 env->hflags &= ~MIPS_HFLAG_BMASK;
4071 }
4072#elif defined(TARGET_SH4)
4073 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4074 && n > 1) {
4075 env->pc -= 2;
4076 env->icount_decr.u16.low++;
4077 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4078 }
4079#endif
4080 /* This should never happen. */
4081 if (n > CF_COUNT_MASK)
4082 cpu_abort(env, "TB too big during recompile");
4083
4084 cflags = n | CF_LAST_IO;
4085 pc = tb->pc;
4086 cs_base = tb->cs_base;
4087 flags = tb->flags;
4088 tb_phys_invalidate(tb, -1);
4089 /* FIXME: In theory this could raise an exception. In practice
4090 we have already translated the block once so it's probably ok. */
4091 tb_gen_code(env, pc, cs_base, flags, cflags);
thsbf20dc02008-06-30 17:22:19 +00004092 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
pbrook2e70f6e2008-06-29 01:03:05 +00004093 the first in the TB) then we end up generating a whole new TB and
4094 repeating the fault, which is horribly inefficient.
4095 Better would be to execute just this insn uncached, or generate a
4096 second new TB. */
4097 cpu_resume_from_signal(env, NULL);
4098}
4099
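/* Illustrative sketch, not part of exec.c: the icount arithmetic in
 * cpu_io_recompile above recovers how many instructions completed before the
 * I/O instruction. Per the comments there, low + tb->icount gives the budget
 * before the TB ran, and subtracting the value rewound by cpu_restore_state
 * leaves the executed count. Names and values here are hypothetical. */
static inline uint32_t example_executed_insns(uint16_t low_at_fault,
                                              uint16_t tb_icount,
                                              uint16_t low_rewound)
{
    uint32_t n = (uint32_t)low_at_fault + tb_icount;  /* mirrors the code */
    return n - low_rewound;
}
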
Paul Brookb3755a92010-03-12 16:54:58 +00004100#if !defined(CONFIG_USER_ONLY)
4101
Stefan Weil055403b2010-10-22 23:03:32 +02004102void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
bellarde3db7222005-01-26 22:00:47 +00004103{
4104 int i, target_code_size, max_target_code_size;
4105 int direct_jmp_count, direct_jmp2_count, cross_page;
4106 TranslationBlock *tb;
ths3b46e622007-09-17 08:09:54 +00004107
bellarde3db7222005-01-26 22:00:47 +00004108 target_code_size = 0;
4109 max_target_code_size = 0;
4110 cross_page = 0;
4111 direct_jmp_count = 0;
4112 direct_jmp2_count = 0;
 4113 for (i = 0; i < nb_tbs; i++) {
4114 tb = &tbs[i];
4115 target_code_size += tb->size;
4116 if (tb->size > max_target_code_size)
4117 max_target_code_size = tb->size;
4118 if (tb->page_addr[1] != -1)
4119 cross_page++;
4120 if (tb->tb_next_offset[0] != 0xffff) {
4121 direct_jmp_count++;
4122 if (tb->tb_next_offset[1] != 0xffff) {
4123 direct_jmp2_count++;
4124 }
4125 }
4126 }
 4127 /* XXX: avoid using doubles? */
bellard57fec1f2008-02-01 10:50:11 +00004128 cpu_fprintf(f, "Translation buffer state:\n");
Stefan Weil055403b2010-10-22 23:03:32 +02004129 cpu_fprintf(f, "gen code size %td/%ld\n",
bellard26a5f132008-05-28 12:30:31 +00004130 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4131 cpu_fprintf(f, "TB count %d/%d\n",
4132 nb_tbs, code_gen_max_blocks);
ths5fafdf22007-09-16 21:08:06 +00004133 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
bellarde3db7222005-01-26 22:00:47 +00004134 nb_tbs ? target_code_size / nb_tbs : 0,
4135 max_target_code_size);
Stefan Weil055403b2010-10-22 23:03:32 +02004136 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
bellarde3db7222005-01-26 22:00:47 +00004137 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4138 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
ths5fafdf22007-09-16 21:08:06 +00004139 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4140 cross_page,
bellarde3db7222005-01-26 22:00:47 +00004141 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4142 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
ths5fafdf22007-09-16 21:08:06 +00004143 direct_jmp_count,
bellarde3db7222005-01-26 22:00:47 +00004144 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4145 direct_jmp2_count,
4146 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
bellard57fec1f2008-02-01 10:50:11 +00004147 cpu_fprintf(f, "\nStatistics:\n");
bellarde3db7222005-01-26 22:00:47 +00004148 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4149 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4150 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
bellardb67d9a52008-05-23 09:57:34 +00004151 tcg_dump_info(f, cpu_fprintf);
bellarde3db7222005-01-26 22:00:47 +00004152}
4153
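/* Illustrative sketch, not part of exec.c: dump_exec_info takes any
 * fprintf-compatible callback; the monitor's "info jit" command wires it to
 * the monitor the same way this wires it to stderr. The wrapper name is
 * hypothetical. */
static inline void example_dump_jit_stats(void)
{
    dump_exec_info(stderr, fprintf);
}
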
Benjamin Herrenschmidt82afa582012-01-10 01:35:11 +00004154/*
 4155 * A helper function for the _utterly broken_ virtio device model to find
 4156 * out if it's running on a big-endian machine. Don't do this at home, kids!
4157 */
4158bool virtio_is_big_endian(void);
4159bool virtio_is_big_endian(void)
4160{
4161#if defined(TARGET_WORDS_BIGENDIAN)
4162 return true;
4163#else
4164 return false;
4165#endif
4166}
4167
bellard61382a52003-10-27 21:22:23 +00004168#endif
Wen Congyang76f35532012-05-07 12:04:18 +08004169
4170#ifndef CONFIG_USER_ONLY
4171bool cpu_physical_memory_is_io(target_phys_addr_t phys_addr)
4172{
4173 MemoryRegionSection *section;
4174
Avi Kivityac1970f2012-10-03 16:22:53 +02004175 section = phys_page_find(address_space_memory.dispatch,
4176 phys_addr >> TARGET_PAGE_BITS);
Wen Congyang76f35532012-05-07 12:04:18 +08004177
4178 return !(memory_region_is_ram(section->mr) ||
4179 memory_region_is_romd(section->mr));
4180}
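
/* Illustrative sketch, not part of exec.c: callers such as the guest memory
 * dump code use this predicate to skip device-backed pages rather than read
 * from them. The helper name is hypothetical. */
static inline bool example_page_is_dumpable(target_phys_addr_t pa)
{
    return !cpu_physical_memory_is_io(pa & TARGET_PAGE_MASK);
}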
4181#endif