/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#include "cputlb.h"

#include "memory-internal.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t *code_gen_prologue;
static uint8_t *code_gen_buffer;
static size_t code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static size_t code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self-modifying code handling, we count the
       number of writes to a given page; past a threshold we use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

#define P_L2_LEVELS \
    (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
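
/* Editorial worked example (the values are an assumed configuration, not
   from this file): with L1_MAP_ADDR_SPACE_BITS == 64,
   TARGET_PAGE_BITS == 12 and L2_BITS == 10, there are 52 page-number
   bits; 52 % 10 == 2 < 4, so V_L1_BITS == 12, V_L1_SIZE == 4096 and
   V_L1_SHIFT == 40, i.e. a 4096-entry top-level table sitting over four
   levels of 1024-entry tables. */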

uintptr_t qemu_real_host_page_size;
uintptr_t qemu_host_page_size;
uintptr_t qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
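
/* Editorial note: this nil value assumes the PhysPageEntry layout of a
   1-bit is_leaf flag plus a 15-bit ptr field (declared elsewhere), so
   the largest representable node index, 0x7fff, is reserved to mean
   "no child". phys_map_node_alloc() asserts it never hands this value
   out as a real node index. */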

static void io_mem_init(void);
static void memory_map_init(void);

static MemoryRegion io_mem_watch;
#endif

/* statistics */
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static inline void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

}
#else
static inline void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}
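
/* Editorial worked example (using the assumed 64-bit layout sketched
   above): page_find_alloc() picks the l1_map slot with page-index bits
   [51:40], walks or allocates intermediate pointer tables with bits
   [39:30], [29:20] and [19:10], and finally indexes a 1024-entry
   PageDesc array with bits [9:0]. */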

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}


static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}
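
/* Editorial worked example: a step-aligned range collapses into a single
   leaf high up in the tree. With L2_BITS == 10, a region of 2^20 pages
   starting at a 2^20-page-aligned index is recorded as one is_leaf entry
   at level 2 instead of a million level-0 entries; any unaligned head or
   tail recurses into lower levels via the else branch above. */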

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
{
    PhysPageEntry lp = d->phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}
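
/* Editorial note: the not_found path above still returns a valid
   pointer - s_index is pre-initialized to phys_section_unassigned - so
   callers never have to check for NULL. */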

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. This will change when a dedicated libc is used. */
/* ??? 64-bit hosts ought to have no problem mmaping data outside the
   region in which the guest needs to run.  Revisit this.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

/* ??? Should configure for this, not list operating systems here.  */
#if (defined(__linux__) \
    || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__))
# define USE_MMAP
#endif

/* Minimum size of the code gen buffer.  This number is randomly chosen,
   but not so small that we can't have a fair number of TB's live.  */
#define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)

/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__arm__)
# define MAX_CODE_GEN_BUFFER_SIZE  (16u * 1024 * 1024)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
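
/* Editorial arithmetic: on x86-64 the default resolves to
   min(32MB, 2GB) == 32MB, while on 32-bit ARM the 16MB
   MAX_CODE_GEN_BUFFER_SIZE caps the default at 16MB. */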

static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer.  */
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* ??? Needs adjustments.  */
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
           static buffer, we could size this on RESERVED_VA, on the text
           segment size of the executable, or continue to use the default.  */
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    code_gen_buffer_size = tb_size;
    return tb_size;
}
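
#if 0
/* Editorial sketch, not part of the original file: how the clamping
   above behaves. A zero request selects the default, undersized
   requests are raised to the minimum, oversized ones are capped. */
static void size_code_gen_buffer_example(void)
{
    assert(size_code_gen_buffer(64 * 1024) == MIN_CODE_GEN_BUFFER_SIZE);
    assert(size_code_gen_buffer(0) >= MIN_CODE_GEN_BUFFER_SIZE);
    assert(size_code_gen_buffer((size_t)-1) <= MAX_CODE_GEN_BUFFER_SIZE);
}
#endif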

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

static inline void *alloc_code_gen_buffer(void)
{
    map_exec(static_code_gen_buffer, code_gen_buffer_size);
    return static_code_gen_buffer;
}
#elif defined(USE_MMAP)
static inline void *alloc_code_gen_buffer(void)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address.  */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (code_gen_buffer_size > 800u * 1024 * 1024) {
        code_gen_buffer_size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# endif

    buf = mmap((void *)start, code_gen_buffer_size,
               PROT_WRITE | PROT_READ | PROT_EXEC, flags, -1, 0);
    return buf == MAP_FAILED ? NULL : buf;
}
#else
static inline void *alloc_code_gen_buffer(void)
{
    void *buf = g_malloc(code_gen_buffer_size);
    if (buf) {
        map_exec(buf, code_gen_buffer_size);
    }
    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, USE_MMAP */

static inline void code_gen_alloc(size_t tb_size)
{
    code_gen_buffer_size = size_code_gen_buffer(tb_size);
    code_gen_buffer = alloc_code_gen_buffer();
    if (code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }

    /* Steal room for the prologue at the end of the buffer.  This ensures
       (via the MAX_CODE_GEN_BUFFER_SIZE limits above) that direct branches
       from TB's to the prologue are going to be in range.  It also means
       that we don't need to mark (additional) portions of the data segment
       as executable.  */
    code_gen_prologue = code_gen_buffer + code_gen_buffer_size - 1024;
    code_gen_buffer_size -= 1024;

    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
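
/* Editorial note on the resulting layout:
 *
 *   code_gen_buffer
 *   |<-- translated code -->|<-- slack -->|<-- 1024-byte prologue -->|
 *                           ^
 *                           code_gen_buffer_max_size
 *
 * The slack of TCG_MAX_OP_SIZE * OPC_BUF_SIZE bytes means a translation
 * that starts below code_gen_buffer_max_size cannot overrun the
 * prologue even in the worst case. */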

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    tcg_register_jit(code_gen_buffer, code_gen_buffer_size);
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUArchState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUArchState),
        VMSTATE_UINT32(interrupt_request, CPUArchState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUArchState *qemu_get_cpu(int cpu)
{
    CPUArchState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUArchState *env)
{
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}
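
/* Editorial note: tb_free() only reclaims space when the freed TB is the
   most recently allocated one; any other TB stays in the buffer until
   the next tb_flush() resets everything. */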

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUArchState *env1)
{
    CPUArchState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
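
/* Editorial note: both the per-page TB lists and the jump lists above
   tag the two low bits of each pointer, hence the recurring
   (uintptr_t)tb & 3 / & ~3 idiom. In page lists the tag records which
   of the TB's (up to) two pages the link belongs to; in the circular
   jump list, tags 0 and 1 select jmp_next[n] and tag 2 marks the owning
   TB itself, terminating the walk. */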

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUArchState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
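
/* Editorial worked example: set_bits(tab, 3, 7) covers bits 3..9: the
   leading partial byte gets tab[0] |= 0xf8 (bits 3-7), no full 0xff
   bytes are needed, and the trailing partial byte gets tab[1] |= 0x03
   (bits 8-9). */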

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
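
/* Editorial note: the bitmap built above holds one bit per byte of the
   guest page (TARGET_PAGE_SIZE / 8 bytes in all); a set bit means that
   byte lies inside some translated block, which is exactly what
   tb_invalidate_phys_page_fast() tests further below. */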
1034
Andreas Färber9349b4f2012-03-14 01:38:32 +01001035TranslationBlock *tb_gen_code(CPUArchState *env,
pbrook2e70f6e2008-06-29 01:03:05 +00001036 target_ulong pc, target_ulong cs_base,
1037 int flags, int cflags)
bellardd720b932004-04-25 17:57:43 +00001038{
1039 TranslationBlock *tb;
1040 uint8_t *tc_ptr;
Paul Brook41c1b1c2010-03-12 16:54:58 +00001041 tb_page_addr_t phys_pc, phys_page2;
1042 target_ulong virt_page2;
bellardd720b932004-04-25 17:57:43 +00001043 int code_gen_size;
1044
Paul Brook41c1b1c2010-03-12 16:54:58 +00001045 phys_pc = get_page_addr_code(env, pc);
bellardc27004e2005-01-03 23:35:10 +00001046 tb = tb_alloc(pc);
bellardd720b932004-04-25 17:57:43 +00001047 if (!tb) {
1048 /* flush must be done */
1049 tb_flush(env);
1050 /* cannot fail at this point */
bellardc27004e2005-01-03 23:35:10 +00001051 tb = tb_alloc(pc);
pbrook2e70f6e2008-06-29 01:03:05 +00001052 /* Don't forget to invalidate previous TB info. */
1053 tb_invalidated_flag = 1;
bellardd720b932004-04-25 17:57:43 +00001054 }
1055 tc_ptr = code_gen_ptr;
1056 tb->tc_ptr = tc_ptr;
1057 tb->cs_base = cs_base;
1058 tb->flags = flags;
1059 tb->cflags = cflags;
blueswir1d07bde82007-12-11 19:35:45 +00001060 cpu_gen_code(env, tb, &code_gen_size);
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001061 code_gen_ptr = (void *)(((uintptr_t)code_gen_ptr + code_gen_size +
1062 CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
ths3b46e622007-09-17 08:09:54 +00001063
bellardd720b932004-04-25 17:57:43 +00001064 /* check next page if needed */
bellardc27004e2005-01-03 23:35:10 +00001065 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
bellardd720b932004-04-25 17:57:43 +00001066 phys_page2 = -1;
bellardc27004e2005-01-03 23:35:10 +00001067 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
Paul Brook41c1b1c2010-03-12 16:54:58 +00001068 phys_page2 = get_page_addr_code(env, virt_page2);
bellardd720b932004-04-25 17:57:43 +00001069 }
Paul Brook41c1b1c2010-03-12 16:54:58 +00001070 tb_link_page(tb, phys_pc, phys_page2);
pbrook2e70f6e2008-06-29 01:03:05 +00001071 return tb;
bellardd720b932004-04-25 17:57:43 +00001072}
ths3b46e622007-09-17 08:09:54 +00001073
Alexander Graf77a8f1a2012-05-10 22:40:10 +00001074/*
Jan Kiszka8e0fdce2012-05-23 23:41:53 -03001075 * Invalidate all TBs which intersect with the target physical address range
1076 * [start;end[. NOTE: start and end may refer to *different* physical pages.
1077 * 'is_cpu_write_access' should be true if called from a real cpu write
1078 * access: the virtual CPU will exit the current TB if code is modified inside
1079 * this TB.
Alexander Graf77a8f1a2012-05-10 22:40:10 +00001080 */
1081void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
1082 int is_cpu_write_access)
1083{
1084 while (start < end) {
1085 tb_invalidate_phys_page_range(start, end, is_cpu_write_access);
1086 start &= TARGET_PAGE_MASK;
1087 start += TARGET_PAGE_SIZE;
1088 }
1089}
1090
Jan Kiszka8e0fdce2012-05-23 23:41:53 -03001091/*
1092 * Invalidate all TBs which intersect with the target physical address range
1093 * [start;end[. NOTE: start and end must refer to the *same* physical page.
1094 * 'is_cpu_write_access' should be true if called from a real cpu write
1095 * access: the virtual CPU will exit the current TB if code is modified inside
1096 * this TB.
1097 */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001098void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
bellardd720b932004-04-25 17:57:43 +00001099 int is_cpu_write_access)
bellard9fa3e852004-01-04 18:06:42 +00001100{
aliguori6b917542008-11-18 19:46:41 +00001101 TranslationBlock *tb, *tb_next, *saved_tb;
Andreas Färber9349b4f2012-03-14 01:38:32 +01001102 CPUArchState *env = cpu_single_env;
Paul Brook41c1b1c2010-03-12 16:54:58 +00001103 tb_page_addr_t tb_start, tb_end;
aliguori6b917542008-11-18 19:46:41 +00001104 PageDesc *p;
1105 int n;
1106#ifdef TARGET_HAS_PRECISE_SMC
1107 int current_tb_not_found = is_cpu_write_access;
1108 TranslationBlock *current_tb = NULL;
1109 int current_tb_modified = 0;
1110 target_ulong current_pc = 0;
1111 target_ulong current_cs_base = 0;
1112 int current_flags = 0;
1113#endif /* TARGET_HAS_PRECISE_SMC */
bellard9fa3e852004-01-04 18:06:42 +00001114
1115 p = page_find(start >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001116 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00001117 return;
ths5fafdf22007-09-16 21:08:06 +00001118 if (!p->code_bitmap &&
bellardd720b932004-04-25 17:57:43 +00001119 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1120 is_cpu_write_access) {
bellard9fa3e852004-01-04 18:06:42 +00001121 /* build code bitmap */
1122 build_page_bitmap(p);
1123 }
1124
1125 /* we remove all the TBs in the range [start, end[ */
1126 /* XXX: see if in some cases it could be faster to invalidate all the code */
1127 tb = p->first_tb;
1128 while (tb != NULL) {
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001129 n = (uintptr_t)tb & 3;
1130 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
bellard9fa3e852004-01-04 18:06:42 +00001131 tb_next = tb->page_next[n];
1132 /* NOTE: this is subtle as a TB may span two physical pages */
1133 if (n == 0) {
1134 /* NOTE: tb_end may be after the end of the page, but
1135 it is not a problem */
1136 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1137 tb_end = tb_start + tb->size;
1138 } else {
1139 tb_start = tb->page_addr[1];
1140 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1141 }
1142 if (!(tb_end <= start || tb_start >= end)) {
bellardd720b932004-04-25 17:57:43 +00001143#ifdef TARGET_HAS_PRECISE_SMC
1144 if (current_tb_not_found) {
1145 current_tb_not_found = 0;
1146 current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001147 if (env->mem_io_pc) {
bellardd720b932004-04-25 17:57:43 +00001148 /* now we have a real cpu fault */
pbrook2e70f6e2008-06-29 01:03:05 +00001149 current_tb = tb_find_pc(env->mem_io_pc);
bellardd720b932004-04-25 17:57:43 +00001150 }
1151 }
1152 if (current_tb == tb &&
pbrook2e70f6e2008-06-29 01:03:05 +00001153 (current_tb->cflags & CF_COUNT_MASK) != 1) {
bellardd720b932004-04-25 17:57:43 +00001154 /* If we are modifying the current TB, we must stop
1155 its execution. We could be more precise by checking
1156 that the modification is after the current PC, but it
1157 would require a specialized function to partially
1158 restore the CPU state */
ths3b46e622007-09-17 08:09:54 +00001159
bellardd720b932004-04-25 17:57:43 +00001160 current_tb_modified = 1;
Stefan Weil618ba8e2011-04-18 06:39:53 +00001161 cpu_restore_state(current_tb, env, env->mem_io_pc);
aliguori6b917542008-11-18 19:46:41 +00001162 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1163 &current_flags);
bellardd720b932004-04-25 17:57:43 +00001164 }
1165#endif /* TARGET_HAS_PRECISE_SMC */
bellard6f5a9f72005-11-26 20:12:28 +00001166 /* we need to do that to handle the case where a signal
1167 occurs while doing tb_phys_invalidate() */
1168 saved_tb = NULL;
1169 if (env) {
1170 saved_tb = env->current_tb;
1171 env->current_tb = NULL;
1172 }
bellard9fa3e852004-01-04 18:06:42 +00001173 tb_phys_invalidate(tb, -1);
bellard6f5a9f72005-11-26 20:12:28 +00001174 if (env) {
1175 env->current_tb = saved_tb;
1176 if (env->interrupt_request && env->current_tb)
1177 cpu_interrupt(env, env->interrupt_request);
1178 }
bellard9fa3e852004-01-04 18:06:42 +00001179 }
1180 tb = tb_next;
1181 }
1182#if !defined(CONFIG_USER_ONLY)
1183 /* if no code remaining, no need to continue to use slow writes */
1184 if (!p->first_tb) {
1185 invalidate_page_bitmap(p);
bellardd720b932004-04-25 17:57:43 +00001186 if (is_cpu_write_access) {
pbrook2e70f6e2008-06-29 01:03:05 +00001187 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
bellardd720b932004-04-25 17:57:43 +00001188 }
1189 }
1190#endif
1191#ifdef TARGET_HAS_PRECISE_SMC
1192 if (current_tb_modified) {
1193 /* we generate a block containing just the instruction
1194 modifying the memory. It will ensure that it cannot modify
1195 itself */
bellardea1c1802004-06-14 18:56:36 +00001196 env->current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001197 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
bellardd720b932004-04-25 17:57:43 +00001198 cpu_resume_from_signal(env, NULL);
bellard9fa3e852004-01-04 18:06:42 +00001199 }
1200#endif
1201}
1202
1203/* len must be <= 8 and start must be a multiple of len */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001204static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
bellard9fa3e852004-01-04 18:06:42 +00001205{
1206 PageDesc *p;
1207 int offset, b;
bellard59817cc2004-02-16 22:01:13 +00001208#if 0
bellarda4193c82004-06-03 14:01:43 +00001209 if (1) {
aliguori93fcfe32009-01-15 22:34:14 +00001210 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1211 cpu_single_env->mem_io_vaddr, len,
1212 cpu_single_env->eip,
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001213 cpu_single_env->eip +
1214 (intptr_t)cpu_single_env->segs[R_CS].base);
bellard59817cc2004-02-16 22:01:13 +00001215 }
1216#endif
bellard9fa3e852004-01-04 18:06:42 +00001217 p = page_find(start >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001218 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00001219 return;
1220 if (p->code_bitmap) {
1221 offset = start & ~TARGET_PAGE_MASK;
1222 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1223 if (b & ((1 << len) - 1))
1224 goto do_invalidate;
1225 } else {
1226 do_invalidate:
bellardd720b932004-04-25 17:57:43 +00001227 tb_invalidate_phys_page_range(start, start + len, 1);
bellard9fa3e852004-01-04 18:06:42 +00001228 }
1229}
1230
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    uintptr_t pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUArchState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

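/* Two write-protection schemes keep translated code coherent with guest
   memory: user-mode emulation mprotect()s the host page read-only so
   stores fault into page_unprotect(), while softmmu marks the page via
   tlb_protect_code() so stores trap through the notdirty handlers. */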
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
             addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

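/* The chained-jump bookkeeping uses the same tagging trick: jmp_next[n]
   links carry the outgoing jump slot in their low bits, and jmp_first
   is initialised to (tb | 2) so list walks can recognise the owning TB
   as the head of the circular list. */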
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done. */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

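/* tbs[] is filled in allocation order while host code is generated
   linearly into code_gen_buffer, so tc_ptr increases with the index;
   that ordering is what makes the binary search below valid. */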
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
{
    int m_min, m_max, m;
    uintptr_t v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (uintptr_t)code_gen_buffer ||
        tc_ptr >= (uintptr_t)code_gen_ptr) {
        return NULL;
    }
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (uintptr_t)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

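/* Unchaining: tb->jmp_next[n] threads into a circular list rooted at
   the jump target's jmp_first. To unlink, walk to the head (tag 2),
   splice this TB out, restore the generated jump to its unchained
   target with tb_reset_jump(), and recurse into the target TB. */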
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for (;;) {
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for (;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

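/* Inserting a breakpoint must discard any TB that already translated
   the target PC: user-mode can invalidate by virtual address directly,
   while softmmu first resolves the PC to a physical address via
   cpu_get_phys_page_debug(). */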
#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
void tb_invalidate_phys_addr(hwaddr addr)
{
    ram_addr_t ram_addr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);
    if (!(memory_region_is_ram(section->mr)
          || (section->mr->rom_device && section->mr->readable))) {
        return;
    }
    ram_addr = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
        + memory_region_section_addr(section, addr);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}

static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
                            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
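/* Watchpoints are stored as an address plus len_mask = ~(len - 1); for
   the power-of-2 lengths accepted below, this reduces the later hit
   test to a single mask-and-compare on the faulting address. */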
/* Add a watchpoint. */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
        len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint. */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
            && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference. */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints. */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint. */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint. */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference. */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

static void cpu_unlink_tb(CPUArchState *env)
{
    /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
       problem and hope the cpu will stop of its own accord. For userspace
       emulation this often isn't actually as bad as it sounds. Often
       signals are used primarily to interrupt blocking syscalls. */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}

#ifndef CONFIG_USER_ONLY
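/* Under icount, forcing icount_decr.u16.high to 0xffff (see below)
   drives the per-TB instruction budget negative at its next check, so
   the running TB returns to the main loop without the TB graph having
   to be unlinked. */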
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUArchState *env, int mask)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
        return;
    }

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu_unlink_tb(env);
    }
}

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUArchState *env, int mask)
{
    env->interrupt_request |= mask;
    cpu_unlink_tb(env);
}
#endif /* CONFIG_USER_ONLY */

void cpu_reset_interrupt(CPUArchState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUArchState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

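/* Used by user-mode clone(): duplicate the CPU state for a new thread
   while preserving the CPU list linkage, then re-register the debug
   state so breakpoint invalidation runs against the new env. */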
CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&new_env->breakpoints);
    QTAILQ_INIT(&new_env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)
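/* tb_jmp_cache_hash_page() maps a page to the first index of a
   TB_JMP_PAGE_SIZE-entry stripe, so a page flush clears two stripes:
   the page itself and the preceding page, whose TBs may extend into
   the flushed one. */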
void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page. */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below. */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
        != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block. */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

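/* The returned iotlb value is overloaded: for RAM it is the ram_addr_t
   of the page, OR'ed with the notdirty or ROM section index so writes
   are intercepted; for MMIO it is the index of the MemoryRegionSection
   plus the offset within it; watched pages get phys_section_watch. */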
hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM. */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, paddr);
        if (!section->readonly) {
            iotlb |= phys_section_notdirty;
        } else {
            iotlb |= phys_section_rom;
        }
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region. This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address. */
        iotlb = section - phys_sections;
        iotlb += memory_region_section_addr(section, paddr);
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines. */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}

#else
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */

struct walk_memory_regions_data
{
    walk_memory_regions_fn fn;
    void *priv;
    uintptr_t start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   abi_ulong end, int new_prot)
{
    if (data->start != -1ul) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1ul);
    data->prot = new_prot;

    return 0;
}

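/* Recursive descent over the l1_map radix tree: interior levels hold
   L2_SIZE child pointers, leaves hold PageDesc arrays. Runs of pages
   with identical protection are coalesced into a single callback by
   walk_memory_regions_end(). */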
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                         (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}

static int dump_region(void *priv, abi_ulong start,
                       abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
                   " "TARGET_ABI_FMT_lx" %c%c%c\n",
                   start, end, end - start,
                   ((prot & PAGE_READ) ? 'r' : '-'),
                   ((prot & PAGE_WRITE) ? 'w' : '-'),
                   ((prot & PAGE_EXEC) ? 'x' : '-'));

    return (0);
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
                   "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE. The mmap_lock should already be held. */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space. If this assert fires, it probably indicates
       a missing call to h2g_valid. */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside. */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space. If this assert fires, it probably indicates
       a missing call to h2g_valid. */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around. */
        return -1;
    }

    end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p)
            return -1;
        if (!(p->flags & PAGE_VALID))
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            return 0;
        }
    }
    return 0;
}

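/* SMC write-fault path for user-mode: a store hit a page that
   tb_alloc_page() had mprotect()ed read-only. Re-enable PAGE_WRITE for
   the whole host page, drop the translations it contains, and let the
   faulting instruction be retried. */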
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler. However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok. */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(hwaddr base);

static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

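/* Tear-down walks the phys_map radix tree depth-first: leaf entries
   index phys_sections (freeing any subpage containers along the way),
   interior entries recurse one level down before the node is reset to
   PHYS_MAP_NODE_NIL. */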
static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
        if (!p[i].is_leaf) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].ptr);
        }
    }
    lp->is_leaf = 0;
    lp->ptr = PHYS_MAP_NODE_NIL;
}

static void destroy_all_mappings(AddressSpaceDispatch *d)
{
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}

static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}

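/* Sections that do not cover a whole target page are routed through a
   subpage_t: its iomem dispatches by offset to the uint16_t section
   indices recorded in sub_section[], letting several sections share a
   single page of the physical map. */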
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + section->size - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}

static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    hwaddr addr;
    uint16_t section_index = phys_section_add(section);

    assert(size);

    addr = start_addr;
    phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
                  section_index);
}

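/* Split an arbitrary MemoryRegionSection into at most three pieces: an
   unaligned head and tail that go through register_subpage(), and a
   page-aligned middle registered as whole pages. */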
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
    MemoryRegionSection now = *section, remain = *section;

    if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
        || (now.size < TARGET_PAGE_SIZE)) {
        now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space,
                       now.size);
        register_subpage(d, &now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    while (remain.size >= TARGET_PAGE_SIZE) {
        now = remain;
        if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = TARGET_PAGE_SIZE;
            register_subpage(d, &now);
        } else {
            now.size &= TARGET_PAGE_MASK;
            register_multipage(d, &now);
        }
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    now = remain;
    if (now.size) {
        register_subpage(d, &now);
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC 0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

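/* Back guest RAM with a file on a hugetlbfs mount (-mem-path): the temp
   file is unlinked immediately so the pages live only as long as the
   descriptor, and mmap() of that descriptor hands out huge pages. */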
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    unlink(filename);
    free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }
    block->fd = fd;
    return area;
}
#endif

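/* Best-fit search over the RAM block list: among all gaps between
   existing blocks, pick the smallest one that still fits the request,
   which limits fragmentation of the ram_addr_t space. */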
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    if (QLIST_EMPTY(&ram_list.blocks))
        return 0;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QLIST_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QLIST_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;
    QemuOpts *machine_opts;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (machine_opts &&
        !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                    "but dump_guest_core=off specified\n");
        }
    }
}

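/* Block id strings ("<device-path>/<name>") must be unique: migration
   uses them as keys to match source and destination RAM blocks, hence
   a duplicate aborts instead of continuing. */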
Avi Kivityc5705a72011-12-20 15:59:12 +02002482void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
Cam Macdonell84b89d72010-07-26 18:10:57 -06002483{
2484 RAMBlock *new_block, *block;
2485
Avi Kivityc5705a72011-12-20 15:59:12 +02002486 new_block = NULL;
2487 QLIST_FOREACH(block, &ram_list.blocks, next) {
2488 if (block->offset == addr) {
2489 new_block = block;
2490 break;
2491 }
2492 }
2493 assert(new_block);
2494 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002495
Anthony Liguori09e5ab62012-02-03 12:28:43 -06002496 if (dev) {
2497 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002498 if (id) {
2499 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05002500 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002501 }
2502 }
2503 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2504
2505 QLIST_FOREACH(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02002506 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06002507 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2508 new_block->idstr);
2509 abort();
2510 }
2511 }
Avi Kivityc5705a72011-12-20 15:59:12 +02002512}
2513
Luiz Capitulino8490fc72012-09-05 16:50:16 -03002514static int memory_try_enable_merging(void *addr, size_t len)
2515{
2516 QemuOpts *opts;
2517
2518 opts = qemu_opts_find(qemu_find_opts("machine"), 0);
2519 if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
2520 /* disabled by the user */
2521 return 0;
2522 }
2523
2524 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
2525}
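
/* Both -machine properties consulted above are user-visible knobs; a
 * hedged usage sketch (option names as read by the helpers above):
 *
 *   qemu-system-x86_64 -machine pc,mem-merge=off,dump-guest-core=off ...
 *
 * disables KSM page merging and keeps guest RAM out of host core dumps.
 */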
2526
Avi Kivityc5705a72011-12-20 15:59:12 +02002527ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2528 MemoryRegion *mr)
2529{
2530 RAMBlock *new_block;
2531
2532 size = TARGET_PAGE_ALIGN(size);
2533 new_block = g_malloc0(sizeof(*new_block));
Cam Macdonell84b89d72010-07-26 18:10:57 -06002534
Avi Kivity7c637362011-12-21 13:09:49 +02002535 new_block->mr = mr;
Jun Nakajima432d2682010-08-31 16:41:25 +01002536 new_block->offset = find_ram_offset(size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002537 if (host) {
2538 new_block->host = host;
Huang Yingcd19cfa2011-03-02 08:56:19 +01002539 new_block->flags |= RAM_PREALLOC_MASK;
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002540 } else {
2541 if (mem_path) {
2542#if defined (__linux__) && !defined(TARGET_S390X)
2543 new_block->host = file_ram_alloc(new_block, size, mem_path);
2544 if (!new_block->host) {
2545 new_block->host = qemu_vmalloc(size);
Luiz Capitulino8490fc72012-09-05 16:50:16 -03002546 memory_try_enable_merging(new_block->host, size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002547 }
2548#else
2549 fprintf(stderr, "-mem-path option unsupported\n");
2550 exit(1);
2551#endif
2552 } else {
Jan Kiszka868bb332011-06-21 22:59:09 +02002553 if (xen_enabled()) {
Avi Kivityfce537d2011-12-18 15:48:55 +02002554 xen_ram_alloc(new_block->offset, size, mr);
Christian Borntraegerfdec9912012-06-15 05:10:30 +00002555 } else if (kvm_enabled()) {
2556 /* some s390/kvm configurations have special constraints */
2557 new_block->host = kvm_vmalloc(size);
Jun Nakajima432d2682010-08-31 16:41:25 +01002558 } else {
2559 new_block->host = qemu_vmalloc(size);
2560 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03002561 memory_try_enable_merging(new_block->host, size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002562 }
2563 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06002564 new_block->length = size;
2565
2566 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2567
Anthony Liguori7267c092011-08-20 22:09:37 -05002568 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
Cam Macdonell84b89d72010-07-26 18:10:57 -06002569 last_ram_offset() >> TARGET_PAGE_BITS);
Igor Mitsyanko5fda0432012-08-10 18:45:11 +04002570 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2571 0, size >> TARGET_PAGE_BITS);
Juan Quintela1720aee2012-06-22 13:14:17 +02002572 cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002573
Jason Baronddb97f12012-08-02 15:44:16 -04002574 qemu_ram_setup_dump(new_block->host, size);
Luiz Capitulinoad0b5322012-10-05 16:47:57 -03002575 qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
Jason Baronddb97f12012-08-02 15:44:16 -04002576
Cam Macdonell84b89d72010-07-26 18:10:57 -06002577 if (kvm_enabled())
2578 kvm_setup_guest_memory(new_block->host, size);
2579
2580 return new_block->offset;
2581}
2582
Avi Kivityc5705a72011-12-20 15:59:12 +02002583ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
pbrook94a6b542009-04-11 17:15:54 +00002584{
Avi Kivityc5705a72011-12-20 15:59:12 +02002585 return qemu_ram_alloc_from_ptr(size, NULL, mr);
pbrook94a6b542009-04-11 17:15:54 +00002586}
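
/* Illustrative sketch (not part of the original file): how guest RAM is
 * obtained.  Real callers normally go through the memory API --
 * memory_region_init_ram() ends up calling qemu_ram_alloc() -- but the
 * direct calls look like this.  The name "example.vram" and the 16 MiB
 * size are made up.
 */
#if 0
static void example_alloc_vram(MemoryRegion *mr, DeviceState *dev)
{
    ram_addr_t offset;

    offset = qemu_ram_alloc(16 * 1024 * 1024, mr);   /* reserve the block */
    qemu_ram_set_idstr(offset, "example.vram", dev); /* name it for migration */
    memset(qemu_get_ram_ptr(offset), 0, 16 * 1024 * 1024); /* host view */
}
#endif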
bellarde9a1ab12007-02-08 23:08:38 +00002587
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002588void qemu_ram_free_from_ptr(ram_addr_t addr)
2589{
2590 RAMBlock *block;
2591
2592 QLIST_FOREACH(block, &ram_list.blocks, next) {
2593 if (addr == block->offset) {
2594 QLIST_REMOVE(block, next);
Anthony Liguori7267c092011-08-20 22:09:37 -05002595 g_free(block);
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002596 return;
2597 }
2598 }
2599}
2600
Anthony Liguoric227f092009-10-01 16:12:16 -05002601void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00002602{
Alex Williamson04b16652010-07-02 11:13:17 -06002603 RAMBlock *block;
2604
2605 QLIST_FOREACH(block, &ram_list.blocks, next) {
2606 if (addr == block->offset) {
2607 QLIST_REMOVE(block, next);
Huang Yingcd19cfa2011-03-02 08:56:19 +01002608 if (block->flags & RAM_PREALLOC_MASK) {
2609 ;
2610 } else if (mem_path) {
Alex Williamson04b16652010-07-02 11:13:17 -06002611#if defined (__linux__) && !defined(TARGET_S390X)
2612 if (block->fd) {
2613 munmap(block->host, block->length);
2614 close(block->fd);
2615 } else {
2616 qemu_vfree(block->host);
2617 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01002618#else
2619 abort();
Alex Williamson04b16652010-07-02 11:13:17 -06002620#endif
2621 } else {
2622#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2623 munmap(block->host, block->length);
2624#else
Jan Kiszka868bb332011-06-21 22:59:09 +02002625 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002626 xen_invalidate_map_cache_entry(block->host);
Jun Nakajima432d2682010-08-31 16:41:25 +01002627 } else {
2628 qemu_vfree(block->host);
2629 }
Alex Williamson04b16652010-07-02 11:13:17 -06002630#endif
2631 }
Anthony Liguori7267c092011-08-20 22:09:37 -05002632 g_free(block);
Alex Williamson04b16652010-07-02 11:13:17 -06002633 return;
2634 }
2635 }
2636
bellarde9a1ab12007-02-08 23:08:38 +00002637}
2638
Huang Yingcd19cfa2011-03-02 08:56:19 +01002639#ifndef _WIN32
2640void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2641{
2642 RAMBlock *block;
2643 ram_addr_t offset;
2644 int flags;
2645 void *area, *vaddr;
2646
2647 QLIST_FOREACH(block, &ram_list.blocks, next) {
2648 offset = addr - block->offset;
2649 if (offset < block->length) {
2650 vaddr = block->host + offset;
2651 if (block->flags & RAM_PREALLOC_MASK) {
2652 ;
2653 } else {
2654 flags = MAP_FIXED;
2655 munmap(vaddr, length);
2656 if (mem_path) {
2657#if defined(__linux__) && !defined(TARGET_S390X)
2658 if (block->fd) {
2659#ifdef MAP_POPULATE
2660 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
2661 MAP_PRIVATE;
2662#else
2663 flags |= MAP_PRIVATE;
2664#endif
2665 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2666 flags, block->fd, offset);
2667 } else {
2668 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2669 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2670 flags, -1, 0);
2671 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01002672#else
2673 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01002674#endif
2675 } else {
2676#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2677 flags |= MAP_SHARED | MAP_ANONYMOUS;
2678 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
2679 flags, -1, 0);
2680#else
2681 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2682 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2683 flags, -1, 0);
2684#endif
2685 }
2686 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002687 fprintf(stderr, "Could not remap addr: "
2688 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01002689 length, addr);
2690 exit(1);
2691 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03002692 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04002693 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01002694 }
2695 return;
2696 }
2697 }
2698}
2699#endif /* !_WIN32 */
2700
pbrookdc828ca2009-04-09 22:21:07 +00002701/* Return a host pointer to ram allocated with qemu_ram_alloc.
pbrook5579c7f2009-04-11 14:47:08 +00002702 With the exception of the softmmu code in this file, this should
2703 only be used for local memory (e.g. video ram) that the device owns,
2704 and knows it isn't going to access beyond the end of the block.
2705
2706 It should not be used for general purpose DMA.
2707 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2708 */
Anthony Liguoric227f092009-10-01 16:12:16 -05002709void *qemu_get_ram_ptr(ram_addr_t addr)
pbrookdc828ca2009-04-09 22:21:07 +00002710{
pbrook94a6b542009-04-11 17:15:54 +00002711 RAMBlock *block;
2712
Alex Williamsonf471a172010-06-11 11:11:42 -06002713 QLIST_FOREACH(block, &ram_list.blocks, next) {
2714 if (addr - block->offset < block->length) {
Vincent Palatin7d82af32011-03-10 15:47:46 -05002715 /* Move this entry to the start of the list. */
2716 if (block != QLIST_FIRST(&ram_list.blocks)) {
2717 QLIST_REMOVE(block, next);
2718 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
2719 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002720 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01002721 /* We need to check whether the requested address is in the RAM
2722 * block because we don't want to map all of guest memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01002723 * If it is, map only up to the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01002724 */
2725 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002726 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01002727 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002728 block->host =
2729 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01002730 }
2731 }
Alex Williamsonf471a172010-06-11 11:11:42 -06002732 return block->host + (addr - block->offset);
2733 }
pbrook94a6b542009-04-11 17:15:54 +00002734 }
Alex Williamsonf471a172010-06-11 11:11:42 -06002735
2736 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2737 abort();
2738
2739 return NULL;
pbrookdc828ca2009-04-09 22:21:07 +00002740}
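
/* Hedged example of the intended use spelled out in the comment above: a
 * device touching its own RAM block through the fast pointer, staying
 * inside the block.  DMA to arbitrary guest addresses should use
 * cpu_physical_memory_rw() instead.  All names here are hypothetical.
 */
#if 0
static void example_clear_scanline(ram_addr_t vram_offset, int line,
                                   int bytes_per_line)
{
    uint8_t *vram = qemu_get_ram_ptr(vram_offset);

    /* Fine: the device owns this block and the access stays within it. */
    memset(vram + line * bytes_per_line, 0, bytes_per_line);
}
#endif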
2741
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02002742/* Return a host pointer to ram allocated with qemu_ram_alloc.
2743 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
2744 */
2745void *qemu_safe_ram_ptr(ram_addr_t addr)
2746{
2747 RAMBlock *block;
2748
2749 QLIST_FOREACH(block, &ram_list.blocks, next) {
2750 if (addr - block->offset < block->length) {
Jan Kiszka868bb332011-06-21 22:59:09 +02002751 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01002752 /* We need to check whether the requested address is in the RAM
2753 * block because we don't want to map all of guest memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01002754 * If it is, map only up to the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01002755 */
2756 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002757 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01002758 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002759 block->host =
2760 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01002761 }
2762 }
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02002763 return block->host + (addr - block->offset);
2764 }
2765 }
2766
2767 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2768 abort();
2769
2770 return NULL;
2771}
2772
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002773/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
2774 * but takes a size argument */
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002775void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002776{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002777 if (*size == 0) {
2778 return NULL;
2779 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002780 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002781 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02002782 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002783 RAMBlock *block;
2784
2785 QLIST_FOREACH(block, &ram_list.blocks, next) {
2786 if (addr - block->offset < block->length) {
2787 if (addr - block->offset + *size > block->length)
2788 *size = block->length - addr + block->offset;
2789 return block->host + (addr - block->offset);
2790 }
2791 }
2792
2793 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2794 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002795 }
2796}
2797
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002798void qemu_put_ram_ptr(void *addr)
2799{
2800 trace_qemu_put_ram_ptr(addr);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002801}
2802
Marcelo Tosattie8902612010-10-11 15:31:19 -03002803int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00002804{
pbrook94a6b542009-04-11 17:15:54 +00002805 RAMBlock *block;
2806 uint8_t *host = ptr;
2807
Jan Kiszka868bb332011-06-21 22:59:09 +02002808 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002809 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Stefano Stabellini712c2b42011-05-19 18:35:46 +01002810 return 0;
2811 }
2812
Alex Williamsonf471a172010-06-11 11:11:42 -06002813 QLIST_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01002814 /* This case occurs when the block is not mapped. */
2815 if (block->host == NULL) {
2816 continue;
2817 }
Alex Williamsonf471a172010-06-11 11:11:42 -06002818 if (host - block->host < block->length) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03002819 *ram_addr = block->offset + (host - block->host);
2820 return 0;
Alex Williamsonf471a172010-06-11 11:11:42 -06002821 }
pbrook94a6b542009-04-11 17:15:54 +00002822 }
Jun Nakajima432d2682010-08-31 16:41:25 +01002823
Marcelo Tosattie8902612010-10-11 15:31:19 -03002824 return -1;
2825}
Alex Williamsonf471a172010-06-11 11:11:42 -06002826
Marcelo Tosattie8902612010-10-11 15:31:19 -03002827/* Some of the softmmu routines need to translate from a host pointer
2828 (typically a TLB entry) back to a ram offset. */
2829ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
2830{
2831 ram_addr_t ram_addr;
Alex Williamsonf471a172010-06-11 11:11:42 -06002832
Marcelo Tosattie8902612010-10-11 15:31:19 -03002833 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
2834 fprintf(stderr, "Bad ram pointer %p\n", ptr);
2835 abort();
2836 }
2837 return ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00002838}
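
/* Sketch of the round trip between the two translations (assumed usage,
 * hypothetical names): a host pointer produced by qemu_get_ram_ptr() maps
 * back to the ram_addr_t it came from.
 */
#if 0
static void example_round_trip(ram_addr_t addr)
{
    void *host = qemu_get_ram_ptr(addr);
    ram_addr_t back;

    if (qemu_ram_addr_from_host(host, &back) == 0) {
        assert(back == addr); /* resolves to the same offset */
    }
}
#endif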
2839
Avi Kivitya8170e52012-10-23 12:30:10 +02002840static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002841 unsigned size)
bellard33417e72003-08-10 21:47:01 +00002842{
pbrook67d3b952006-12-18 05:03:52 +00002843#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00002844 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
pbrook67d3b952006-12-18 05:03:52 +00002845#endif
Richard Henderson5b450402011-04-18 16:13:12 -07002846#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002847 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00002848#endif
2849 return 0;
2850}
2851
Avi Kivitya8170e52012-10-23 12:30:10 +02002852static void unassigned_mem_write(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002853 uint64_t val, unsigned size)
blueswir1e18231a2008-10-06 18:46:28 +00002854{
2855#ifdef DEBUG_UNASSIGNED
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002856 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
blueswir1e18231a2008-10-06 18:46:28 +00002857#endif
Richard Henderson5b450402011-04-18 16:13:12 -07002858#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002859 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00002860#endif
2861}
2862
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002863static const MemoryRegionOps unassigned_mem_ops = {
2864 .read = unassigned_mem_read,
2865 .write = unassigned_mem_write,
2866 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00002867};
2868
Avi Kivitya8170e52012-10-23 12:30:10 +02002869static uint64_t error_mem_read(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002870 unsigned size)
2871{
2872 abort();
2873}
2874
Avi Kivitya8170e52012-10-23 12:30:10 +02002875static void error_mem_write(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002876 uint64_t value, unsigned size)
2877{
2878 abort();
2879}
2880
2881static const MemoryRegionOps error_mem_ops = {
2882 .read = error_mem_read,
2883 .write = error_mem_write,
2884 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00002885};
2886
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002887static const MemoryRegionOps rom_mem_ops = {
2888 .read = error_mem_read,
2889 .write = unassigned_mem_write,
2890 .endianness = DEVICE_NATIVE_ENDIAN,
2891};
2892
Avi Kivitya8170e52012-10-23 12:30:10 +02002893static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002894 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00002895{
bellard3a7d9292005-08-21 09:26:42 +00002896 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002897 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00002898 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2899#if !defined(CONFIG_USER_ONLY)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002900 tb_invalidate_phys_page_fast(ram_addr, size);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002901 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00002902#endif
2903 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002904 switch (size) {
2905 case 1:
2906 stb_p(qemu_get_ram_ptr(ram_addr), val);
2907 break;
2908 case 2:
2909 stw_p(qemu_get_ram_ptr(ram_addr), val);
2910 break;
2911 case 4:
2912 stl_p(qemu_get_ram_ptr(ram_addr), val);
2913 break;
2914 default:
2915 abort();
2916 }
bellardf23db162005-08-21 19:12:28 +00002917 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002918 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00002919 /* we remove the notdirty callback only if the code has been
2920 flushed */
2921 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00002922 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00002923}
2924
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002925static const MemoryRegionOps notdirty_mem_ops = {
2926 .read = error_mem_read,
2927 .write = notdirty_mem_write,
2928 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00002929};
2930
pbrook0f459d12008-06-09 00:20:13 +00002931/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00002932static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00002933{
Andreas Färber9349b4f2012-03-14 01:38:32 +01002934 CPUArchState *env = cpu_single_env;
aliguori06d55cc2008-11-18 20:24:06 +00002935 target_ulong pc, cs_base;
2936 TranslationBlock *tb;
pbrook0f459d12008-06-09 00:20:13 +00002937 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00002938 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00002939 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00002940
aliguori06d55cc2008-11-18 20:24:06 +00002941 if (env->watchpoint_hit) {
2942 /* We re-entered the check after replacing the TB. Now raise
2943 * the debug interrupt so that it will trigger after the
2944 * current instruction. */
2945 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2946 return;
2947 }
pbrook2e70f6e2008-06-29 01:03:05 +00002948 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002949 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00002950 if ((vaddr == (wp->vaddr & len_mask) ||
2951 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00002952 wp->flags |= BP_WATCHPOINT_HIT;
2953 if (!env->watchpoint_hit) {
2954 env->watchpoint_hit = wp;
2955 tb = tb_find_pc(env->mem_io_pc);
2956 if (!tb) {
2957 cpu_abort(env, "check_watchpoint: could not find TB for "
2958 "pc=%p", (void *)env->mem_io_pc);
2959 }
Stefan Weil618ba8e2011-04-18 06:39:53 +00002960 cpu_restore_state(tb, env, env->mem_io_pc);
aliguori6e140f22008-11-18 20:37:55 +00002961 tb_phys_invalidate(tb, -1);
2962 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2963 env->exception_index = EXCP_DEBUG;
Max Filippov488d6572012-01-29 02:24:39 +04002964 cpu_loop_exit(env);
aliguori6e140f22008-11-18 20:37:55 +00002965 } else {
2966 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2967 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
Max Filippov488d6572012-01-29 02:24:39 +04002968 cpu_resume_from_signal(env, NULL);
aliguori6e140f22008-11-18 20:37:55 +00002969 }
aliguori06d55cc2008-11-18 20:24:06 +00002970 }
aliguori6e140f22008-11-18 20:37:55 +00002971 } else {
2972 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00002973 }
2974 }
2975}
2976
pbrook6658ffb2007-03-16 23:58:11 +00002977/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2978 so these check for a hit then pass through to the normal out-of-line
2979 phys routines. */
Avi Kivitya8170e52012-10-23 12:30:10 +02002980static uint64_t watch_mem_read(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02002981 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00002982{
Avi Kivity1ec9b902012-01-02 12:47:48 +02002983 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
2984 switch (size) {
2985 case 1: return ldub_phys(addr);
2986 case 2: return lduw_phys(addr);
2987 case 4: return ldl_phys(addr);
2988 default: abort();
2989 }
pbrook6658ffb2007-03-16 23:58:11 +00002990}
2991
Avi Kivitya8170e52012-10-23 12:30:10 +02002992static void watch_mem_write(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02002993 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00002994{
Avi Kivity1ec9b902012-01-02 12:47:48 +02002995 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
2996 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04002997 case 1:
2998 stb_phys(addr, val);
2999 break;
3000 case 2:
3001 stw_phys(addr, val);
3002 break;
3003 case 4:
3004 stl_phys(addr, val);
3005 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02003006 default: abort();
3007 }
pbrook6658ffb2007-03-16 23:58:11 +00003008}
3009
Avi Kivity1ec9b902012-01-02 12:47:48 +02003010static const MemoryRegionOps watch_mem_ops = {
3011 .read = watch_mem_read,
3012 .write = watch_mem_write,
3013 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00003014};
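
/* Hedged sketch of how these handlers come into play: inserting a
 * watchpoint (cpu_watchpoint_insert() is defined earlier in this file)
 * makes the TLB route accesses to that page through watch_mem_read/write,
 * which call check_watchpoint() first.  Address and length are made up.
 */
#if 0
static void example_watch(CPUArchState *env)
{
    CPUWatchpoint *wp;

    /* Trap any 4-byte write to 0x1000; a hit raises EXCP_DEBUG. */
    cpu_watchpoint_insert(env, 0x1000, 4, BP_MEM_WRITE | BP_GDB, &wp);
}
#endif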
pbrook6658ffb2007-03-16 23:58:11 +00003015
Avi Kivitya8170e52012-10-23 12:30:10 +02003016static uint64_t subpage_read(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02003017 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003018{
Avi Kivity70c68e42012-01-02 12:32:48 +02003019 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003020 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02003021 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00003022#if defined(DEBUG_SUBPAGE)
3023 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3024 mmio, len, addr, idx);
3025#endif
blueswir1db7b5422007-05-26 17:36:03 +00003026
Avi Kivity5312bd82012-02-12 18:32:55 +02003027 section = &phys_sections[mmio->sub_section[idx]];
3028 addr += mmio->base;
3029 addr -= section->offset_within_address_space;
3030 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02003031 return io_mem_read(section->mr, addr, len);
blueswir1db7b5422007-05-26 17:36:03 +00003032}
3033
Avi Kivitya8170e52012-10-23 12:30:10 +02003034static void subpage_write(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02003035 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003036{
Avi Kivity70c68e42012-01-02 12:32:48 +02003037 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003038 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02003039 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00003040#if defined(DEBUG_SUBPAGE)
Avi Kivity70c68e42012-01-02 12:32:48 +02003041 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
3042 " idx %d value %"PRIx64"\n",
Richard Hendersonf6405242010-04-22 16:47:31 -07003043 __func__, mmio, len, addr, idx, value);
blueswir1db7b5422007-05-26 17:36:03 +00003044#endif
Richard Hendersonf6405242010-04-22 16:47:31 -07003045
Avi Kivity5312bd82012-02-12 18:32:55 +02003046 section = &phys_sections[mmio->sub_section[idx]];
3047 addr += mmio->base;
3048 addr -= section->offset_within_address_space;
3049 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02003050 io_mem_write(section->mr, addr, value, len);
blueswir1db7b5422007-05-26 17:36:03 +00003051}
3052
Avi Kivity70c68e42012-01-02 12:32:48 +02003053static const MemoryRegionOps subpage_ops = {
3054 .read = subpage_read,
3055 .write = subpage_write,
3056 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00003057};
3058
Avi Kivitya8170e52012-10-23 12:30:10 +02003059static uint64_t subpage_ram_read(void *opaque, hwaddr addr,
Avi Kivityde712f92012-01-02 12:41:07 +02003060 unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01003061{
3062 ram_addr_t raddr = addr;
3063 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02003064 switch (size) {
3065 case 1: return ldub_p(ptr);
3066 case 2: return lduw_p(ptr);
3067 case 4: return ldl_p(ptr);
3068 default: abort();
3069 }
Andreas Färber56384e82011-11-30 16:26:21 +01003070}
3071
Avi Kivitya8170e52012-10-23 12:30:10 +02003072static void subpage_ram_write(void *opaque, hwaddr addr,
Avi Kivityde712f92012-01-02 12:41:07 +02003073 uint64_t value, unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01003074{
3075 ram_addr_t raddr = addr;
3076 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02003077 switch (size) {
3078 case 1: return stb_p(ptr, value);
3079 case 2: return stw_p(ptr, value);
3080 case 4: return stl_p(ptr, value);
3081 default: abort();
3082 }
Andreas Färber56384e82011-11-30 16:26:21 +01003083}
3084
Avi Kivityde712f92012-01-02 12:41:07 +02003085static const MemoryRegionOps subpage_ram_ops = {
3086 .read = subpage_ram_read,
3087 .write = subpage_ram_write,
3088 .endianness = DEVICE_NATIVE_ENDIAN,
Andreas Färber56384e82011-11-30 16:26:21 +01003089};
3090
Anthony Liguoric227f092009-10-01 16:12:16 -05003091static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02003092 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00003093{
3094 int idx, eidx;
3095
3096 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3097 return -1;
3098 idx = SUBPAGE_IDX(start);
3099 eidx = SUBPAGE_IDX(end);
3100#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00003101 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
blueswir1db7b5422007-05-26 17:36:03 +00003102 __func__, mmio, start, end, idx, eidx, section);
3103#endif
Avi Kivity5312bd82012-02-12 18:32:55 +02003104 if (memory_region_is_ram(phys_sections[section].mr)) {
3105 MemoryRegionSection new_section = phys_sections[section];
3106 new_section.mr = &io_mem_subpage_ram;
3107 section = phys_section_add(&new_section);
Andreas Färber56384e82011-11-30 16:26:21 +01003108 }
blueswir1db7b5422007-05-26 17:36:03 +00003109 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02003110 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00003111 }
3112
3113 return 0;
3114}
3115
Avi Kivitya8170e52012-10-23 12:30:10 +02003116static subpage_t *subpage_init(hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00003117{
Anthony Liguoric227f092009-10-01 16:12:16 -05003118 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00003119
Anthony Liguori7267c092011-08-20 22:09:37 -05003120 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00003121
3122 mmio->base = base;
Avi Kivity70c68e42012-01-02 12:32:48 +02003123 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
3124 "subpage", TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02003125 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00003126#if defined(DEBUG_SUBPAGE)
aliguori1eec6142009-02-05 22:06:18 +00003127 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
3128 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00003129#endif
Avi Kivity0f0cb162012-02-13 17:14:32 +02003130 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
blueswir1db7b5422007-05-26 17:36:03 +00003131
3132 return mmio;
3133}
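
/* Hypothetical example of when a subpage is needed: a 0x100-byte MMIO
 * region starting at offset 0x800 of a 4 KiB target page.  The dispatch
 * code builds a subpage_t for that page and registers [0x800,0x8ff] to
 * the device's section, while the rest of the page keeps
 * phys_section_unassigned.
 */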
3134
Avi Kivity5312bd82012-02-12 18:32:55 +02003135static uint16_t dummy_section(MemoryRegion *mr)
3136{
3137 MemoryRegionSection section = {
3138 .mr = mr,
3139 .offset_within_address_space = 0,
3140 .offset_within_region = 0,
3141 .size = UINT64_MAX,
3142 };
3143
3144 return phys_section_add(&section);
3145}
3146
Avi Kivitya8170e52012-10-23 12:30:10 +02003147MemoryRegion *iotlb_to_region(hwaddr index)
Avi Kivityaa102232012-03-08 17:06:55 +02003148{
Avi Kivity37ec01d2012-03-08 18:08:35 +02003149 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02003150}
3151
Avi Kivitye9179ce2009-06-14 11:38:52 +03003152static void io_mem_init(void)
3153{
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003154 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003155 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
3156 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
3157 "unassigned", UINT64_MAX);
3158 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
3159 "notdirty", UINT64_MAX);
Avi Kivityde712f92012-01-02 12:41:07 +02003160 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
3161 "subpage-ram", UINT64_MAX);
Avi Kivity1ec9b902012-01-02 12:47:48 +02003162 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
3163 "watch", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003164}
3165
Avi Kivityac1970f2012-10-03 16:22:53 +02003166static void mem_begin(MemoryListener *listener)
3167{
3168 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
3169
3170 destroy_all_mappings(d);
3171 d->phys_map.ptr = PHYS_MAP_NODE_NIL;
3172}
3173
Avi Kivity50c1e142012-02-08 21:36:02 +02003174static void core_begin(MemoryListener *listener)
3175{
Avi Kivity5312bd82012-02-12 18:32:55 +02003176 phys_sections_clear();
3177 phys_section_unassigned = dummy_section(&io_mem_unassigned);
Avi Kivityaa102232012-03-08 17:06:55 +02003178 phys_section_notdirty = dummy_section(&io_mem_notdirty);
3179 phys_section_rom = dummy_section(&io_mem_rom);
3180 phys_section_watch = dummy_section(&io_mem_watch);
Avi Kivity50c1e142012-02-08 21:36:02 +02003181}
3182
Avi Kivity1d711482012-10-02 18:54:45 +02003183static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02003184{
Andreas Färber9349b4f2012-03-14 01:38:32 +01003185 CPUArchState *env;
Avi Kivity117712c2012-02-12 21:23:17 +02003186
3187 /* since each CPU stores ram addresses in its TLB cache, we must
3188 reset the modified entries */
3189 /* XXX: slow ! */
3190 for(env = first_cpu; env != NULL; env = env->next_cpu) {
3191 tlb_flush(env, 1);
3192 }
Avi Kivity50c1e142012-02-08 21:36:02 +02003193}
3194
Avi Kivity93632742012-02-08 16:54:16 +02003195static void core_log_global_start(MemoryListener *listener)
3196{
3197 cpu_physical_memory_set_dirty_tracking(1);
3198}
3199
3200static void core_log_global_stop(MemoryListener *listener)
3201{
3202 cpu_physical_memory_set_dirty_tracking(0);
3203}
3204
Avi Kivity4855d412012-02-08 21:16:05 +02003205static void io_region_add(MemoryListener *listener,
3206 MemoryRegionSection *section)
3207{
Avi Kivitya2d33522012-03-05 17:40:12 +02003208 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
3209
3210 mrio->mr = section->mr;
3211 mrio->offset = section->offset_within_region;
3212 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
Avi Kivity4855d412012-02-08 21:16:05 +02003213 section->offset_within_address_space, section->size);
Avi Kivitya2d33522012-03-05 17:40:12 +02003214 ioport_register(&mrio->iorange);
Avi Kivity4855d412012-02-08 21:16:05 +02003215}
3216
3217static void io_region_del(MemoryListener *listener,
3218 MemoryRegionSection *section)
3219{
3220 isa_unassign_ioport(section->offset_within_address_space, section->size);
3221}
3222
Avi Kivity93632742012-02-08 16:54:16 +02003223static MemoryListener core_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02003224 .begin = core_begin,
Avi Kivity93632742012-02-08 16:54:16 +02003225 .log_global_start = core_log_global_start,
3226 .log_global_stop = core_log_global_stop,
Avi Kivityac1970f2012-10-03 16:22:53 +02003227 .priority = 1,
Avi Kivity93632742012-02-08 16:54:16 +02003228};
3229
Avi Kivity4855d412012-02-08 21:16:05 +02003230static MemoryListener io_memory_listener = {
3231 .region_add = io_region_add,
3232 .region_del = io_region_del,
Avi Kivity4855d412012-02-08 21:16:05 +02003233 .priority = 0,
3234};
3235
Avi Kivity1d711482012-10-02 18:54:45 +02003236static MemoryListener tcg_memory_listener = {
3237 .commit = tcg_commit,
3238};
3239
Avi Kivityac1970f2012-10-03 16:22:53 +02003240void address_space_init_dispatch(AddressSpace *as)
3241{
3242 AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
3243
3244 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
3245 d->listener = (MemoryListener) {
3246 .begin = mem_begin,
3247 .region_add = mem_add,
3248 .region_nop = mem_add,
3249 .priority = 0,
3250 };
3251 as->dispatch = d;
3252 memory_listener_register(&d->listener, as);
3253}
3254
Avi Kivity83f3c252012-10-07 12:59:55 +02003255void address_space_destroy_dispatch(AddressSpace *as)
3256{
3257 AddressSpaceDispatch *d = as->dispatch;
3258
3259 memory_listener_unregister(&d->listener);
3260 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
3261 g_free(d);
3262 as->dispatch = NULL;
3263}
3264
Avi Kivity62152b82011-07-26 14:26:14 +03003265static void memory_map_init(void)
3266{
Anthony Liguori7267c092011-08-20 22:09:37 -05003267 system_memory = g_malloc(sizeof(*system_memory));
Avi Kivity8417ceb2011-08-03 11:56:14 +03003268 memory_region_init(system_memory, "system", INT64_MAX);
Avi Kivity2673a5d2012-10-02 18:49:28 +02003269 address_space_init(&address_space_memory, system_memory);
3270 address_space_memory.name = "memory";
Avi Kivity309cb472011-08-08 16:09:03 +03003271
Anthony Liguori7267c092011-08-20 22:09:37 -05003272 system_io = g_malloc(sizeof(*system_io));
Avi Kivity309cb472011-08-08 16:09:03 +03003273 memory_region_init(system_io, "io", 65536);
Avi Kivity2673a5d2012-10-02 18:49:28 +02003274 address_space_init(&address_space_io, system_io);
3275 address_space_io.name = "I/O";
Avi Kivity93632742012-02-08 16:54:16 +02003276
Avi Kivityf6790af2012-10-02 20:13:51 +02003277 memory_listener_register(&core_memory_listener, &address_space_memory);
3278 memory_listener_register(&io_memory_listener, &address_space_io);
3279 memory_listener_register(&tcg_memory_listener, &address_space_memory);
Avi Kivity62152b82011-07-26 14:26:14 +03003280}
3281
3282MemoryRegion *get_system_memory(void)
3283{
3284 return system_memory;
3285}
3286
Avi Kivity309cb472011-08-08 16:09:03 +03003287MemoryRegion *get_system_io(void)
3288{
3289 return system_io;
3290}
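
/* Illustrative sketch (assumed board-side usage, not from this file): how
 * a machine plugs RAM into the flat system space created by
 * memory_map_init().  The region name "example.ram" and the 128 MiB size
 * are placeholders.
 */
#if 0
static void example_board_init(void)
{
    MemoryRegion *ram = g_malloc(sizeof(*ram));

    memory_region_init_ram(ram, "example.ram", 128 * 1024 * 1024);
    memory_region_add_subregion(get_system_memory(), 0, ram);
}
#endif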
3291
pbrooke2eef172008-06-08 01:09:01 +00003292#endif /* !defined(CONFIG_USER_ONLY) */
3293
bellard13eb76e2004-01-24 15:23:36 +00003294/* physical memory access (slow version, mainly for debug) */
3295#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +01003296int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00003297 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003298{
3299 int l, flags;
3300 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00003301 void * p;
bellard13eb76e2004-01-24 15:23:36 +00003302
3303 while (len > 0) {
3304 page = addr & TARGET_PAGE_MASK;
3305 l = (page + TARGET_PAGE_SIZE) - addr;
3306 if (l > len)
3307 l = len;
3308 flags = page_get_flags(page);
3309 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00003310 return -1;
bellard13eb76e2004-01-24 15:23:36 +00003311 if (is_write) {
3312 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00003313 return -1;
bellard579a97f2007-11-11 14:26:47 +00003314 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003315 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00003316 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003317 memcpy(p, buf, l);
3318 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00003319 } else {
3320 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00003321 return -1;
bellard579a97f2007-11-11 14:26:47 +00003322 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003323 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00003324 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003325 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00003326 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00003327 }
3328 len -= l;
3329 buf += l;
3330 addr += l;
3331 }
Paul Brooka68fe892010-03-01 00:08:59 +00003332 return 0;
bellard13eb76e2004-01-24 15:23:36 +00003333}
bellard8df1cd02005-01-28 22:37:22 +00003334
bellard13eb76e2004-01-24 15:23:36 +00003335#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00003336
Avi Kivitya8170e52012-10-23 12:30:10 +02003337static void invalidate_and_set_dirty(hwaddr addr,
3338 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00003339{
3340 if (!cpu_physical_memory_is_dirty(addr)) {
3341 /* invalidate code */
3342 tb_invalidate_phys_page_range(addr, addr + length, 0);
3343 /* set dirty bit */
3344 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
3345 }
Anthony PERARDe2269392012-10-03 13:49:22 +00003346 xen_modified_memory(addr, length);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00003347}
3348
Avi Kivitya8170e52012-10-23 12:30:10 +02003349void address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02003350 int len, bool is_write)
bellard13eb76e2004-01-24 15:23:36 +00003351{
Avi Kivityac1970f2012-10-03 16:22:53 +02003352 AddressSpaceDispatch *d = as->dispatch;
Avi Kivity37ec01d2012-03-08 18:08:35 +02003353 int l;
bellard13eb76e2004-01-24 15:23:36 +00003354 uint8_t *ptr;
3355 uint32_t val;
Avi Kivitya8170e52012-10-23 12:30:10 +02003356 hwaddr page;
Avi Kivityf3705d52012-03-08 16:16:34 +02003357 MemoryRegionSection *section;
ths3b46e622007-09-17 08:09:54 +00003358
bellard13eb76e2004-01-24 15:23:36 +00003359 while (len > 0) {
3360 page = addr & TARGET_PAGE_MASK;
3361 l = (page + TARGET_PAGE_SIZE) - addr;
3362 if (l > len)
3363 l = len;
Avi Kivityac1970f2012-10-03 16:22:53 +02003364 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003365
bellard13eb76e2004-01-24 15:23:36 +00003366 if (is_write) {
Avi Kivityf3705d52012-03-08 16:16:34 +02003367 if (!memory_region_is_ram(section->mr)) {
Avi Kivitya8170e52012-10-23 12:30:10 +02003368 hwaddr addr1;
Blue Swirlcc5bea62012-04-14 14:56:48 +00003369 addr1 = memory_region_section_addr(section, addr);
bellard6a00d602005-11-21 23:25:50 +00003370 /* XXX: could force cpu_single_env to NULL to avoid
3371 potential bugs */
aurel326c2934d2009-02-18 21:37:17 +00003372 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003373 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003374 val = ldl_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003375 io_mem_write(section->mr, addr1, val, 4);
bellard13eb76e2004-01-24 15:23:36 +00003376 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003377 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003378 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003379 val = lduw_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003380 io_mem_write(section->mr, addr1, val, 2);
bellard13eb76e2004-01-24 15:23:36 +00003381 l = 2;
3382 } else {
bellard1c213d12005-09-03 10:49:04 +00003383 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003384 val = ldub_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003385 io_mem_write(section->mr, addr1, val, 1);
bellard13eb76e2004-01-24 15:23:36 +00003386 l = 1;
3387 }
Avi Kivityf3705d52012-03-08 16:16:34 +02003388 } else if (!section->readonly) {
Anthony PERARD8ca56922011-07-15 04:32:53 +00003389 ram_addr_t addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02003390 addr1 = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003391 + memory_region_section_addr(section, addr);
bellard13eb76e2004-01-24 15:23:36 +00003392 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003393 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00003394 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00003395 invalidate_and_set_dirty(addr1, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003396 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00003397 }
3398 } else {
Blue Swirlcc5bea62012-04-14 14:56:48 +00003399 if (!(memory_region_is_ram(section->mr) ||
3400 memory_region_is_romd(section->mr))) {
Avi Kivitya8170e52012-10-23 12:30:10 +02003401 hwaddr addr1;
bellard13eb76e2004-01-24 15:23:36 +00003402 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00003403 addr1 = memory_region_section_addr(section, addr);
aurel326c2934d2009-02-18 21:37:17 +00003404 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003405 /* 32 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02003406 val = io_mem_read(section->mr, addr1, 4);
bellardc27004e2005-01-03 23:35:10 +00003407 stl_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003408 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003409 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003410 /* 16 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02003411 val = io_mem_read(section->mr, addr1, 2);
bellardc27004e2005-01-03 23:35:10 +00003412 stw_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003413 l = 2;
3414 } else {
bellard1c213d12005-09-03 10:49:04 +00003415 /* 8 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02003416 val = io_mem_read(section->mr, addr1, 1);
bellardc27004e2005-01-03 23:35:10 +00003417 stb_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003418 l = 1;
3419 }
3420 } else {
3421 /* RAM case */
Anthony PERARD0a1b3572012-03-19 15:54:34 +00003422 ptr = qemu_get_ram_ptr(section->mr->ram_addr
Blue Swirlcc5bea62012-04-14 14:56:48 +00003423 + memory_region_section_addr(section,
3424 addr));
Avi Kivityf3705d52012-03-08 16:16:34 +02003425 memcpy(buf, ptr, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003426 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00003427 }
3428 }
3429 len -= l;
3430 buf += l;
3431 addr += l;
3432 }
3433}
bellard8df1cd02005-01-28 22:37:22 +00003434
Avi Kivitya8170e52012-10-23 12:30:10 +02003435void address_space_write(AddressSpace *as, hwaddr addr,
Avi Kivityac1970f2012-10-03 16:22:53 +02003436 const uint8_t *buf, int len)
3437{
3438 address_space_rw(as, addr, (uint8_t *)buf, len, true);
3439}
3440
3441/**
3442 * address_space_read: read from an address space.
3443 *
3444 * @as: #AddressSpace to be accessed
3445 * @addr: address within that address space
3446 * @buf: buffer with the data transferred
3447 */
Avi Kivitya8170e52012-10-23 12:30:10 +02003448void address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02003449{
3450 address_space_rw(as, addr, buf, len, false);
3451}
3452
3453
Avi Kivitya8170e52012-10-23 12:30:10 +02003454void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02003455 int len, int is_write)
3456{
3457 return address_space_rw(&address_space_memory, addr, buf, len, is_write);
3458}
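
/* Hedged example of the wrappers in action: a device model fetching a
 * descriptor from guest memory and writing back a status byte.  The
 * descriptor layout and address are hypothetical.
 */
#if 0
static void example_process_descriptor(hwaddr desc_addr)
{
    uint8_t desc[16];
    uint8_t done = 1;

    cpu_physical_memory_read(desc_addr, desc, sizeof(desc));
    /* ... act on the descriptor ... */
    cpu_physical_memory_write(desc_addr + sizeof(desc) - 1, &done, 1);
}
#endif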
3459
bellardd0ecd2a2006-04-23 17:14:48 +00003460/* used for ROM loading : can write in RAM and ROM */
Avi Kivitya8170e52012-10-23 12:30:10 +02003461void cpu_physical_memory_write_rom(hwaddr addr,
bellardd0ecd2a2006-04-23 17:14:48 +00003462 const uint8_t *buf, int len)
3463{
Avi Kivityac1970f2012-10-03 16:22:53 +02003464 AddressSpaceDispatch *d = address_space_memory.dispatch;
bellardd0ecd2a2006-04-23 17:14:48 +00003465 int l;
3466 uint8_t *ptr;
Avi Kivitya8170e52012-10-23 12:30:10 +02003467 hwaddr page;
Avi Kivityf3705d52012-03-08 16:16:34 +02003468 MemoryRegionSection *section;
ths3b46e622007-09-17 08:09:54 +00003469
bellardd0ecd2a2006-04-23 17:14:48 +00003470 while (len > 0) {
3471 page = addr & TARGET_PAGE_MASK;
3472 l = (page + TARGET_PAGE_SIZE) - addr;
3473 if (l > len)
3474 l = len;
Avi Kivityac1970f2012-10-03 16:22:53 +02003475 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003476
Blue Swirlcc5bea62012-04-14 14:56:48 +00003477 if (!(memory_region_is_ram(section->mr) ||
3478 memory_region_is_romd(section->mr))) {
bellardd0ecd2a2006-04-23 17:14:48 +00003479 /* do nothing */
3480 } else {
3481 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02003482 addr1 = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003483 + memory_region_section_addr(section, addr);
bellardd0ecd2a2006-04-23 17:14:48 +00003484 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003485 ptr = qemu_get_ram_ptr(addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00003486 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00003487 invalidate_and_set_dirty(addr1, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003488 qemu_put_ram_ptr(ptr);
bellardd0ecd2a2006-04-23 17:14:48 +00003489 }
3490 len -= l;
3491 buf += l;
3492 addr += l;
3493 }
3494}
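
/* Sketch of a typical caller (hypothetical names): firmware loaders use
 * this helper instead of cpu_physical_memory_write() because it also
 * stores into ROM regions that ordinary writes would silently skip.
 */
#if 0
static void example_load_firmware(const uint8_t *blob, int blob_len)
{
    /* Map the blob so it ends at 4 GiB, a typical x86 firmware layout. */
    cpu_physical_memory_write_rom(0x100000000ULL - blob_len, blob, blob_len);
}
#endif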
3495
aliguori6d16c2f2009-01-22 16:59:11 +00003496typedef struct {
3497 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02003498 hwaddr addr;
3499 hwaddr len;
aliguori6d16c2f2009-01-22 16:59:11 +00003500} BounceBuffer;
3501
3502static BounceBuffer bounce;
3503
aliguoriba223c22009-01-22 16:59:16 +00003504typedef struct MapClient {
3505 void *opaque;
3506 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00003507 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00003508} MapClient;
3509
Blue Swirl72cf2d42009-09-12 07:36:22 +00003510static QLIST_HEAD(map_client_list, MapClient) map_client_list
3511 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003512
3513void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3514{
Anthony Liguori7267c092011-08-20 22:09:37 -05003515 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00003516
3517 client->opaque = opaque;
3518 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003519 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00003520 return client;
3521}
3522
3523void cpu_unregister_map_client(void *_client)
3524{
3525 MapClient *client = (MapClient *)_client;
3526
Blue Swirl72cf2d42009-09-12 07:36:22 +00003527 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05003528 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00003529}
3530
3531static void cpu_notify_map_clients(void)
3532{
3533 MapClient *client;
3534
Blue Swirl72cf2d42009-09-12 07:36:22 +00003535 while (!QLIST_EMPTY(&map_client_list)) {
3536 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003537 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09003538 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00003539 }
3540}
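
/* Hedged sketch of the retry protocol implemented above: when a mapping
 * fails because the single bounce buffer is busy, the caller registers a
 * callback that fires once the buffer is released.  Names hypothetical.
 */
#if 0
static void example_map_retry_cb(void *opaque)
{
    /* The bounce buffer was released; retry cpu_physical_memory_map()
     * from here (opaque identifies the waiting device). */
}

static void example_start_dma(void *dev, hwaddr addr, hwaddr len)
{
    hwaddr plen = len;
    void *ptr = cpu_physical_memory_map(addr, &plen, 1);

    if (!ptr) {
        cpu_register_map_client(dev, example_map_retry_cb);
        return;
    }
    /* ... use up to plen bytes at ptr ... */
    cpu_physical_memory_unmap(ptr, plen, 1, plen);
}
#endif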
3541
aliguori6d16c2f2009-01-22 16:59:11 +00003542/* Map a physical memory region into a host virtual address.
3543 * May map a subset of the requested range, given by and returned in *plen.
3544 * May return NULL if resources needed to perform the mapping are exhausted.
3545 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00003546 * Use cpu_register_map_client() to know when retrying the map operation is
3547 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00003548 */
Avi Kivityac1970f2012-10-03 16:22:53 +02003549void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02003550 hwaddr addr,
3551 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02003552 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00003553{
Avi Kivityac1970f2012-10-03 16:22:53 +02003554 AddressSpaceDispatch *d = as->dispatch;
Avi Kivitya8170e52012-10-23 12:30:10 +02003555 hwaddr len = *plen;
3556 hwaddr todo = 0;
aliguori6d16c2f2009-01-22 16:59:11 +00003557 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02003558 hwaddr page;
Avi Kivityf3705d52012-03-08 16:16:34 +02003559 MemoryRegionSection *section;
Anthony PERARDf15fbc42011-07-20 08:17:42 +00003560 ram_addr_t raddr = RAM_ADDR_MAX;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003561 ram_addr_t rlen;
3562 void *ret;
aliguori6d16c2f2009-01-22 16:59:11 +00003563
3564 while (len > 0) {
3565 page = addr & TARGET_PAGE_MASK;
3566 l = (page + TARGET_PAGE_SIZE) - addr;
3567 if (l > len)
3568 l = len;
Avi Kivityac1970f2012-10-03 16:22:53 +02003569 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
aliguori6d16c2f2009-01-22 16:59:11 +00003570
Avi Kivityf3705d52012-03-08 16:16:34 +02003571 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003572 if (todo || bounce.buffer) {
aliguori6d16c2f2009-01-22 16:59:11 +00003573 break;
3574 }
3575 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3576 bounce.addr = addr;
3577 bounce.len = l;
3578 if (!is_write) {
Avi Kivityac1970f2012-10-03 16:22:53 +02003579 address_space_read(as, addr, bounce.buffer, l);
aliguori6d16c2f2009-01-22 16:59:11 +00003580 }
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003581
3582 *plen = l;
3583 return bounce.buffer;
aliguori6d16c2f2009-01-22 16:59:11 +00003584 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003585 if (!todo) {
Avi Kivityf3705d52012-03-08 16:16:34 +02003586 raddr = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003587 + memory_region_section_addr(section, addr);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003588 }
aliguori6d16c2f2009-01-22 16:59:11 +00003589
3590 len -= l;
3591 addr += l;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003592 todo += l;
aliguori6d16c2f2009-01-22 16:59:11 +00003593 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003594 rlen = todo;
3595 ret = qemu_ram_ptr_length(raddr, &rlen);
3596 *plen = rlen;
3597 return ret;
aliguori6d16c2f2009-01-22 16:59:11 +00003598}
3599
Avi Kivityac1970f2012-10-03 16:22:53 +02003600/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00003601 * Will also mark the memory as dirty if is_write == 1. access_len gives
3602 * the amount of memory that was actually read or written by the caller.
3603 */
Avi Kivitya8170e52012-10-23 12:30:10 +02003604void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
3605 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00003606{
3607 if (buffer != bounce.buffer) {
3608 if (is_write) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03003609 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00003610 while (access_len) {
3611 unsigned l;
3612 l = TARGET_PAGE_SIZE;
3613 if (l > access_len)
3614 l = access_len;
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00003615 invalidate_and_set_dirty(addr1, l);
aliguori6d16c2f2009-01-22 16:59:11 +00003616 addr1 += l;
3617 access_len -= l;
3618 }
3619 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003620 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003621 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003622 }
aliguori6d16c2f2009-01-22 16:59:11 +00003623 return;
3624 }
3625 if (is_write) {
Avi Kivityac1970f2012-10-03 16:22:53 +02003626 address_space_write(as, bounce.addr, bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00003627 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00003628 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00003629 bounce.buffer = NULL;
aliguoriba223c22009-01-22 16:59:16 +00003630 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00003631}
bellardd0ecd2a2006-04-23 17:14:48 +00003632
Avi Kivitya8170e52012-10-23 12:30:10 +02003633void *cpu_physical_memory_map(hwaddr addr,
3634 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02003635 int is_write)
3636{
3637 return address_space_map(&address_space_memory, addr, plen, is_write);
3638}
3639
Avi Kivitya8170e52012-10-23 12:30:10 +02003640void cpu_physical_memory_unmap(void *buffer, hwaddr len,
3641 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02003642{
3643 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
3644}
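
/* Hedged example of the zero-copy pattern the map/unmap pair enables: the
 * mapping may be shortened (*plen is updated) or fail outright, so callers
 * loop.  Names are hypothetical.
 */
#if 0
static void example_dma_fill(hwaddr addr, const uint8_t *data, hwaddr len)
{
    while (len > 0) {
        hwaddr plen = len;
        void *ptr = cpu_physical_memory_map(addr, &plen, 1 /* is_write */);

        if (!ptr) {
            break; /* resources exhausted; see cpu_register_map_client() */
        }
        memcpy(ptr, data, plen);
        cpu_physical_memory_unmap(ptr, plen, 1, plen); /* marks pages dirty */
        addr += plen;
        data += plen;
        len -= plen;
    }
}
#endif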
3645
bellard8df1cd02005-01-28 22:37:22 +00003646/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02003647static inline uint32_t ldl_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003648 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003649{
bellard8df1cd02005-01-28 22:37:22 +00003650 uint8_t *ptr;
3651 uint32_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02003652 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00003653
Avi Kivityac1970f2012-10-03 16:22:53 +02003654 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003655
Blue Swirlcc5bea62012-04-14 14:56:48 +00003656 if (!(memory_region_is_ram(section->mr) ||
3657 memory_region_is_romd(section->mr))) {
bellard8df1cd02005-01-28 22:37:22 +00003658 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00003659 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003660 val = io_mem_read(section->mr, addr, 4);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003661#if defined(TARGET_WORDS_BIGENDIAN)
3662 if (endian == DEVICE_LITTLE_ENDIAN) {
3663 val = bswap32(val);
3664 }
3665#else
3666 if (endian == DEVICE_BIG_ENDIAN) {
3667 val = bswap32(val);
3668 }
3669#endif
bellard8df1cd02005-01-28 22:37:22 +00003670 } else {
3671 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02003672 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003673 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003674 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003675 switch (endian) {
3676 case DEVICE_LITTLE_ENDIAN:
3677 val = ldl_le_p(ptr);
3678 break;
3679 case DEVICE_BIG_ENDIAN:
3680 val = ldl_be_p(ptr);
3681 break;
3682 default:
3683 val = ldl_p(ptr);
3684 break;
3685 }
bellard8df1cd02005-01-28 22:37:22 +00003686 }
3687 return val;
3688}
3689
Avi Kivitya8170e52012-10-23 12:30:10 +02003690uint32_t ldl_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003691{
3692 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3693}
3694
Avi Kivitya8170e52012-10-23 12:30:10 +02003695uint32_t ldl_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003696{
3697 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3698}
3699
Avi Kivitya8170e52012-10-23 12:30:10 +02003700uint32_t ldl_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003701{
3702 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
3703}
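/*
 * A short sketch of how the three ldl variants divide the work (the ring
 * layout is hypothetical): guest structures that a spec defines with a
 * fixed byte order use the explicit _le/_be form, so the bswap inside
 * ldl_phys_internal() fires only when target and device order differ;
 * plain ldl_phys() is for data kept in target byte order.
 */
static inline uint32_t example_read_ring_head(hwaddr ring_base)
{
    /* A (hypothetical) ring header defined as little-endian. */
    return ldl_le_phys(ring_base);
}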
3704
bellard84b7b8e2005-11-28 21:19:04 +00003705/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02003706static inline uint64_t ldq_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003707 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00003708{
bellard84b7b8e2005-11-28 21:19:04 +00003709 uint8_t *ptr;
3710 uint64_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02003711 MemoryRegionSection *section;
bellard84b7b8e2005-11-28 21:19:04 +00003712
Avi Kivityac1970f2012-10-03 16:22:53 +02003713 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003714
Blue Swirlcc5bea62012-04-14 14:56:48 +00003715 if (!(memory_region_is_ram(section->mr) ||
3716 memory_region_is_romd(section->mr))) {
bellard84b7b8e2005-11-28 21:19:04 +00003717 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00003718 addr = memory_region_section_addr(section, addr);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003719
 3720    /* XXX This is broken when device endian != cpu endian.
 3721       Fix by honouring the "endian" argument, as the 32-bit path does. */
bellard84b7b8e2005-11-28 21:19:04 +00003722#ifdef TARGET_WORDS_BIGENDIAN
Avi Kivity37ec01d2012-03-08 18:08:35 +02003723 val = io_mem_read(section->mr, addr, 4) << 32;
3724 val |= io_mem_read(section->mr, addr + 4, 4);
bellard84b7b8e2005-11-28 21:19:04 +00003725#else
Avi Kivity37ec01d2012-03-08 18:08:35 +02003726 val = io_mem_read(section->mr, addr, 4);
3727 val |= io_mem_read(section->mr, addr + 4, 4) << 32;
bellard84b7b8e2005-11-28 21:19:04 +00003728#endif
3729 } else {
3730 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02003731 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003732 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003733 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003734 switch (endian) {
3735 case DEVICE_LITTLE_ENDIAN:
3736 val = ldq_le_p(ptr);
3737 break;
3738 case DEVICE_BIG_ENDIAN:
3739 val = ldq_be_p(ptr);
3740 break;
3741 default:
3742 val = ldq_p(ptr);
3743 break;
3744 }
bellard84b7b8e2005-11-28 21:19:04 +00003745 }
3746 return val;
3747}
3748
Avi Kivitya8170e52012-10-23 12:30:10 +02003749uint64_t ldq_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003750{
3751 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3752}
3753
Avi Kivitya8170e52012-10-23 12:30:10 +02003754uint64_t ldq_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003755{
3756 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3757}
3758
Avi Kivitya8170e52012-10-23 12:30:10 +02003759uint64_t ldq_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003760{
3761 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
3762}
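/*
 * Note on the I/O branch above: a 64-bit MMIO access is issued as two
 * 4-byte io_mem_read() calls, never as one atomic 64-bit read.  Roughly,
 * for a little-endian target, ldq_phys() on a device register behaves
 * like this sketch (register address hypothetical):
 */
static inline uint64_t example_split_mmio_read(hwaddr reg)
{
    uint64_t lo = ldl_phys(reg);      /* low word first */
    uint64_t hi = ldl_phys(reg + 4);  /* then high word */
    return lo | (hi << 32);
}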
3763
bellardaab33092005-10-30 20:48:42 +00003764/* XXX: optimize */
Avi Kivitya8170e52012-10-23 12:30:10 +02003765uint32_t ldub_phys(hwaddr addr)
bellardaab33092005-10-30 20:48:42 +00003766{
3767 uint8_t val;
3768 cpu_physical_memory_read(addr, &val, 1);
3769 return val;
3770}
3771
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003772/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02003773static inline uint32_t lduw_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003774 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003775{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003776 uint8_t *ptr;
 3777    uint32_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02003778 MemoryRegionSection *section;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003779
Avi Kivityac1970f2012-10-03 16:22:53 +02003780 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003781
Blue Swirlcc5bea62012-04-14 14:56:48 +00003782 if (!(memory_region_is_ram(section->mr) ||
3783 memory_region_is_romd(section->mr))) {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003784 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00003785 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003786 val = io_mem_read(section->mr, addr, 2);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003787#if defined(TARGET_WORDS_BIGENDIAN)
3788 if (endian == DEVICE_LITTLE_ENDIAN) {
3789 val = bswap16(val);
3790 }
3791#else
3792 if (endian == DEVICE_BIG_ENDIAN) {
3793 val = bswap16(val);
3794 }
3795#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003796 } else {
3797 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02003798 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003799 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003800 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003801 switch (endian) {
3802 case DEVICE_LITTLE_ENDIAN:
3803 val = lduw_le_p(ptr);
3804 break;
3805 case DEVICE_BIG_ENDIAN:
3806 val = lduw_be_p(ptr);
3807 break;
3808 default:
3809 val = lduw_p(ptr);
3810 break;
3811 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003812 }
3813 return val;
bellardaab33092005-10-30 20:48:42 +00003814}
3815
Avi Kivitya8170e52012-10-23 12:30:10 +02003816uint32_t lduw_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003817{
3818 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3819}
3820
Avi Kivitya8170e52012-10-23 12:30:10 +02003821uint32_t lduw_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003822{
3823 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3824}
3825
Avi Kivitya8170e52012-10-23 12:30:10 +02003826uint32_t lduw_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003827{
3828 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
3829}
3830
bellard8df1cd02005-01-28 22:37:22 +00003831/* warning: addr must be aligned. The ram page is not marked as dirty
3832 and the code inside is not invalidated. It is useful if the dirty
3833 bits are used to track modified PTEs */
Avi Kivitya8170e52012-10-23 12:30:10 +02003834void stl_phys_notdirty(hwaddr addr, uint32_t val)
bellard8df1cd02005-01-28 22:37:22 +00003835{
bellard8df1cd02005-01-28 22:37:22 +00003836 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02003837 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00003838
Avi Kivityac1970f2012-10-03 16:22:53 +02003839 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003840
Avi Kivityf3705d52012-03-08 16:16:34 +02003841 if (!memory_region_is_ram(section->mr) || section->readonly) {
Blue Swirlcc5bea62012-04-14 14:56:48 +00003842 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003843 if (memory_region_is_ram(section->mr)) {
3844 section = &phys_sections[phys_section_rom];
3845 }
3846 io_mem_write(section->mr, addr, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00003847 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02003848 unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003849 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003850 + memory_region_section_addr(section, addr);
pbrook5579c7f2009-04-11 14:47:08 +00003851 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00003852 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00003853
3854 if (unlikely(in_migration)) {
3855 if (!cpu_physical_memory_is_dirty(addr1)) {
3856 /* invalidate code */
3857 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3858 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003859 cpu_physical_memory_set_dirty_flags(
3860 addr1, (0xff & ~CODE_DIRTY_FLAG));
aliguori74576192008-10-06 14:02:03 +00003861 }
3862 }
bellard8df1cd02005-01-28 22:37:22 +00003863 }
3864}
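/*
 * A sketch of the intended use of stl_phys_notdirty() (PTE layout and bit
 * value hypothetical): target MMU code that sets accessed/dirty bits in a
 * guest page-table entry must not trip dirty tracking or code invalidation
 * for the page holding the PTE, or every page-table walk would flush
 * translated code.
 */
static inline void example_set_pte_accessed(hwaddr pte_addr, uint32_t pte)
{
    /* 0x20 stands in for a hypothetical ACCESSED bit. */
    stl_phys_notdirty(pte_addr, pte | 0x20);
}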
3865
Avi Kivitya8170e52012-10-23 12:30:10 +02003866void stq_phys_notdirty(hwaddr addr, uint64_t val)
j_mayerbc98a7e2007-04-04 07:55:12 +00003867{
j_mayerbc98a7e2007-04-04 07:55:12 +00003868 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02003869 MemoryRegionSection *section;
j_mayerbc98a7e2007-04-04 07:55:12 +00003870
Avi Kivityac1970f2012-10-03 16:22:53 +02003871 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003872
Avi Kivityf3705d52012-03-08 16:16:34 +02003873 if (!memory_region_is_ram(section->mr) || section->readonly) {
Blue Swirlcc5bea62012-04-14 14:56:48 +00003874 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003875 if (memory_region_is_ram(section->mr)) {
3876 section = &phys_sections[phys_section_rom];
3877 }
j_mayerbc98a7e2007-04-04 07:55:12 +00003878#ifdef TARGET_WORDS_BIGENDIAN
Avi Kivity37ec01d2012-03-08 18:08:35 +02003879 io_mem_write(section->mr, addr, val >> 32, 4);
3880 io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
j_mayerbc98a7e2007-04-04 07:55:12 +00003881#else
Avi Kivity37ec01d2012-03-08 18:08:35 +02003882 io_mem_write(section->mr, addr, (uint32_t)val, 4);
3883 io_mem_write(section->mr, addr + 4, val >> 32, 4);
j_mayerbc98a7e2007-04-04 07:55:12 +00003884#endif
3885 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02003886 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003887 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003888 + memory_region_section_addr(section, addr));
j_mayerbc98a7e2007-04-04 07:55:12 +00003889 stq_p(ptr, val);
3890 }
3891}
3892
bellard8df1cd02005-01-28 22:37:22 +00003893/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02003894static inline void stl_phys_internal(hwaddr addr, uint32_t val,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003895 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003896{
bellard8df1cd02005-01-28 22:37:22 +00003897 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02003898 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00003899
Avi Kivityac1970f2012-10-03 16:22:53 +02003900 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003901
Avi Kivityf3705d52012-03-08 16:16:34 +02003902 if (!memory_region_is_ram(section->mr) || section->readonly) {
Blue Swirlcc5bea62012-04-14 14:56:48 +00003903 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003904 if (memory_region_is_ram(section->mr)) {
3905 section = &phys_sections[phys_section_rom];
3906 }
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003907#if defined(TARGET_WORDS_BIGENDIAN)
3908 if (endian == DEVICE_LITTLE_ENDIAN) {
3909 val = bswap32(val);
3910 }
3911#else
3912 if (endian == DEVICE_BIG_ENDIAN) {
3913 val = bswap32(val);
3914 }
3915#endif
Avi Kivity37ec01d2012-03-08 18:08:35 +02003916 io_mem_write(section->mr, addr, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00003917 } else {
3918 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02003919 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003920 + memory_region_section_addr(section, addr);
bellard8df1cd02005-01-28 22:37:22 +00003921 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003922 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003923 switch (endian) {
3924 case DEVICE_LITTLE_ENDIAN:
3925 stl_le_p(ptr, val);
3926 break;
3927 case DEVICE_BIG_ENDIAN:
3928 stl_be_p(ptr, val);
3929 break;
3930 default:
3931 stl_p(ptr, val);
3932 break;
3933 }
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00003934 invalidate_and_set_dirty(addr1, 4);
bellard8df1cd02005-01-28 22:37:22 +00003935 }
3936}
3937
Avi Kivitya8170e52012-10-23 12:30:10 +02003938void stl_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003939{
3940 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
3941}
3942
Avi Kivitya8170e52012-10-23 12:30:10 +02003943void stl_le_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003944{
3945 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
3946}
3947
Avi Kivitya8170e52012-10-23 12:30:10 +02003948void stl_be_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003949{
3950 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
3951}
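/*
 * The store variants mirror the loads, with one addition: on the RAM path
 * they call invalidate_and_set_dirty(), so translated code over the
 * written range is discarded and migration/VGA dirty logging sees the
 * update.  A hedged usage sketch (the status field is hypothetical):
 */
static inline void example_post_status(hwaddr status_addr, uint32_t code)
{
    /* Completion word defined little-endian by the (hypothetical) spec. */
    stl_le_phys(status_addr, code);
}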
3952
bellardaab33092005-10-30 20:48:42 +00003953/* XXX: optimize */
Avi Kivitya8170e52012-10-23 12:30:10 +02003954void stb_phys(hwaddr addr, uint32_t val)
bellardaab33092005-10-30 20:48:42 +00003955{
3956 uint8_t v = val;
3957 cpu_physical_memory_write(addr, &v, 1);
3958}
3959
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003960/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02003961static inline void stw_phys_internal(hwaddr addr, uint32_t val,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003962 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003963{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003964 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02003965 MemoryRegionSection *section;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003966
Avi Kivityac1970f2012-10-03 16:22:53 +02003967 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003968
Avi Kivityf3705d52012-03-08 16:16:34 +02003969 if (!memory_region_is_ram(section->mr) || section->readonly) {
Blue Swirlcc5bea62012-04-14 14:56:48 +00003970 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003971 if (memory_region_is_ram(section->mr)) {
3972 section = &phys_sections[phys_section_rom];
3973 }
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003974#if defined(TARGET_WORDS_BIGENDIAN)
3975 if (endian == DEVICE_LITTLE_ENDIAN) {
3976 val = bswap16(val);
3977 }
3978#else
3979 if (endian == DEVICE_BIG_ENDIAN) {
3980 val = bswap16(val);
3981 }
3982#endif
Avi Kivity37ec01d2012-03-08 18:08:35 +02003983 io_mem_write(section->mr, addr, val, 2);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003984 } else {
3985 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02003986 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003987 + memory_region_section_addr(section, addr);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003988 /* RAM case */
3989 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003990 switch (endian) {
3991 case DEVICE_LITTLE_ENDIAN:
3992 stw_le_p(ptr, val);
3993 break;
3994 case DEVICE_BIG_ENDIAN:
3995 stw_be_p(ptr, val);
3996 break;
3997 default:
3998 stw_p(ptr, val);
3999 break;
4000 }
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00004001 invalidate_and_set_dirty(addr1, 2);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004002 }
bellardaab33092005-10-30 20:48:42 +00004003}
4004
Avi Kivitya8170e52012-10-23 12:30:10 +02004005void stw_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004006{
4007 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4008}
4009
Avi Kivitya8170e52012-10-23 12:30:10 +02004010void stw_le_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004011{
4012 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4013}
4014
Avi Kivitya8170e52012-10-23 12:30:10 +02004015void stw_be_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004016{
4017 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4018}
4019
bellardaab33092005-10-30 20:48:42 +00004020/* XXX: optimize */
Avi Kivitya8170e52012-10-23 12:30:10 +02004021void stq_phys(hwaddr addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00004022{
4023 val = tswap64(val);
Stefan Weil71d2b722011-03-26 21:06:56 +01004024 cpu_physical_memory_write(addr, &val, 8);
bellardaab33092005-10-30 20:48:42 +00004025}
4026
Avi Kivitya8170e52012-10-23 12:30:10 +02004027void stq_le_phys(hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004028{
4029 val = cpu_to_le64(val);
4030 cpu_physical_memory_write(addr, &val, 8);
4031}
4032
Avi Kivitya8170e52012-10-23 12:30:10 +02004033void stq_be_phys(hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004034{
4035 val = cpu_to_be64(val);
4036 cpu_physical_memory_write(addr, &val, 8);
4037}
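/*
 * Unlike the stl family, the stq helpers convert the value first and then
 * push the raw eight bytes through cpu_physical_memory_write(), so one
 * code path covers RAM and MMIO alike.  Sketch (field hypothetical):
 */
static inline void example_write_le64_field(hwaddr field, uint64_t val)
{
    /* The (hypothetical) field is defined little-endian by its spec. */
    stq_le_phys(field, val);
}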
4038
aliguori5e2972f2009-03-28 17:51:36 +00004039/* virtual memory access for debug (includes writing to ROM) */
Andreas Färber9349b4f2012-03-14 01:38:32 +01004040int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00004041 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00004042{
4043 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02004044 hwaddr phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00004045 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00004046
4047 while (len > 0) {
4048 page = addr & TARGET_PAGE_MASK;
4049 phys_addr = cpu_get_phys_page_debug(env, page);
4050 /* if no physical page mapped, return an error */
4051 if (phys_addr == -1)
4052 return -1;
4053 l = (page + TARGET_PAGE_SIZE) - addr;
4054 if (l > len)
4055 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00004056 phys_addr += (addr & ~TARGET_PAGE_MASK);
aliguori5e2972f2009-03-28 17:51:36 +00004057 if (is_write)
4058 cpu_physical_memory_write_rom(phys_addr, buf, l);
4059 else
aliguori5e2972f2009-03-28 17:51:36 +00004060 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
bellard13eb76e2004-01-24 15:23:36 +00004061 len -= l;
4062 buf += l;
4063 addr += l;
4064 }
4065 return 0;
4066}
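/*
 * cpu_memory_rw_debug() is the accessor of choice for debugger-style code:
 * it walks the guest page tables itself, one page per iteration, and its
 * write path goes through cpu_physical_memory_write_rom() so breakpoints
 * can be planted even in ROM.  A gdbstub-like sketch (helper name
 * hypothetical):
 */
static inline int example_debug_peek(CPUArchState *env, target_ulong vaddr,
                                     uint8_t *buf, int len)
{
    /* is_write == 0: read; returns -1 if a page is unmapped. */
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0);
}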
Paul Brooka68fe892010-03-01 00:08:59 +00004067#endif
bellard13eb76e2004-01-24 15:23:36 +00004068
pbrook2e70f6e2008-06-29 01:03:05 +00004069/* In deterministic execution (icount) mode, an instruction that performs
 4070   device I/O must be the last one in its TB */
Blue Swirl20503962012-04-09 14:20:20 +00004071void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
pbrook2e70f6e2008-06-29 01:03:05 +00004072{
4073 TranslationBlock *tb;
4074 uint32_t n, cflags;
4075 target_ulong pc, cs_base;
4076 uint64_t flags;
4077
Blue Swirl20503962012-04-09 14:20:20 +00004078 tb = tb_find_pc(retaddr);
pbrook2e70f6e2008-06-29 01:03:05 +00004079 if (!tb) {
4080 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
Blue Swirl20503962012-04-09 14:20:20 +00004081 (void *)retaddr);
pbrook2e70f6e2008-06-29 01:03:05 +00004082 }
4083 n = env->icount_decr.u16.low + tb->icount;
Blue Swirl20503962012-04-09 14:20:20 +00004084 cpu_restore_state(tb, env, retaddr);
pbrook2e70f6e2008-06-29 01:03:05 +00004085 /* Calculate how many instructions had been executed before the fault
thsbf20dc02008-06-30 17:22:19 +00004086 occurred. */
pbrook2e70f6e2008-06-29 01:03:05 +00004087 n = n - env->icount_decr.u16.low;
4088 /* Generate a new TB ending on the I/O insn. */
4089 n++;
4090 /* On MIPS and SH, delay slot instructions can only be restarted if
4091 they were already the first instruction in the TB. If this is not
thsbf20dc02008-06-30 17:22:19 +00004092 the first instruction in a TB then re-execute the preceding
pbrook2e70f6e2008-06-29 01:03:05 +00004093 branch. */
4094#if defined(TARGET_MIPS)
4095 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4096 env->active_tc.PC -= 4;
4097 env->icount_decr.u16.low++;
4098 env->hflags &= ~MIPS_HFLAG_BMASK;
4099 }
4100#elif defined(TARGET_SH4)
4101 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4102 && n > 1) {
4103 env->pc -= 2;
4104 env->icount_decr.u16.low++;
4105 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4106 }
4107#endif
4108 /* This should never happen. */
4109 if (n > CF_COUNT_MASK)
4110 cpu_abort(env, "TB too big during recompile");
4111
4112 cflags = n | CF_LAST_IO;
4113 pc = tb->pc;
4114 cs_base = tb->cs_base;
4115 flags = tb->flags;
4116 tb_phys_invalidate(tb, -1);
4117 /* FIXME: In theory this could raise an exception. In practice
4118 we have already translated the block once so it's probably ok. */
4119 tb_gen_code(env, pc, cs_base, flags, cflags);
thsbf20dc02008-06-30 17:22:19 +00004120 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
pbrook2e70f6e2008-06-29 01:03:05 +00004121 the first in the TB) then we end up generating a whole new TB and
4122 repeating the fault, which is horribly inefficient.
4123 Better would be to execute just this insn uncached, or generate a
4124 second new TB. */
4125 cpu_resume_from_signal(env, NULL);
4126}
4127
Paul Brookb3755a92010-03-12 16:54:58 +00004128#if !defined(CONFIG_USER_ONLY)
4129
Stefan Weil055403b2010-10-22 23:03:32 +02004130void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
bellarde3db7222005-01-26 22:00:47 +00004131{
4132 int i, target_code_size, max_target_code_size;
4133 int direct_jmp_count, direct_jmp2_count, cross_page;
4134 TranslationBlock *tb;
ths3b46e622007-09-17 08:09:54 +00004135
bellarde3db7222005-01-26 22:00:47 +00004136 target_code_size = 0;
4137 max_target_code_size = 0;
4138 cross_page = 0;
4139 direct_jmp_count = 0;
4140 direct_jmp2_count = 0;
4141 for(i = 0; i < nb_tbs; i++) {
4142 tb = &tbs[i];
4143 target_code_size += tb->size;
4144 if (tb->size > max_target_code_size)
4145 max_target_code_size = tb->size;
4146 if (tb->page_addr[1] != -1)
4147 cross_page++;
4148 if (tb->tb_next_offset[0] != 0xffff) {
4149 direct_jmp_count++;
4150 if (tb->tb_next_offset[1] != 0xffff) {
4151 direct_jmp2_count++;
4152 }
4153 }
4154 }
 4155    /* XXX: avoid using doubles? */
bellard57fec1f2008-02-01 10:50:11 +00004156 cpu_fprintf(f, "Translation buffer state:\n");
Richard Hendersonf1bc0bc2012-10-16 17:30:10 +10004157 cpu_fprintf(f, "gen code size %td/%zd\n",
bellard26a5f132008-05-28 12:30:31 +00004158 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4159 cpu_fprintf(f, "TB count %d/%d\n",
4160 nb_tbs, code_gen_max_blocks);
ths5fafdf22007-09-16 21:08:06 +00004161 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
bellarde3db7222005-01-26 22:00:47 +00004162 nb_tbs ? target_code_size / nb_tbs : 0,
4163 max_target_code_size);
Stefan Weil055403b2010-10-22 23:03:32 +02004164 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
bellarde3db7222005-01-26 22:00:47 +00004165 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4166 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
ths5fafdf22007-09-16 21:08:06 +00004167 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4168 cross_page,
bellarde3db7222005-01-26 22:00:47 +00004169 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4170 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
ths5fafdf22007-09-16 21:08:06 +00004171 direct_jmp_count,
bellarde3db7222005-01-26 22:00:47 +00004172 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4173 direct_jmp2_count,
4174 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
bellard57fec1f2008-02-01 10:50:11 +00004175 cpu_fprintf(f, "\nStatistics:\n");
bellarde3db7222005-01-26 22:00:47 +00004176 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4177 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4178 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
bellardb67d9a52008-05-23 09:57:34 +00004179 tcg_dump_info(f, cpu_fprintf);
bellarde3db7222005-01-26 22:00:47 +00004180}
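/*
 * dump_exec_info() takes any fprintf-compatible callback (the monitor's
 * "info jit" command is the usual caller), so ad-hoc debugging code can
 * dump translator statistics directly; a minimal sketch:
 */
static inline void example_dump_jit_stats(void)
{
    /* fprintf matches the fprintf_function prototype. */
    dump_exec_info(stderr, fprintf);
}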
4181
Benjamin Herrenschmidt82afa582012-01-10 01:35:11 +00004182/*
4183 * A helper function for the _utterly broken_ virtio device model to find out if
 4184 * it's running on a big-endian machine. Don't do this at home, kids!
4185 */
4186bool virtio_is_big_endian(void);
4187bool virtio_is_big_endian(void)
4188{
4189#if defined(TARGET_WORDS_BIGENDIAN)
4190 return true;
4191#else
4192 return false;
4193#endif
4194}
4195
bellard61382a52003-10-27 21:22:23 +00004196#endif
Wen Congyang76f35532012-05-07 12:04:18 +08004197
4198#ifndef CONFIG_USER_ONLY
Avi Kivitya8170e52012-10-23 12:30:10 +02004199bool cpu_physical_memory_is_io(hwaddr phys_addr)
Wen Congyang76f35532012-05-07 12:04:18 +08004200{
4201 MemoryRegionSection *section;
4202
Avi Kivityac1970f2012-10-03 16:22:53 +02004203 section = phys_page_find(address_space_memory.dispatch,
4204 phys_addr >> TARGET_PAGE_BITS);
Wen Congyang76f35532012-05-07 12:04:18 +08004205
4206 return !(memory_region_is_ram(section->mr) ||
4207 memory_region_is_romd(section->mr));
4208}
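/*
 * A hedged usage sketch for cpu_physical_memory_is_io() (helper name
 * hypothetical): code that must not trigger MMIO side effects, such as a
 * guest-memory dumper, can skip pages that are neither RAM nor ROM.
 */
static inline bool example_safe_to_dump(hwaddr pa)
{
    /* Only plain RAM/ROM(D) pages are safe to read blindly. */
    return !cpu_physical_memory_is_io(pa);
}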
4209#endif