/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#include "cputlb.h"

#include "memory-internal.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t *code_gen_prologue;
static uint8_t *code_gen_buffer;
static size_t code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static size_t code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

#define P_L2_LEVELS \
    (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)

uintptr_t qemu_real_host_page_size;
uintptr_t qemu_host_page_size;
uintptr_t qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);

static MemoryRegion io_mem_watch;
#endif

/* statistics */
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static inline void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static inline void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

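/* Return the PageDesc for the page containing 'index' by walking the
   multi-level l1_map.  With 'alloc' nonzero, missing intermediate levels
   and the final PageDesc array are created on demand; otherwise a missing
   level makes the lookup return NULL. */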
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)

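/* Grow the phys_map_nodes array, if needed, so that at least 'nodes' more
   nodes can be handed out by phys_map_node_alloc() without reallocating. */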
static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}


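/* Recursive helper for phys_page_set(): at each level, sub-ranges that are
   aligned to 'step' (the coverage of one entry at this level) and at least
   'step' pages long become leaf entries pointing directly at 'leaf';
   anything smaller or unaligned recurses one level down. */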
static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

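/* Look up the MemoryRegionSection for physical page 'index' by walking the
   radix tree from the root; a missing node sends the lookup to the
   unassigned section. */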
MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
{
    PhysPageEntry lp = d->phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode.  This will change once a dedicated libc is used.  */
/* ??? 64-bit hosts ought to have no problem mmaping data outside the
   region in which the guest needs to run.  Revisit this.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

/* ??? Should configure for this, not list operating systems here.  */
#if (defined(__linux__) \
     || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
     || defined(__DragonFly__) || defined(__OpenBSD__) \
     || defined(__NetBSD__))
# define USE_MMAP
#endif

/* Minimum size of the code gen buffer.  This number is randomly chosen,
   but not so small that we can't have a fair number of TB's live.  */
#define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)

/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__arm__)
# define MAX_CODE_GEN_BUFFER_SIZE  (16u * 1024 * 1024)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)

static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer.  */
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* ??? Needs adjustments.  */
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
           static buffer, we could size this on RESERVED_VA, on the text
           segment size of the executable, or continue to use the default.  */
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    code_gen_buffer_size = tb_size;
    return tb_size;
}

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

static inline void *alloc_code_gen_buffer(void)
{
    map_exec(static_code_gen_buffer, code_gen_buffer_size);
    return static_code_gen_buffer;
}
#elif defined(USE_MMAP)
static inline void *alloc_code_gen_buffer(void)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address.  */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (code_gen_buffer_size > 800u * 1024 * 1024) {
        code_gen_buffer_size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# endif

    buf = mmap((void *)start, code_gen_buffer_size,
               PROT_WRITE | PROT_READ | PROT_EXEC, flags, -1, 0);
    return buf == MAP_FAILED ? NULL : buf;
}
#else
static inline void *alloc_code_gen_buffer(void)
{
    void *buf = g_malloc(code_gen_buffer_size);
    if (buf) {
        map_exec(buf, code_gen_buffer_size);
    }
    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, USE_MMAP */

static inline void code_gen_alloc(size_t tb_size)
{
    code_gen_buffer_size = size_code_gen_buffer(tb_size);
    code_gen_buffer = alloc_code_gen_buffer();
    if (code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }

    /* Steal room for the prologue at the end of the buffer.  This ensures
       (via the MAX_CODE_GEN_BUFFER_SIZE limits above) that direct branches
       from TB's to the prologue are going to be in range.  It also means
       that we don't need to mark (additional) portions of the data segment
       as executable.  */
    code_gen_prologue = code_gen_buffer + code_gen_buffer_size - 1024;
    code_gen_buffer_size -= 1024;

    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    tcg_register_jit(code_gen_buffer, code_gen_buffer_size);
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUArchState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUArchState),
        VMSTATE_UINT32(interrupt_request, CPUArchState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUArchState *qemu_get_cpu(int cpu)
{
    CPUArchState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUArchState *env)
{
#ifndef CONFIG_USER_ONLY
    CPUState *cpu = ENV_GET_CPU(env);
#endif
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block.  Flush the translation buffer if
   there are too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUArchState *env1)
{
    CPUArchState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
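/* Unlink 'tb' from a singly linked TB list whose next pointer is stored at
   byte offset 'next_offset' within each TranslationBlock. */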
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

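/* Unlink 'tb' from a per-page TB list.  The low two bits of each page_next
   link encode which of the TB's (at most two) pages the link belongs to,
   so links must be masked with ~3 before being dereferenced. */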
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

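/* Remove jump 'n' of 'tb' from the circular list of TBs that jump into the
   same target.  The list is threaded through jmp_next[] entries tagged 0
   or 1 and closed by the target TB's jmp_first entry, which is tagged 2. */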
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
}

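/* Remove 'tb' from every structure that references it: the physical hash
   table, the per-page TB lists, each CPU's tb_jmp_cache, and the jump
   chains.  Any TB that was chained to 'tb' has that jump reset so that it
   no longer branches directly into 'tb'. */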
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUArchState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

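/* Set bits [start, start + len) in the bitmap 'tab', handling the partial
   first and last bytes separately from the run of full bytes in between. */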
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

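/* Build the code bitmap for a page: one bit per byte of the page, set for
   every byte covered by a TB on that page.  tb_invalidate_phys_page_fast()
   uses it to skip writes that cannot touch translated code. */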
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

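/* Translate a block of guest code starting at 'pc' and register the
   resulting TB.  If the TB pool or the code buffer is exhausted, everything
   is flushed first, so the second allocation attempt cannot fail. */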
TranslationBlock *tb_gen_code(CPUArchState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((uintptr_t)code_gen_ptr + code_gen_size +
                             CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start, end). NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
                              int is_cpu_write_access)
{
    while (start < end) {
        tb_invalidate_phys_page_range(start, end, is_cpu_write_access);
        start &= TARGET_PAGE_MASK;
        start += TARGET_PAGE_SIZE;
    }
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start, end). NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUArchState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end) */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip +
                 (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
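/* Invalidate every TB intersecting the target page at 'addr'.  With
   TARGET_HAS_PRECISE_SMC, a TB that modified itself is regenerated around
   the faulting instruction and execution resumes from the signal context
   via cpu_resume_from_signal(). */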
Paul Brook41c1b1c2010-03-12 16:54:58 +00001235static void tb_invalidate_phys_page(tb_page_addr_t addr,
Blue Swirl20503962012-04-09 14:20:20 +00001236 uintptr_t pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00001237{
aliguori6b917542008-11-18 19:46:41 +00001238 TranslationBlock *tb;
bellard9fa3e852004-01-04 18:06:42 +00001239 PageDesc *p;
aliguori6b917542008-11-18 19:46:41 +00001240 int n;
bellardd720b932004-04-25 17:57:43 +00001241#ifdef TARGET_HAS_PRECISE_SMC
aliguori6b917542008-11-18 19:46:41 +00001242 TranslationBlock *current_tb = NULL;
Andreas Färber9349b4f2012-03-14 01:38:32 +01001243 CPUArchState *env = cpu_single_env;
aliguori6b917542008-11-18 19:46:41 +00001244 int current_tb_modified = 0;
1245 target_ulong current_pc = 0;
1246 target_ulong current_cs_base = 0;
1247 int current_flags = 0;
bellardd720b932004-04-25 17:57:43 +00001248#endif
bellard9fa3e852004-01-04 18:06:42 +00001249
1250 addr &= TARGET_PAGE_MASK;
1251 p = page_find(addr >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001252 if (!p)
bellardfd6ce8f2003-05-14 19:00:11 +00001253 return;
1254 tb = p->first_tb;
bellardd720b932004-04-25 17:57:43 +00001255#ifdef TARGET_HAS_PRECISE_SMC
1256 if (tb && pc != 0) {
1257 current_tb = tb_find_pc(pc);
1258 }
1259#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001260 while (tb != NULL) {
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001261 n = (uintptr_t)tb & 3;
1262 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
bellardd720b932004-04-25 17:57:43 +00001263#ifdef TARGET_HAS_PRECISE_SMC
1264 if (current_tb == tb &&
pbrook2e70f6e2008-06-29 01:03:05 +00001265 (current_tb->cflags & CF_COUNT_MASK) != 1) {
bellardd720b932004-04-25 17:57:43 +00001266 /* If we are modifying the current TB, we must stop
1267 its execution. We could be more precise by checking
1268 that the modification is after the current PC, but it
1269 would require a specialized function to partially
1270 restore the CPU state */
ths3b46e622007-09-17 08:09:54 +00001271
bellardd720b932004-04-25 17:57:43 +00001272 current_tb_modified = 1;
Stefan Weil618ba8e2011-04-18 06:39:53 +00001273 cpu_restore_state(current_tb, env, pc);
aliguori6b917542008-11-18 19:46:41 +00001274 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1275 &current_flags);
bellardd720b932004-04-25 17:57:43 +00001276 }
1277#endif /* TARGET_HAS_PRECISE_SMC */
bellard9fa3e852004-01-04 18:06:42 +00001278 tb_phys_invalidate(tb, addr);
1279 tb = tb->page_next[n];
bellardfd6ce8f2003-05-14 19:00:11 +00001280 }
1281 p->first_tb = NULL;
bellardd720b932004-04-25 17:57:43 +00001282#ifdef TARGET_HAS_PRECISE_SMC
1283 if (current_tb_modified) {
1284 /* we generate a block containing just the instruction
1285 that modified the memory; this ensures that the new block
1286 cannot modify itself */
bellardea1c1802004-06-14 18:56:36 +00001287 env->current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001288 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
bellardd720b932004-04-25 17:57:43 +00001289 cpu_resume_from_signal(env, puc);
1290 }
1291#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001292}
bellard9fa3e852004-01-04 18:06:42 +00001293#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001294
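/* Note on the encoding used below: a TB may straddle two guest pages,
   so each page keeps its own singly linked list of TBs.  The page
   index (0 or 1) is stored in the low two bits of the page_next and
   first_tb pointers, which is why list walkers mask with ~3 and the
   code here tags with | n. */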
1295/* add the tb in the target page and protect it if necessary */
ths5fafdf22007-09-16 21:08:06 +00001296static inline void tb_alloc_page(TranslationBlock *tb,
Paul Brook41c1b1c2010-03-12 16:54:58 +00001297 unsigned int n, tb_page_addr_t page_addr)
bellardfd6ce8f2003-05-14 19:00:11 +00001298{
1299 PageDesc *p;
Juan Quintela4429ab42011-06-02 01:53:44 +00001300#ifndef CONFIG_USER_ONLY
1301 bool page_already_protected;
1302#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001303
bellard9fa3e852004-01-04 18:06:42 +00001304 tb->page_addr[n] = page_addr;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001305 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
bellard9fa3e852004-01-04 18:06:42 +00001306 tb->page_next[n] = p->first_tb;
Juan Quintela4429ab42011-06-02 01:53:44 +00001307#ifndef CONFIG_USER_ONLY
1308 page_already_protected = p->first_tb != NULL;
1309#endif
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001310 p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
bellard9fa3e852004-01-04 18:06:42 +00001311 invalidate_page_bitmap(p);
1312
bellard107db442004-06-22 18:48:46 +00001313#if defined(TARGET_HAS_SMC) || 1
bellardd720b932004-04-25 17:57:43 +00001314
bellard9fa3e852004-01-04 18:06:42 +00001315#if defined(CONFIG_USER_ONLY)
bellardfd6ce8f2003-05-14 19:00:11 +00001316 if (p->flags & PAGE_WRITE) {
pbrook53a59602006-03-25 19:31:22 +00001317 target_ulong addr;
1318 PageDesc *p2;
bellard9fa3e852004-01-04 18:06:42 +00001319 int prot;
1320
bellardfd6ce8f2003-05-14 19:00:11 +00001321 /* force the host page to be non-writable (writes will then take a
1322 page fault + mprotect overhead) */
pbrook53a59602006-03-25 19:31:22 +00001323 page_addr &= qemu_host_page_mask;
bellardfd6ce8f2003-05-14 19:00:11 +00001324 prot = 0;
pbrook53a59602006-03-25 19:31:22 +00001325 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1326 addr += TARGET_PAGE_SIZE) {
1327
1328 p2 = page_find (addr >> TARGET_PAGE_BITS);
1329 if (!p2)
1330 continue;
1331 prot |= p2->flags;
1332 p2->flags &= ~PAGE_WRITE;
pbrook53a59602006-03-25 19:31:22 +00001333 }
ths5fafdf22007-09-16 21:08:06 +00001334 mprotect(g2h(page_addr), qemu_host_page_size,
bellardfd6ce8f2003-05-14 19:00:11 +00001335 (prot & PAGE_BITS) & ~PAGE_WRITE);
1336#ifdef DEBUG_TB_INVALIDATE
blueswir1ab3d1722007-11-04 07:31:40 +00001337 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
pbrook53a59602006-03-25 19:31:22 +00001338 page_addr);
bellardfd6ce8f2003-05-14 19:00:11 +00001339#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001340 }
bellard9fa3e852004-01-04 18:06:42 +00001341#else
1342 /* if some code is already present, then the pages are already
1343 protected. So we handle the case where only the first TB is
1344 allocated in a physical page */
Juan Quintela4429ab42011-06-02 01:53:44 +00001345 if (!page_already_protected) {
bellard6a00d602005-11-21 23:25:50 +00001346 tlb_protect_code(page_addr);
bellard9fa3e852004-01-04 18:06:42 +00001347 }
1348#endif
bellardd720b932004-04-25 17:57:43 +00001349
1350#endif /* TARGET_HAS_SMC */
bellardfd6ce8f2003-05-14 19:00:11 +00001351}
1352
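/* Note: in the physical hash table below, TBs are chained through
   tb->phys_hash_next; tb->jmp_first is initialised to the TB itself
   tagged with 2, which list walkers treat as the end-of-list marker. */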
bellard9fa3e852004-01-04 18:06:42 +00001353/* add a new TB and link it to the physical page tables. phys_page2 is
1354 (-1) to indicate that only one page contains the TB. */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001355void tb_link_page(TranslationBlock *tb,
1356 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
bellardd4e81642003-05-25 16:46:15 +00001357{
bellard9fa3e852004-01-04 18:06:42 +00001358 unsigned int h;
1359 TranslationBlock **ptb;
1360
pbrookc8a706f2008-06-02 16:16:42 +00001361 /* Grab the mmap lock to stop another thread invalidating this TB
1362 before we are done. */
1363 mmap_lock();
bellard9fa3e852004-01-04 18:06:42 +00001364 /* add in the physical hash table */
1365 h = tb_phys_hash_func(phys_pc);
1366 ptb = &tb_phys_hash[h];
1367 tb->phys_hash_next = *ptb;
1368 *ptb = tb;
bellardfd6ce8f2003-05-14 19:00:11 +00001369
1370 /* add in the page list */
bellard9fa3e852004-01-04 18:06:42 +00001371 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1372 if (phys_page2 != -1)
1373 tb_alloc_page(tb, 1, phys_page2);
1374 else
1375 tb->page_addr[1] = -1;
bellard9fa3e852004-01-04 18:06:42 +00001376
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001377 tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
bellardd4e81642003-05-25 16:46:15 +00001378 tb->jmp_next[0] = NULL;
1379 tb->jmp_next[1] = NULL;
1380
1381 /* init original jump addresses */
1382 if (tb->tb_next_offset[0] != 0xffff)
1383 tb_reset_jump(tb, 0);
1384 if (tb->tb_next_offset[1] != 0xffff)
1385 tb_reset_jump(tb, 1);
bellard8a40a182005-11-20 10:35:40 +00001386
1387#ifdef DEBUG_TB_CHECK
1388 tb_page_check();
1389#endif
pbrookc8a706f2008-06-02 16:16:42 +00001390 mmap_unlock();
bellardfd6ce8f2003-05-14 19:00:11 +00001391}
1392
Yeongkyoon Leefdbb84d2012-10-31 16:04:24 +09001393#if defined(CONFIG_QEMU_LDST_OPTIMIZATION) && defined(CONFIG_SOFTMMU)
1394/* check whether the given addr is in TCG generated code buffer or not */
1395bool is_tcg_gen_code(uintptr_t tc_ptr)
1396{
1397 /* This can be called during code generation, so code_gen_buffer_max_size
1398 is used instead of code_gen_ptr for the upper boundary check */
1399 return (tc_ptr >= (uintptr_t)code_gen_buffer &&
1400 tc_ptr < (uintptr_t)(code_gen_buffer + code_gen_buffer_max_size));
1401}
1402#endif
1403
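/* tbs[] is filled sequentially as blocks are generated from
   code_gen_buffer, so the array is ordered by tc_ptr and a binary
   search suffices.  A sketch of a typical (hypothetical) caller,
   e.g. a host signal handler mapping a faulting host PC back to a
   guest TB; siginfo_pc stands in for the faulting host PC:

       TranslationBlock *tb = tb_find_pc((uintptr_t)siginfo_pc);
       if (tb) {
           cpu_restore_state(tb, env, (uintptr_t)siginfo_pc);
       }
*/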
bellarda513fe12003-05-27 23:29:48 +00001404/* find the TB 'tb' such that tb->tc_ptr <= tc_ptr <
1405 (tb + 1)->tc_ptr. Return NULL if not found */
Stefan Weil6375e092012-04-06 22:26:15 +02001406TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
bellarda513fe12003-05-27 23:29:48 +00001407{
1408 int m_min, m_max, m;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001409 uintptr_t v;
bellarda513fe12003-05-27 23:29:48 +00001410 TranslationBlock *tb;
1411
1412 if (nb_tbs <= 0)
1413 return NULL;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001414 if (tc_ptr < (uintptr_t)code_gen_buffer ||
1415 tc_ptr >= (uintptr_t)code_gen_ptr) {
bellarda513fe12003-05-27 23:29:48 +00001416 return NULL;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001417 }
bellarda513fe12003-05-27 23:29:48 +00001418 /* binary search (cf Knuth) */
1419 m_min = 0;
1420 m_max = nb_tbs - 1;
1421 while (m_min <= m_max) {
1422 m = (m_min + m_max) >> 1;
1423 tb = &tbs[m];
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001424 v = (uintptr_t)tb->tc_ptr;
bellarda513fe12003-05-27 23:29:48 +00001425 if (v == tc_ptr)
1426 return tb;
1427 else if (tc_ptr < v) {
1428 m_max = m - 1;
1429 } else {
1430 m_min = m + 1;
1431 }
ths5fafdf22007-09-16 21:08:06 +00001432 }
bellarda513fe12003-05-27 23:29:48 +00001433 return &tbs[m_max];
1434}
bellard75012672003-06-21 13:11:07 +00001435
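/* The two helpers below undo TB chaining.  The jmp_next[n] links form
   a circular list threaded through every TB that jumps into a given
   TB; entries carry the jump slot in their low two bits, and the
   value (tb | 2) marks the head stored in jmp_first. */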
bellardea041c02003-06-25 16:16:50 +00001436static void tb_reset_jump_recursive(TranslationBlock *tb);
1437
1438static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1439{
1440 TranslationBlock *tb1, *tb_next, **ptb;
1441 unsigned int n1;
1442
1443 tb1 = tb->jmp_next[n];
1444 if (tb1 != NULL) {
1445 /* find head of list */
1446 for(;;) {
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001447 n1 = (uintptr_t)tb1 & 3;
1448 tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
bellardea041c02003-06-25 16:16:50 +00001449 if (n1 == 2)
1450 break;
1451 tb1 = tb1->jmp_next[n1];
1452 }
1453 /* we are now sure that tb jumps to tb1 */
1454 tb_next = tb1;
1455
1456 /* remove tb from the jmp_first list */
1457 ptb = &tb_next->jmp_first;
1458 for(;;) {
1459 tb1 = *ptb;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001460 n1 = (uintptr_t)tb1 & 3;
1461 tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
bellardea041c02003-06-25 16:16:50 +00001462 if (n1 == n && tb1 == tb)
1463 break;
1464 ptb = &tb1->jmp_next[n1];
1465 }
1466 *ptb = tb->jmp_next[n];
1467 tb->jmp_next[n] = NULL;
ths3b46e622007-09-17 08:09:54 +00001468
bellardea041c02003-06-25 16:16:50 +00001469 /* suppress the jump to next tb in generated code */
1470 tb_reset_jump(tb, n);
1471
bellard01243112004-01-04 15:48:17 +00001472 /* suppress jumps in the tb we could have jumped to */
bellardea041c02003-06-25 16:16:50 +00001473 tb_reset_jump_recursive(tb_next);
1474 }
1475}
1476
1477static void tb_reset_jump_recursive(TranslationBlock *tb)
1478{
1479 tb_reset_jump_recursive2(tb, 0);
1480 tb_reset_jump_recursive2(tb, 1);
1481}
1482
bellard1fddef42005-04-17 19:16:13 +00001483#if defined(TARGET_HAS_ICE)
Paul Brook94df27f2010-02-28 23:47:45 +00001484#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +01001485static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
Paul Brook94df27f2010-02-28 23:47:45 +00001486{
1487 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1488}
1489#else
Avi Kivitya8170e52012-10-23 12:30:10 +02001490void tb_invalidate_phys_addr(hwaddr addr)
bellardd720b932004-04-25 17:57:43 +00001491{
Anthony Liguoric227f092009-10-01 16:12:16 -05001492 ram_addr_t ram_addr;
Avi Kivityf3705d52012-03-08 16:16:34 +02001493 MemoryRegionSection *section;
bellardd720b932004-04-25 17:57:43 +00001494
Avi Kivityac1970f2012-10-03 16:22:53 +02001495 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
Avi Kivityf3705d52012-03-08 16:16:34 +02001496 if (!(memory_region_is_ram(section->mr)
1497 || (section->mr->rom_device && section->mr->readable))) {
Avi Kivity06ef3522012-02-13 16:11:22 +02001498 return;
1499 }
Avi Kivityf3705d52012-03-08 16:16:34 +02001500 ram_addr = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00001501 + memory_region_section_addr(section, addr);
pbrook706cd4b2006-04-08 17:36:21 +00001502 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
bellardd720b932004-04-25 17:57:43 +00001503}
Max Filippov1e7855a2012-04-10 02:48:17 +04001504
1505static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
1506{
Max Filippov9d70c4b2012-05-27 20:21:08 +04001507 tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
1508 (pc & ~TARGET_PAGE_MASK));
Max Filippov1e7855a2012-04-10 02:48:17 +04001509}
bellardc27004e2005-01-03 23:35:10 +00001510#endif
Paul Brook94df27f2010-02-28 23:47:45 +00001511#endif /* TARGET_HAS_ICE */
bellardd720b932004-04-25 17:57:43 +00001512
Paul Brookc527ee82010-03-01 03:31:14 +00001513#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +01001514void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
Paul Brookc527ee82010-03-01 03:31:14 +00001515{
1517}
1518
Andreas Färber9349b4f2012-03-14 01:38:32 +01001519int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
Paul Brookc527ee82010-03-01 03:31:14 +00001520 int flags, CPUWatchpoint **watchpoint)
1521{
1522 return -ENOSYS;
1523}
1524#else
pbrook6658ffb2007-03-16 23:58:11 +00001525/* Add a watchpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001526int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
aliguoria1d1bb32008-11-18 20:07:32 +00001527 int flags, CPUWatchpoint **watchpoint)
pbrook6658ffb2007-03-16 23:58:11 +00001528{
aliguorib4051332008-11-18 20:14:20 +00001529 target_ulong len_mask = ~(len - 1);
aliguoric0ce9982008-11-25 22:13:57 +00001530 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001531
aliguorib4051332008-11-18 20:14:20 +00001532 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
Max Filippov0dc23822012-01-29 03:15:23 +04001533 if ((len & (len - 1)) || (addr & ~len_mask) ||
1534 len == 0 || len > TARGET_PAGE_SIZE) {
aliguorib4051332008-11-18 20:14:20 +00001535 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1536 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1537 return -EINVAL;
1538 }
Anthony Liguori7267c092011-08-20 22:09:37 -05001539 wp = g_malloc(sizeof(*wp));
pbrook6658ffb2007-03-16 23:58:11 +00001540
aliguoria1d1bb32008-11-18 20:07:32 +00001541 wp->vaddr = addr;
aliguorib4051332008-11-18 20:14:20 +00001542 wp->len_mask = len_mask;
aliguoria1d1bb32008-11-18 20:07:32 +00001543 wp->flags = flags;
1544
aliguori2dc9f412008-11-18 20:56:59 +00001545 /* keep all GDB-injected watchpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001546 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001547 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001548 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001549 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001550
pbrook6658ffb2007-03-16 23:58:11 +00001551 tlb_flush_page(env, addr);
aliguoria1d1bb32008-11-18 20:07:32 +00001552
1553 if (watchpoint)
1554 *watchpoint = wp;
1555 return 0;
pbrook6658ffb2007-03-16 23:58:11 +00001556}
1557
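/* Illustrative use of the watchpoint API (hypothetical caller, not
   part of this file); len must be a power of two no larger than
   TARGET_PAGE_SIZE and addr must be len-aligned, or -EINVAL results:

       CPUWatchpoint *wp;
       if (cpu_watchpoint_insert(env, addr, 4, BP_GDB | BP_MEM_WRITE,
                                 &wp) < 0) {
           ... reject the request ...
       }
*/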
aliguoria1d1bb32008-11-18 20:07:32 +00001558/* Remove a specific watchpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001559int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
aliguoria1d1bb32008-11-18 20:07:32 +00001560 int flags)
pbrook6658ffb2007-03-16 23:58:11 +00001561{
aliguorib4051332008-11-18 20:14:20 +00001562 target_ulong len_mask = ~(len - 1);
aliguoria1d1bb32008-11-18 20:07:32 +00001563 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001564
Blue Swirl72cf2d42009-09-12 07:36:22 +00001565 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001566 if (addr == wp->vaddr && len_mask == wp->len_mask
aliguori6e140f22008-11-18 20:37:55 +00001567 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
aliguoria1d1bb32008-11-18 20:07:32 +00001568 cpu_watchpoint_remove_by_ref(env, wp);
pbrook6658ffb2007-03-16 23:58:11 +00001569 return 0;
1570 }
1571 }
aliguoria1d1bb32008-11-18 20:07:32 +00001572 return -ENOENT;
pbrook6658ffb2007-03-16 23:58:11 +00001573}
1574
aliguoria1d1bb32008-11-18 20:07:32 +00001575/* Remove a specific watchpoint by reference. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001576void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
aliguoria1d1bb32008-11-18 20:07:32 +00001577{
Blue Swirl72cf2d42009-09-12 07:36:22 +00001578 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
edgar_igl7d03f822008-05-17 18:58:29 +00001579
aliguoria1d1bb32008-11-18 20:07:32 +00001580 tlb_flush_page(env, watchpoint->vaddr);
1581
Anthony Liguori7267c092011-08-20 22:09:37 -05001582 g_free(watchpoint);
edgar_igl7d03f822008-05-17 18:58:29 +00001583}
1584
aliguoria1d1bb32008-11-18 20:07:32 +00001585/* Remove all matching watchpoints. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001586void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
aliguoria1d1bb32008-11-18 20:07:32 +00001587{
aliguoric0ce9982008-11-25 22:13:57 +00001588 CPUWatchpoint *wp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001589
Blue Swirl72cf2d42009-09-12 07:36:22 +00001590 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001591 if (wp->flags & mask)
1592 cpu_watchpoint_remove_by_ref(env, wp);
aliguoric0ce9982008-11-25 22:13:57 +00001593 }
aliguoria1d1bb32008-11-18 20:07:32 +00001594}
Paul Brookc527ee82010-03-01 03:31:14 +00001595#endif
aliguoria1d1bb32008-11-18 20:07:32 +00001596
1597/* Add a breakpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001598int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
aliguoria1d1bb32008-11-18 20:07:32 +00001599 CPUBreakpoint **breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001600{
bellard1fddef42005-04-17 19:16:13 +00001601#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001602 CPUBreakpoint *bp;
ths3b46e622007-09-17 08:09:54 +00001603
Anthony Liguori7267c092011-08-20 22:09:37 -05001604 bp = g_malloc(sizeof(*bp));
aliguoria1d1bb32008-11-18 20:07:32 +00001605
1606 bp->pc = pc;
1607 bp->flags = flags;
1608
aliguori2dc9f412008-11-18 20:56:59 +00001609 /* keep all GDB-injected breakpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001610 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001611 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001612 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001613 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001614
1615 breakpoint_invalidate(env, pc);
1616
1617 if (breakpoint)
1618 *breakpoint = bp;
1619 return 0;
1620#else
1621 return -ENOSYS;
1622#endif
1623}
1624
1625/* Remove a specific breakpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001626int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
aliguoria1d1bb32008-11-18 20:07:32 +00001627{
1628#if defined(TARGET_HAS_ICE)
1629 CPUBreakpoint *bp;
1630
Blue Swirl72cf2d42009-09-12 07:36:22 +00001631 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00001632 if (bp->pc == pc && bp->flags == flags) {
1633 cpu_breakpoint_remove_by_ref(env, bp);
bellard4c3a88a2003-07-26 12:06:08 +00001634 return 0;
aliguoria1d1bb32008-11-18 20:07:32 +00001635 }
bellard4c3a88a2003-07-26 12:06:08 +00001636 }
aliguoria1d1bb32008-11-18 20:07:32 +00001637 return -ENOENT;
bellard4c3a88a2003-07-26 12:06:08 +00001638#else
aliguoria1d1bb32008-11-18 20:07:32 +00001639 return -ENOSYS;
bellard4c3a88a2003-07-26 12:06:08 +00001640#endif
1641}
1642
aliguoria1d1bb32008-11-18 20:07:32 +00001643/* Remove a specific breakpoint by reference. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001644void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001645{
bellard1fddef42005-04-17 19:16:13 +00001646#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001647 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
bellardd720b932004-04-25 17:57:43 +00001648
aliguoria1d1bb32008-11-18 20:07:32 +00001649 breakpoint_invalidate(env, breakpoint->pc);
1650
Anthony Liguori7267c092011-08-20 22:09:37 -05001651 g_free(breakpoint);
aliguoria1d1bb32008-11-18 20:07:32 +00001652#endif
1653}
1654
1655/* Remove all matching breakpoints. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001656void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
aliguoria1d1bb32008-11-18 20:07:32 +00001657{
1658#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001659 CPUBreakpoint *bp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001660
Blue Swirl72cf2d42009-09-12 07:36:22 +00001661 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001662 if (bp->flags & mask)
1663 cpu_breakpoint_remove_by_ref(env, bp);
aliguoric0ce9982008-11-25 22:13:57 +00001664 }
bellard4c3a88a2003-07-26 12:06:08 +00001665#endif
1666}
1667
bellardc33a3462003-07-29 20:50:33 +00001668/* enable or disable single step mode. EXCP_DEBUG is returned by the
1669 CPU loop after each instruction */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001670void cpu_single_step(CPUArchState *env, int enabled)
bellardc33a3462003-07-29 20:50:33 +00001671{
bellard1fddef42005-04-17 19:16:13 +00001672#if defined(TARGET_HAS_ICE)
bellardc33a3462003-07-29 20:50:33 +00001673 if (env->singlestep_enabled != enabled) {
1674 env->singlestep_enabled = enabled;
aliguorie22a25c2009-03-12 20:12:48 +00001675 if (kvm_enabled())
1676 kvm_update_guest_debug(env, 0);
1677 else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01001678 /* must flush all the translated code to avoid inconsistencies */
aliguorie22a25c2009-03-12 20:12:48 +00001679 /* XXX: only flush what is necessary */
1680 tb_flush(env);
1681 }
bellardc33a3462003-07-29 20:50:33 +00001682 }
1683#endif
1684}
1685
Andreas Färber9349b4f2012-03-14 01:38:32 +01001686static void cpu_unlink_tb(CPUArchState *env)
bellardea041c02003-06-25 16:16:50 +00001687{
pbrookd5975362008-06-07 20:50:51 +00001688 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1689 problem and hope the cpu will stop of its own accord. For userspace
1690 emulation this often isn't actually as bad as it sounds. Often
1691 signals are used primarily to interrupt blocking syscalls. */
aurel323098dba2009-03-07 21:28:24 +00001692 TranslationBlock *tb;
Anthony Liguoric227f092009-10-01 16:12:16 -05001693 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
aurel323098dba2009-03-07 21:28:24 +00001694
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001695 spin_lock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001696 tb = env->current_tb;
1697 /* if the cpu is currently executing code, we must unlink it and
1698 all the potentially executing TBs */
Riku Voipiof76cfe52009-12-04 15:16:30 +02001699 if (tb) {
aurel323098dba2009-03-07 21:28:24 +00001700 env->current_tb = NULL;
1701 tb_reset_jump_recursive(tb);
aurel323098dba2009-03-07 21:28:24 +00001702 }
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001703 spin_unlock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001704}
1705
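/* In the softmmu case below, raising an interrupt must also kick the
   target CPU out of the translated-code loop: by waking a remote vcpu
   thread, by forcing the icount counter to expire, or by unchaining
   the currently executing TB. */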
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001706#ifndef CONFIG_USER_ONLY
aurel323098dba2009-03-07 21:28:24 +00001707/* mask must never be zero, except for A20 change call */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001708static void tcg_handle_interrupt(CPUArchState *env, int mask)
aurel323098dba2009-03-07 21:28:24 +00001709{
Andreas Färber60e82572012-05-02 22:23:49 +02001710 CPUState *cpu = ENV_GET_CPU(env);
aurel323098dba2009-03-07 21:28:24 +00001711 int old_mask;
1712
1713 old_mask = env->interrupt_request;
1714 env->interrupt_request |= mask;
1715
aliguori8edac962009-04-24 18:03:45 +00001716 /*
1717 * If called from iothread context, wake the target cpu in
1718 * case it's halted.
1719 */
Andreas Färber60e82572012-05-02 22:23:49 +02001720 if (!qemu_cpu_is_self(cpu)) {
Andreas Färberc08d7422012-05-03 04:34:15 +02001721 qemu_cpu_kick(cpu);
aliguori8edac962009-04-24 18:03:45 +00001722 return;
1723 }
aliguori8edac962009-04-24 18:03:45 +00001724
pbrook2e70f6e2008-06-29 01:03:05 +00001725 if (use_icount) {
pbrook266910c2008-07-09 15:31:50 +00001726 env->icount_decr.u16.high = 0xffff;
pbrook2e70f6e2008-06-29 01:03:05 +00001727 if (!can_do_io(env)
aurel32be214e62009-03-06 21:48:00 +00001728 && (mask & ~old_mask) != 0) {
pbrook2e70f6e2008-06-29 01:03:05 +00001729 cpu_abort(env, "Raised interrupt while not in I/O function");
1730 }
pbrook2e70f6e2008-06-29 01:03:05 +00001731 } else {
aurel323098dba2009-03-07 21:28:24 +00001732 cpu_unlink_tb(env);
bellardea041c02003-06-25 16:16:50 +00001733 }
1734}
1735
Jan Kiszkaec6959d2011-04-13 01:32:56 +02001736CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1737
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001738#else /* CONFIG_USER_ONLY */
1739
Andreas Färber9349b4f2012-03-14 01:38:32 +01001740void cpu_interrupt(CPUArchState *env, int mask)
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001741{
1742 env->interrupt_request |= mask;
1743 cpu_unlink_tb(env);
1744}
1745#endif /* CONFIG_USER_ONLY */
1746
Andreas Färber9349b4f2012-03-14 01:38:32 +01001747void cpu_reset_interrupt(CPUArchState *env, int mask)
bellardb54ad042004-05-20 13:42:52 +00001748{
1749 env->interrupt_request &= ~mask;
1750}
1751
Andreas Färber9349b4f2012-03-14 01:38:32 +01001752void cpu_exit(CPUArchState *env)
aurel323098dba2009-03-07 21:28:24 +00001753{
1754 env->exit_request = 1;
1755 cpu_unlink_tb(env);
1756}
1757
Andreas Färber9349b4f2012-03-14 01:38:32 +01001758void cpu_abort(CPUArchState *env, const char *fmt, ...)
bellard75012672003-06-21 13:11:07 +00001759{
1760 va_list ap;
pbrook493ae1f2007-11-23 16:53:59 +00001761 va_list ap2;
bellard75012672003-06-21 13:11:07 +00001762
1763 va_start(ap, fmt);
pbrook493ae1f2007-11-23 16:53:59 +00001764 va_copy(ap2, ap);
bellard75012672003-06-21 13:11:07 +00001765 fprintf(stderr, "qemu: fatal: ");
1766 vfprintf(stderr, fmt, ap);
1767 fprintf(stderr, "\n");
Peter Maydell6fd2a022012-10-05 15:04:43 +01001768 cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
aliguori93fcfe32009-01-15 22:34:14 +00001769 if (qemu_log_enabled()) {
1770 qemu_log("qemu: fatal: ");
1771 qemu_log_vprintf(fmt, ap2);
1772 qemu_log("\n");
Peter Maydell6fd2a022012-10-05 15:04:43 +01001773 log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
aliguori31b1a7b2009-01-15 22:35:09 +00001774 qemu_log_flush();
aliguori93fcfe32009-01-15 22:34:14 +00001775 qemu_log_close();
balrog924edca2007-06-10 14:07:13 +00001776 }
pbrook493ae1f2007-11-23 16:53:59 +00001777 va_end(ap2);
j_mayerf9373292007-09-29 12:18:20 +00001778 va_end(ap);
Riku Voipiofd052bf2010-01-25 14:30:49 +02001779#if defined(CONFIG_USER_ONLY)
1780 {
1781 struct sigaction act;
1782 sigfillset(&act.sa_mask);
1783 act.sa_handler = SIG_DFL;
1784 sigaction(SIGABRT, &act, NULL);
1785 }
1786#endif
bellard75012672003-06-21 13:11:07 +00001787 abort();
1788}
1789
Andreas Färber9349b4f2012-03-14 01:38:32 +01001790CPUArchState *cpu_copy(CPUArchState *env)
thsc5be9f02007-02-28 20:20:53 +00001791{
Andreas Färber9349b4f2012-03-14 01:38:32 +01001792 CPUArchState *new_env = cpu_init(env->cpu_model_str);
1793 CPUArchState *next_cpu = new_env->next_cpu;
thsc5be9f02007-02-28 20:20:53 +00001794 int cpu_index = new_env->cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001795#if defined(TARGET_HAS_ICE)
1796 CPUBreakpoint *bp;
1797 CPUWatchpoint *wp;
1798#endif
1799
Andreas Färber9349b4f2012-03-14 01:38:32 +01001800 memcpy(new_env, env, sizeof(CPUArchState));
aliguori5a38f082009-01-15 20:16:51 +00001801
1802 /* Preserve chaining and index. */
thsc5be9f02007-02-28 20:20:53 +00001803 new_env->next_cpu = next_cpu;
1804 new_env->cpu_index = cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001805
1806 /* Clone all break/watchpoints.
1807 Note: Once we support ptrace with hw-debug register access, make sure
1808 BP_CPU break/watchpoints are handled correctly on clone. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00001809 QTAILQ_INIT(&env->breakpoints);
1810 QTAILQ_INIT(&env->watchpoints);
aliguori5a38f082009-01-15 20:16:51 +00001811#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001812 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001813 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1814 }
Blue Swirl72cf2d42009-09-12 07:36:22 +00001815 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001816 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1817 wp->flags, NULL);
1818 }
1819#endif
1820
thsc5be9f02007-02-28 20:20:53 +00001821 return new_env;
1822}
1823
bellard01243112004-01-04 15:48:17 +00001824#if !defined(CONFIG_USER_ONLY)
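/* Flush the per-CPU jump cache entries that could refer to TBs on the
   given page.  Two hash ranges are cleared because a TB may span two
   pages, so TBs starting on the preceding page can overlap addr. */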
Blue Swirl0cac1b62012-04-09 16:50:52 +00001825void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
edgar_igl5c751e92008-05-06 08:44:21 +00001826{
1827 unsigned int i;
1828
1829 /* Discard jump cache entries for any tb which might overlap the
1830 flushed page. */
1831 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1832 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001833 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001834
1835 i = tb_jmp_cache_hash_page(addr);
1836 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001837 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001838}
1839
Juan Quintelad24981d2012-05-22 00:42:40 +02001840static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
1841 uintptr_t length)
bellard1ccde1c2004-02-06 19:46:14 +00001842{
Juan Quintelad24981d2012-05-22 00:42:40 +02001843 uintptr_t start1;
bellardf23db162005-08-21 19:12:28 +00001844
bellard1ccde1c2004-02-06 19:46:14 +00001845 /* we modify the TLB cache so that the dirty bit will be set again
1846 when accessing the range */
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001847 start1 = (uintptr_t)qemu_safe_ram_ptr(start);
Stefan Weila57d23e2011-04-30 22:49:26 +02001848 /* Check that we don't span multiple blocks - this breaks the
pbrook5579c7f2009-04-11 14:47:08 +00001849 address comparisons below. */
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001850 if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
pbrook5579c7f2009-04-11 14:47:08 +00001851 != (end - 1) - start) {
1852 abort();
1853 }
Blue Swirle5548612012-04-21 13:08:33 +00001854 cpu_tlb_reset_dirty_all(start1, length);
Juan Quintelad24981d2012-05-22 00:42:40 +02001855
1856}
1857
1858/* Note: start and end must be within the same ram block. */
1859void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1860 int dirty_flags)
1861{
1862 uintptr_t length;
1863
1864 start &= TARGET_PAGE_MASK;
1865 end = TARGET_PAGE_ALIGN(end);
1866
1867 length = end - start;
1868 if (length == 0)
1869 return;
1870 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
1871
1872 if (tcg_enabled()) {
1873 tlb_reset_dirty_range_all(start, end, length);
1874 }
bellard1ccde1c2004-02-06 19:46:14 +00001875}
1876
aliguori74576192008-10-06 14:02:03 +00001877int cpu_physical_memory_set_dirty_tracking(int enable)
1878{
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001879 int ret = 0;
aliguori74576192008-10-06 14:02:03 +00001880 in_migration = enable;
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001881 return ret;
aliguori74576192008-10-06 14:02:03 +00001882}
1883
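/* Compute the value stored in a TLB entry's iotlb field: for RAM it
   is the ram_addr of the page, tagged with the notdirty or rom
   section so that writes are intercepted; for MMIO it is the index of
   the MemoryRegionSection plus the offset within it.  Pages covered
   by a watchpoint are additionally forced through the watch section. */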
Avi Kivitya8170e52012-10-23 12:30:10 +02001884hwaddr memory_region_section_get_iotlb(CPUArchState *env,
Blue Swirle5548612012-04-21 13:08:33 +00001885 MemoryRegionSection *section,
1886 target_ulong vaddr,
Avi Kivitya8170e52012-10-23 12:30:10 +02001887 hwaddr paddr,
Blue Swirle5548612012-04-21 13:08:33 +00001888 int prot,
1889 target_ulong *address)
1890{
Avi Kivitya8170e52012-10-23 12:30:10 +02001891 hwaddr iotlb;
Blue Swirle5548612012-04-21 13:08:33 +00001892 CPUWatchpoint *wp;
1893
Blue Swirlcc5bea62012-04-14 14:56:48 +00001894 if (memory_region_is_ram(section->mr)) {
Blue Swirle5548612012-04-21 13:08:33 +00001895 /* Normal RAM. */
1896 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00001897 + memory_region_section_addr(section, paddr);
Blue Swirle5548612012-04-21 13:08:33 +00001898 if (!section->readonly) {
1899 iotlb |= phys_section_notdirty;
1900 } else {
1901 iotlb |= phys_section_rom;
1902 }
1903 } else {
1904 /* IO handlers are currently passed a physical address.
1905 It would be nice to pass an offset from the base address
1906 of that region. This would avoid having to special case RAM,
1907 and avoid full address decoding in every device.
1908 We can't use the high bits of pd for this because
1909 IO_MEM_ROMD uses these as a ram address. */
1910 iotlb = section - phys_sections;
Blue Swirlcc5bea62012-04-14 14:56:48 +00001911 iotlb += memory_region_section_addr(section, paddr);
Blue Swirle5548612012-04-21 13:08:33 +00001912 }
1913
1914 /* Make accesses to pages with watchpoints go via the
1915 watchpoint trap routines. */
1916 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1917 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
1918 /* Avoid trapping reads of pages with a write breakpoint. */
1919 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
1920 iotlb = phys_section_watch + paddr;
1921 *address |= TLB_MMIO;
1922 break;
1923 }
1924 }
1925 }
1926
1927 return iotlb;
1928}
1929
bellard01243112004-01-04 15:48:17 +00001930#else
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03001931/*
1932 * Walks guest process memory "regions" one by one
1933 * and calls callback function 'fn' for each region.
1934 */
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001935
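/* The walk below visits the multi-level l1_map page table and
   coalesces runs of consecutive pages with identical protection flags
   into a single callback invocation; dump_region() further down is
   the canonical callback, used by page_dump(). */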
1936struct walk_memory_regions_data
bellard9fa3e852004-01-04 18:06:42 +00001937{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001938 walk_memory_regions_fn fn;
1939 void *priv;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001940 uintptr_t start;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001941 int prot;
1942};
bellard9fa3e852004-01-04 18:06:42 +00001943
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001944static int walk_memory_regions_end(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00001945 abi_ulong end, int new_prot)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001946{
1947 if (data->start != -1ul) {
1948 int rc = data->fn(data->priv, data->start, end, data->prot);
1949 if (rc != 0) {
1950 return rc;
bellard9fa3e852004-01-04 18:06:42 +00001951 }
bellard33417e72003-08-10 21:47:01 +00001952 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001953
1954 data->start = (new_prot ? end : -1ul);
1955 data->prot = new_prot;
1956
1957 return 0;
1958}
1959
1960static int walk_memory_regions_1(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00001961 abi_ulong base, int level, void **lp)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001962{
Paul Brookb480d9b2010-03-12 23:23:29 +00001963 abi_ulong pa;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001964 int i, rc;
1965
1966 if (*lp == NULL) {
1967 return walk_memory_regions_end(data, base, 0);
1968 }
1969
1970 if (level == 0) {
1971 PageDesc *pd = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00001972 for (i = 0; i < L2_SIZE; ++i) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001973 int prot = pd[i].flags;
1974
1975 pa = base | (i << TARGET_PAGE_BITS);
1976 if (prot != data->prot) {
1977 rc = walk_memory_regions_end(data, pa, prot);
1978 if (rc != 0) {
1979 return rc;
1980 }
1981 }
1982 }
1983 } else {
1984 void **pp = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00001985 for (i = 0; i < L2_SIZE; ++i) {
Paul Brookb480d9b2010-03-12 23:23:29 +00001986 pa = base | ((abi_ulong)i <<
1987 (TARGET_PAGE_BITS + L2_BITS * level));
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001988 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
1989 if (rc != 0) {
1990 return rc;
1991 }
1992 }
1993 }
1994
1995 return 0;
1996}
1997
1998int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
1999{
2000 struct walk_memory_regions_data data;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02002001 uintptr_t i;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002002
2003 data.fn = fn;
2004 data.priv = priv;
2005 data.start = -1ul;
2006 data.prot = 0;
2007
2008 for (i = 0; i < V_L1_SIZE; i++) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002009 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002010 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2011 if (rc != 0) {
2012 return rc;
2013 }
2014 }
2015
2016 return walk_memory_regions_end(&data, 0, 0);
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002017}
2018
Paul Brookb480d9b2010-03-12 23:23:29 +00002019static int dump_region(void *priv, abi_ulong start,
2020 abi_ulong end, unsigned long prot)
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002021{
2022 FILE *f = (FILE *)priv;
2023
Paul Brookb480d9b2010-03-12 23:23:29 +00002024 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2025 " "TARGET_ABI_FMT_lx" %c%c%c\n",
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002026 start, end, end - start,
2027 ((prot & PAGE_READ) ? 'r' : '-'),
2028 ((prot & PAGE_WRITE) ? 'w' : '-'),
2029 ((prot & PAGE_EXEC) ? 'x' : '-'));
2030
2031 return (0);
2032}
2033
2034/* dump memory mappings */
2035void page_dump(FILE *f)
2036{
2037 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2038 "start", "end", "size", "prot");
2039 walk_memory_regions(f, dump_region);
bellard33417e72003-08-10 21:47:01 +00002040}
2041
pbrook53a59602006-03-25 19:31:22 +00002042int page_get_flags(target_ulong address)
bellard33417e72003-08-10 21:47:01 +00002043{
bellard9fa3e852004-01-04 18:06:42 +00002044 PageDesc *p;
2045
2046 p = page_find(address >> TARGET_PAGE_BITS);
bellard33417e72003-08-10 21:47:01 +00002047 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00002048 return 0;
2049 return p->flags;
bellard33417e72003-08-10 21:47:01 +00002050}
2051
Richard Henderson376a7902010-03-10 15:57:04 -08002052/* Modify the flags of a page and invalidate the code if necessary.
2053 The flag PAGE_WRITE_ORG is set automatically depending
2054 on PAGE_WRITE. The mmap_lock should already be held. */
pbrook53a59602006-03-25 19:31:22 +00002055void page_set_flags(target_ulong start, target_ulong end, int flags)
bellard9fa3e852004-01-04 18:06:42 +00002056{
Richard Henderson376a7902010-03-10 15:57:04 -08002057 target_ulong addr, len;
bellard9fa3e852004-01-04 18:06:42 +00002058
Richard Henderson376a7902010-03-10 15:57:04 -08002059 /* This function should never be called with addresses outside the
2060 guest address space. If this assert fires, it probably indicates
2061 a missing call to h2g_valid. */
Paul Brookb480d9b2010-03-12 23:23:29 +00002062#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2063 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002064#endif
2065 assert(start < end);
2066
bellard9fa3e852004-01-04 18:06:42 +00002067 start = start & TARGET_PAGE_MASK;
2068 end = TARGET_PAGE_ALIGN(end);
Richard Henderson376a7902010-03-10 15:57:04 -08002069
2070 if (flags & PAGE_WRITE) {
bellard9fa3e852004-01-04 18:06:42 +00002071 flags |= PAGE_WRITE_ORG;
Richard Henderson376a7902010-03-10 15:57:04 -08002072 }
2073
2074 for (addr = start, len = end - start;
2075 len != 0;
2076 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2077 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2078
2079 /* If the write protection bit is set, then we invalidate
2080 the code inside. */
ths5fafdf22007-09-16 21:08:06 +00002081 if (!(p->flags & PAGE_WRITE) &&
bellard9fa3e852004-01-04 18:06:42 +00002082 (flags & PAGE_WRITE) &&
2083 p->first_tb) {
bellardd720b932004-04-25 17:57:43 +00002084 tb_invalidate_phys_page(addr, 0, NULL);
bellard9fa3e852004-01-04 18:06:42 +00002085 }
2086 p->flags = flags;
2087 }
bellard9fa3e852004-01-04 18:06:42 +00002088}
2089
ths3d97b402007-11-02 19:02:07 +00002090int page_check_range(target_ulong start, target_ulong len, int flags)
2091{
2092 PageDesc *p;
2093 target_ulong end;
2094 target_ulong addr;
2095
Richard Henderson376a7902010-03-10 15:57:04 -08002096 /* This function should never be called with addresses outside the
2097 guest address space. If this assert fires, it probably indicates
2098 a missing call to h2g_valid. */
Blue Swirl338e9e62010-03-13 09:48:08 +00002099#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2100 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002101#endif
2102
Richard Henderson3e0650a2010-03-29 10:54:42 -07002103 if (len == 0) {
2104 return 0;
2105 }
Richard Henderson376a7902010-03-10 15:57:04 -08002106 if (start + len - 1 < start) {
2107 /* We've wrapped around. */
balrog55f280c2008-10-28 10:24:11 +00002108 return -1;
Richard Henderson376a7902010-03-10 15:57:04 -08002109 }
balrog55f280c2008-10-28 10:24:11 +00002110
ths3d97b402007-11-02 19:02:07 +00002111 end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
2112 start = start & TARGET_PAGE_MASK;
2113
Richard Henderson376a7902010-03-10 15:57:04 -08002114 for (addr = start, len = end - start;
2115 len != 0;
2116 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
ths3d97b402007-11-02 19:02:07 +00002117 p = page_find(addr >> TARGET_PAGE_BITS);
2118 if (!p)
2119 return -1;
2120 if (!(p->flags & PAGE_VALID))
2121 return -1;
2122
bellarddae32702007-11-14 10:51:00 +00002123 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
ths3d97b402007-11-02 19:02:07 +00002124 return -1;
bellarddae32702007-11-14 10:51:00 +00002125 if (flags & PAGE_WRITE) {
2126 if (!(p->flags & PAGE_WRITE_ORG))
2127 return -1;
2128 /* unprotect the page if it was put read-only because it
2129 contains translated code */
2130 if (!(p->flags & PAGE_WRITE)) {
2131 if (!page_unprotect(addr, 0, NULL))
2132 return -1;
2133 }
2134 return 0;
2135 }
ths3d97b402007-11-02 19:02:07 +00002136 }
2137 return 0;
2138}
2139
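/* This is the other half of the self-modifying-code protocol used by
   tb_alloc_page(): pages holding translated code were mprotect()ed
   read-only there, so a guest write faults, lands here, and we
   re-enable PAGE_WRITE after invalidating the affected TBs. */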
bellard9fa3e852004-01-04 18:06:42 +00002140/* called from signal handler: invalidate the code and unprotect the
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002141 page. Return TRUE if the fault was successfully handled. */
Stefan Weil6375e092012-04-06 22:26:15 +02002142int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00002143{
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002144 unsigned int prot;
2145 PageDesc *p;
pbrook53a59602006-03-25 19:31:22 +00002146 target_ulong host_start, host_end, addr;
bellard9fa3e852004-01-04 18:06:42 +00002147
pbrookc8a706f2008-06-02 16:16:42 +00002148 /* Technically this isn't safe inside a signal handler. However we
2149 know this only ever happens in a synchronous SEGV handler, so in
2150 practice it seems to be ok. */
2151 mmap_lock();
2152
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002153 p = page_find(address >> TARGET_PAGE_BITS);
2154 if (!p) {
pbrookc8a706f2008-06-02 16:16:42 +00002155 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002156 return 0;
pbrookc8a706f2008-06-02 16:16:42 +00002157 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002158
bellard9fa3e852004-01-04 18:06:42 +00002159 /* if the page was really writable, then we change its
2160 protection back to writable */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002161 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2162 host_start = address & qemu_host_page_mask;
2163 host_end = host_start + qemu_host_page_size;
2164
2165 prot = 0;
2166 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2167 p = page_find(addr >> TARGET_PAGE_BITS);
2168 p->flags |= PAGE_WRITE;
2169 prot |= p->flags;
2170
bellard9fa3e852004-01-04 18:06:42 +00002171 /* and since the content will be modified, we must invalidate
2172 the corresponding translated code. */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002173 tb_invalidate_phys_page(addr, pc, puc);
bellard9fa3e852004-01-04 18:06:42 +00002174#ifdef DEBUG_TB_CHECK
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002175 tb_invalidate_check(addr);
bellard9fa3e852004-01-04 18:06:42 +00002176#endif
bellard9fa3e852004-01-04 18:06:42 +00002177 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002178 mprotect((void *)g2h(host_start), qemu_host_page_size,
2179 prot & PAGE_BITS);
2180
2181 mmap_unlock();
2182 return 1;
bellard9fa3e852004-01-04 18:06:42 +00002183 }
pbrookc8a706f2008-06-02 16:16:42 +00002184 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002185 return 0;
2186}
bellard9fa3e852004-01-04 18:06:42 +00002187#endif /* defined(CONFIG_USER_ONLY) */
2188
pbrooke2eef172008-06-08 01:09:01 +00002189#if !defined(CONFIG_USER_ONLY)
pbrook8da3ff12008-12-01 18:59:50 +00002190
Paul Brookc04b2b72010-03-01 03:31:14 +00002191#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2192typedef struct subpage_t {
Avi Kivity70c68e42012-01-02 12:32:48 +02002193 MemoryRegion iomem;
Avi Kivitya8170e52012-10-23 12:30:10 +02002194 hwaddr base;
Avi Kivity5312bd82012-02-12 18:32:55 +02002195 uint16_t sub_section[TARGET_PAGE_SIZE];
Paul Brookc04b2b72010-03-01 03:31:14 +00002196} subpage_t;
2197
Anthony Liguoric227f092009-10-01 16:12:16 -05002198static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002199 uint16_t section);
Avi Kivitya8170e52012-10-23 12:30:10 +02002200static subpage_t *subpage_init(hwaddr base);
Avi Kivity5312bd82012-02-12 18:32:55 +02002201static void destroy_page_desc(uint16_t section_index)
Avi Kivity54688b12012-02-09 17:34:32 +02002202{
Avi Kivity5312bd82012-02-12 18:32:55 +02002203 MemoryRegionSection *section = &phys_sections[section_index];
2204 MemoryRegion *mr = section->mr;
Avi Kivity54688b12012-02-09 17:34:32 +02002205
2206 if (mr->subpage) {
2207 subpage_t *subpage = container_of(mr, subpage_t, iomem);
2208 memory_region_destroy(&subpage->iomem);
2209 g_free(subpage);
2210 }
2211}
2212
Avi Kivity4346ae32012-02-10 17:00:01 +02002213static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
Avi Kivity54688b12012-02-09 17:34:32 +02002214{
2215 unsigned i;
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002216 PhysPageEntry *p;
Avi Kivity54688b12012-02-09 17:34:32 +02002217
Avi Kivityc19e8802012-02-13 20:25:31 +02002218 if (lp->ptr == PHYS_MAP_NODE_NIL) {
Avi Kivity54688b12012-02-09 17:34:32 +02002219 return;
2220 }
2221
Avi Kivityc19e8802012-02-13 20:25:31 +02002222 p = phys_map_nodes[lp->ptr];
Avi Kivity4346ae32012-02-10 17:00:01 +02002223 for (i = 0; i < L2_SIZE; ++i) {
Avi Kivity07f07b32012-02-13 20:45:32 +02002224 if (!p[i].is_leaf) {
Avi Kivity54688b12012-02-09 17:34:32 +02002225 destroy_l2_mapping(&p[i], level - 1);
Avi Kivity4346ae32012-02-10 17:00:01 +02002226 } else {
Avi Kivityc19e8802012-02-13 20:25:31 +02002227 destroy_page_desc(p[i].ptr);
Avi Kivity54688b12012-02-09 17:34:32 +02002228 }
Avi Kivity54688b12012-02-09 17:34:32 +02002229 }
Avi Kivity07f07b32012-02-13 20:45:32 +02002230 lp->is_leaf = 0;
Avi Kivityc19e8802012-02-13 20:25:31 +02002231 lp->ptr = PHYS_MAP_NODE_NIL;
Avi Kivity54688b12012-02-09 17:34:32 +02002232}
2233
Avi Kivityac1970f2012-10-03 16:22:53 +02002234static void destroy_all_mappings(AddressSpaceDispatch *d)
Avi Kivity54688b12012-02-09 17:34:32 +02002235{
Avi Kivityac1970f2012-10-03 16:22:53 +02002236 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002237 phys_map_nodes_reset();
Avi Kivity54688b12012-02-09 17:34:32 +02002238}
2239
Avi Kivity5312bd82012-02-12 18:32:55 +02002240static uint16_t phys_section_add(MemoryRegionSection *section)
2241{
2242 if (phys_sections_nb == phys_sections_nb_alloc) {
2243 phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
2244 phys_sections = g_renew(MemoryRegionSection, phys_sections,
2245 phys_sections_nb_alloc);
2246 }
2247 phys_sections[phys_sections_nb] = *section;
2248 return phys_sections_nb++;
2249}
2250
2251static void phys_sections_clear(void)
2252{
2253 phys_sections_nb = 0;
2254}
2255
Avi Kivityac1970f2012-10-03 16:22:53 +02002256static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02002257{
2258 subpage_t *subpage;
Avi Kivitya8170e52012-10-23 12:30:10 +02002259 hwaddr base = section->offset_within_address_space
Avi Kivity0f0cb162012-02-13 17:14:32 +02002260 & TARGET_PAGE_MASK;
Avi Kivityac1970f2012-10-03 16:22:53 +02002261 MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002262 MemoryRegionSection subsection = {
2263 .offset_within_address_space = base,
2264 .size = TARGET_PAGE_SIZE,
2265 };
Avi Kivitya8170e52012-10-23 12:30:10 +02002266 hwaddr start, end;
Avi Kivity0f0cb162012-02-13 17:14:32 +02002267
Avi Kivityf3705d52012-03-08 16:16:34 +02002268 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002269
Avi Kivityf3705d52012-03-08 16:16:34 +02002270 if (!(existing->mr->subpage)) {
Avi Kivity0f0cb162012-02-13 17:14:32 +02002271 subpage = subpage_init(base);
2272 subsection.mr = &subpage->iomem;
Avi Kivityac1970f2012-10-03 16:22:53 +02002273 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
Avi Kivity29990972012-02-13 20:21:20 +02002274 phys_section_add(&subsection));
Avi Kivity0f0cb162012-02-13 17:14:32 +02002275 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02002276 subpage = container_of(existing->mr, subpage_t, iomem);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002277 }
2278 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
Tyler Halladb2a9b2012-07-25 18:45:03 -04002279 end = start + section->size - 1;
Avi Kivity0f0cb162012-02-13 17:14:32 +02002280 subpage_register(subpage, start, end, phys_section_add(section));
2281}
2282
2283
Avi Kivityac1970f2012-10-03 16:22:53 +02002284static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
bellard33417e72003-08-10 21:47:01 +00002285{
Avi Kivitya8170e52012-10-23 12:30:10 +02002286 hwaddr start_addr = section->offset_within_address_space;
Avi Kivitydd811242012-01-02 12:17:03 +02002287 ram_addr_t size = section->size;
Avi Kivitya8170e52012-10-23 12:30:10 +02002288 hwaddr addr;
Avi Kivity5312bd82012-02-12 18:32:55 +02002289 uint16_t section_index = phys_section_add(section);
Avi Kivitydd811242012-01-02 12:17:03 +02002290
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002291 assert(size);
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002292
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002293 addr = start_addr;
Avi Kivityac1970f2012-10-03 16:22:53 +02002294 phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
Avi Kivity29990972012-02-13 20:21:20 +02002295 section_index);
bellard33417e72003-08-10 21:47:01 +00002296}
2297
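/* Split a MemoryRegionSection into page-table registrations: a
   subpage for an unaligned head, whole pages registered directly in
   the middle, and a subpage again for any unaligned tail. */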
Avi Kivityac1970f2012-10-03 16:22:53 +02002298static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02002299{
Avi Kivityac1970f2012-10-03 16:22:53 +02002300 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002301 MemoryRegionSection now = *section, remain = *section;
2302
2303 if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
2304 || (now.size < TARGET_PAGE_SIZE)) {
2305 now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
2306 - now.offset_within_address_space,
2307 now.size);
Avi Kivityac1970f2012-10-03 16:22:53 +02002308 register_subpage(d, &now);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002309 remain.size -= now.size;
2310 remain.offset_within_address_space += now.size;
2311 remain.offset_within_region += now.size;
2312 }
Tyler Hall69b67642012-07-25 18:45:04 -04002313 while (remain.size >= TARGET_PAGE_SIZE) {
2314 now = remain;
2315 if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
2316 now.size = TARGET_PAGE_SIZE;
Avi Kivityac1970f2012-10-03 16:22:53 +02002317 register_subpage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04002318 } else {
2319 now.size &= TARGET_PAGE_MASK;
Avi Kivityac1970f2012-10-03 16:22:53 +02002320 register_multipage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04002321 }
Avi Kivity0f0cb162012-02-13 17:14:32 +02002322 remain.size -= now.size;
2323 remain.offset_within_address_space += now.size;
2324 remain.offset_within_region += now.size;
2325 }
2326 now = remain;
2327 if (now.size) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002328 register_subpage(d, &now);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002329 }
2330}
2331
Sheng Yang62a27442010-01-26 19:21:16 +08002332void qemu_flush_coalesced_mmio_buffer(void)
2333{
2334 if (kvm_enabled())
2335 kvm_flush_coalesced_mmio_buffer();
2336}
2337
Marcelo Tosattic9027602010-03-01 20:25:08 -03002338#if defined(__linux__) && !defined(TARGET_S390X)
2339
2340#include <sys/vfs.h>
2341
2342#define HUGETLBFS_MAGIC 0x958458f6
2343
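/* statfs() reports the filesystem block size in f_bsize; on hugetlbfs
   that is the huge page size backing files under 'path'. */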
2344static long gethugepagesize(const char *path)
2345{
2346 struct statfs fs;
2347 int ret;
2348
2349 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002350 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002351 } while (ret != 0 && errno == EINTR);
2352
2353 if (ret != 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002354 perror(path);
2355 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002356 }
2357
2358 if (fs.f_type != HUGETLBFS_MAGIC)
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002359 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002360
2361 return fs.f_bsize;
2362}
2363
Alex Williamson04b16652010-07-02 11:13:17 -06002364static void *file_ram_alloc(RAMBlock *block,
2365 ram_addr_t memory,
2366 const char *path)
Marcelo Tosattic9027602010-03-01 20:25:08 -03002367{
2368 char *filename;
2369 void *area;
2370 int fd;
2371#ifdef MAP_POPULATE
2372 int flags;
2373#endif
2374 unsigned long hpagesize;
2375
2376 hpagesize = gethugepagesize(path);
2377 if (!hpagesize) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002378 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002379 }
2380
2381 if (memory < hpagesize) {
2382 return NULL;
2383 }
2384
2385 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2386 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2387 return NULL;
2388 }
2389
2390 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002391 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002392 }
2393
2394 fd = mkstemp(filename);
2395 if (fd < 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002396 perror("unable to create backing store for hugepages");
2397 free(filename);
2398 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002399 }
2400 unlink(filename);
2401 free(filename);
2402
2403 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2404
2405 /*
2406 * ftruncate is not supported by hugetlbfs in older
2407 * hosts, so don't bother bailing out on errors.
2408 * If anything goes wrong with it under other filesystems,
2409 * mmap will fail.
2410 */
2411 if (ftruncate(fd, memory))
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002412 perror("ftruncate");
Marcelo Tosattic9027602010-03-01 20:25:08 -03002413
2414#ifdef MAP_POPULATE
2415 /* NB: MAP_POPULATE won't exhaustively allocate all physical pages when
2416 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2417 * to sidestep this quirk.
2418 */
2419 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2420 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2421#else
2422 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2423#endif
2424 if (area == MAP_FAILED) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002425 perror("file_ram_alloc: can't mmap RAM pages");
2426 close(fd);
2427 return (NULL);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002428 }
Alex Williamson04b16652010-07-02 11:13:17 -06002429 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002430 return area;
2431}
2432#endif
2433
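/* Best-fit search over the gaps between existing RAM blocks: the
   smallest gap (mingap) that still fits 'size' wins, which helps keep
   the ram_addr_t space compact. */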
Alex Williamsond17b5282010-06-25 11:08:38 -06002434static ram_addr_t find_ram_offset(ram_addr_t size)
2435{
Alex Williamson04b16652010-07-02 11:13:17 -06002436 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06002437 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002438
2439 if (QLIST_EMPTY(&ram_list.blocks))
2440 return 0;
2441
2442 QLIST_FOREACH(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002443 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002444
2445 end = block->offset + block->length;
2446
2447 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2448 if (next_block->offset >= end) {
2449 next = MIN(next, next_block->offset);
2450 }
2451 }
2452 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06002453 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06002454 mingap = next - end;
2455 }
2456 }
Alex Williamson3e837b22011-10-31 08:54:09 -06002457
2458 if (offset == RAM_ADDR_MAX) {
2459 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2460 (uint64_t)size);
2461 abort();
2462 }
2463
Alex Williamson04b16652010-07-02 11:13:17 -06002464 return offset;
2465}
2466
Juan Quintela652d7ec2012-07-20 10:37:54 +02002467ram_addr_t last_ram_offset(void)
Alex Williamson04b16652010-07-02 11:13:17 -06002468{
Alex Williamsond17b5282010-06-25 11:08:38 -06002469 RAMBlock *block;
2470 ram_addr_t last = 0;
2471
2472 QLIST_FOREACH(block, &ram_list.blocks, next)
2473 last = MAX(last, block->offset + block->length);
2474
2475 return last;
2476}
2477
Jason Baronddb97f12012-08-02 15:44:16 -04002478static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
2479{
2480 int ret;
2481 QemuOpts *machine_opts;
2482
2483 /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
2484 machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
2485 if (machine_opts &&
2486 !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
2487 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
2488 if (ret) {
2489 perror("qemu_madvise");
2490 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
2491 "but dump_guest_core=off specified\n");
2492 }
2493 }
2494}
2495
Avi Kivityc5705a72011-12-20 15:59:12 +02002496void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
Cam Macdonell84b89d72010-07-26 18:10:57 -06002497{
2498 RAMBlock *new_block, *block;
2499
Avi Kivityc5705a72011-12-20 15:59:12 +02002500 new_block = NULL;
2501 QLIST_FOREACH(block, &ram_list.blocks, next) {
2502 if (block->offset == addr) {
2503 new_block = block;
2504 break;
2505 }
2506 }
2507 assert(new_block);
2508 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002509
Anthony Liguori09e5ab62012-02-03 12:28:43 -06002510 if (dev) {
2511 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002512 if (id) {
2513 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05002514 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002515 }
2516 }
2517 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2518
2519 QLIST_FOREACH(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02002520 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06002521 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2522 new_block->idstr);
2523 abort();
2524 }
2525 }
Avi Kivityc5705a72011-12-20 15:59:12 +02002526}
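/*
 * Sketch of the idstr composition above: the final identifier is
 * "<device path>/<name>".  The device path below is a made-up example
 * of what qdev_get_dev_path() might return; pstrcat() is QEMU's
 * bounded strcat, approximated here with strncat.
 */
#if 0
#include <stdio.h>
#include <string.h>

int main(void)
{
    char idstr[256] = "";
    const char *dev_path = "pci.0/03.0";   /* hypothetical device path */
    const char *name = "vga.vram";

    if (dev_path) {
        snprintf(idstr, sizeof(idstr), "%s/", dev_path);
    }
    strncat(idstr, name, sizeof(idstr) - strlen(idstr) - 1);

    printf("%s\n", idstr);   /* -> pci.0/03.0/vga.vram */
    return 0;
}
#endif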
2527
Luiz Capitulino8490fc72012-09-05 16:50:16 -03002528static int memory_try_enable_merging(void *addr, size_t len)
2529{
2530 QemuOpts *opts;
2531
2532 opts = qemu_opts_find(qemu_find_opts("machine"), 0);
2533 if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
2534 /* disabled by the user */
2535 return 0;
2536 }
2537
2538 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
2539}
2540
Avi Kivityc5705a72011-12-20 15:59:12 +02002541ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2542 MemoryRegion *mr)
2543{
2544 RAMBlock *new_block;
2545
2546 size = TARGET_PAGE_ALIGN(size);
2547 new_block = g_malloc0(sizeof(*new_block));
Cam Macdonell84b89d72010-07-26 18:10:57 -06002548
Avi Kivity7c637362011-12-21 13:09:49 +02002549 new_block->mr = mr;
Jun Nakajima432d2682010-08-31 16:41:25 +01002550 new_block->offset = find_ram_offset(size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002551 if (host) {
2552 new_block->host = host;
Huang Yingcd19cfa2011-03-02 08:56:19 +01002553 new_block->flags |= RAM_PREALLOC_MASK;
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002554 } else {
2555 if (mem_path) {
2556#if defined (__linux__) && !defined(TARGET_S390X)
2557 new_block->host = file_ram_alloc(new_block, size, mem_path);
2558 if (!new_block->host) {
2559 new_block->host = qemu_vmalloc(size);
Luiz Capitulino8490fc72012-09-05 16:50:16 -03002560 memory_try_enable_merging(new_block->host, size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002561 }
2562#else
2563 fprintf(stderr, "-mem-path option unsupported\n");
2564 exit(1);
2565#endif
2566 } else {
Jan Kiszka868bb332011-06-21 22:59:09 +02002567 if (xen_enabled()) {
Avi Kivityfce537d2011-12-18 15:48:55 +02002568 xen_ram_alloc(new_block->offset, size, mr);
Christian Borntraegerfdec9912012-06-15 05:10:30 +00002569 } else if (kvm_enabled()) {
2570 /* some s390/kvm configurations have special constraints */
2571 new_block->host = kvm_vmalloc(size);
Jun Nakajima432d2682010-08-31 16:41:25 +01002572 } else {
2573 new_block->host = qemu_vmalloc(size);
2574 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03002575 memory_try_enable_merging(new_block->host, size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002576 }
2577 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06002578 new_block->length = size;
2579
2580 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2581
Anthony Liguori7267c092011-08-20 22:09:37 -05002582 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
Cam Macdonell84b89d72010-07-26 18:10:57 -06002583 last_ram_offset() >> TARGET_PAGE_BITS);
Igor Mitsyanko5fda0432012-08-10 18:45:11 +04002584 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2585 0, size >> TARGET_PAGE_BITS);
Juan Quintela1720aee2012-06-22 13:14:17 +02002586 cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002587
Jason Baronddb97f12012-08-02 15:44:16 -04002588 qemu_ram_setup_dump(new_block->host, size);
Luiz Capitulinoad0b5322012-10-05 16:47:57 -03002589 qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
Jason Baronddb97f12012-08-02 15:44:16 -04002590
Cam Macdonell84b89d72010-07-26 18:10:57 -06002591 if (kvm_enabled())
2592 kvm_setup_guest_memory(new_block->host, size);
2593
2594 return new_block->offset;
2595}
2596
Avi Kivityc5705a72011-12-20 15:59:12 +02002597ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
pbrook94a6b542009-04-11 17:15:54 +00002598{
Avi Kivityc5705a72011-12-20 15:59:12 +02002599 return qemu_ram_alloc_from_ptr(size, NULL, mr);
pbrook94a6b542009-04-11 17:15:54 +00002600}
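/*
 * Hedged usage sketch for the allocator above.  In this tree, device
 * code normally reaches qemu_ram_alloc() through memory_region_init_ram();
 * the direct call below, the region name and the size are illustrative
 * only.
 */
#if 0
static MemoryRegion example_mr;   /* assumed to be initialized elsewhere */

static void example_alloc(void)
{
    ram_addr_t offset = qemu_ram_alloc(0x800000, &example_mr);  /* 8 MB */
    void *host = qemu_get_ram_ptr(offset);

    /* host now points at the start of the freshly allocated block */
    (void)host;
}
#endif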
bellarde9a1ab12007-02-08 23:08:38 +00002601
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002602void qemu_ram_free_from_ptr(ram_addr_t addr)
2603{
2604 RAMBlock *block;
2605
2606 QLIST_FOREACH(block, &ram_list.blocks, next) {
2607 if (addr == block->offset) {
2608 QLIST_REMOVE(block, next);
Anthony Liguori7267c092011-08-20 22:09:37 -05002609 g_free(block);
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002610 return;
2611 }
2612 }
2613}
2614
Anthony Liguoric227f092009-10-01 16:12:16 -05002615void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00002616{
Alex Williamson04b16652010-07-02 11:13:17 -06002617 RAMBlock *block;
2618
2619 QLIST_FOREACH(block, &ram_list.blocks, next) {
2620 if (addr == block->offset) {
2621 QLIST_REMOVE(block, next);
Huang Yingcd19cfa2011-03-02 08:56:19 +01002622 if (block->flags & RAM_PREALLOC_MASK) {
2623 ;
2624 } else if (mem_path) {
Alex Williamson04b16652010-07-02 11:13:17 -06002625#if defined (__linux__) && !defined(TARGET_S390X)
2626 if (block->fd) {
2627 munmap(block->host, block->length);
2628 close(block->fd);
2629 } else {
2630 qemu_vfree(block->host);
2631 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01002632#else
2633 abort();
Alex Williamson04b16652010-07-02 11:13:17 -06002634#endif
2635 } else {
2636#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2637 munmap(block->host, block->length);
2638#else
Jan Kiszka868bb332011-06-21 22:59:09 +02002639 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002640 xen_invalidate_map_cache_entry(block->host);
Jun Nakajima432d2682010-08-31 16:41:25 +01002641 } else {
2642 qemu_vfree(block->host);
2643 }
Alex Williamson04b16652010-07-02 11:13:17 -06002644#endif
2645 }
Anthony Liguori7267c092011-08-20 22:09:37 -05002646 g_free(block);
Alex Williamson04b16652010-07-02 11:13:17 -06002647 return;
2648 }
2649 }
2650
bellarde9a1ab12007-02-08 23:08:38 +00002651}
2652
Huang Yingcd19cfa2011-03-02 08:56:19 +01002653#ifndef _WIN32
2654void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2655{
2656 RAMBlock *block;
2657 ram_addr_t offset;
2658 int flags;
2659 void *area, *vaddr;
2660
2661 QLIST_FOREACH(block, &ram_list.blocks, next) {
2662 offset = addr - block->offset;
2663 if (offset < block->length) {
2664 vaddr = block->host + offset;
2665 if (block->flags & RAM_PREALLOC_MASK) {
2666 ;
2667 } else {
2668 flags = MAP_FIXED;
2669 munmap(vaddr, length);
2670 if (mem_path) {
2671#if defined(__linux__) && !defined(TARGET_S390X)
2672 if (block->fd) {
2673#ifdef MAP_POPULATE
2674 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
2675 MAP_PRIVATE;
2676#else
2677 flags |= MAP_PRIVATE;
2678#endif
2679 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2680 flags, block->fd, offset);
2681 } else {
2682 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2683 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2684 flags, -1, 0);
2685 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01002686#else
2687 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01002688#endif
2689 } else {
2690#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2691 flags |= MAP_SHARED | MAP_ANONYMOUS;
2692 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
2693 flags, -1, 0);
2694#else
2695 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2696 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2697 flags, -1, 0);
2698#endif
2699 }
2700 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002701 fprintf(stderr, "Could not remap addr: "
2702 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01002703 length, addr);
2704 exit(1);
2705 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03002706 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04002707 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01002708 }
2709 return;
2710 }
2711 }
2712}
2713#endif /* !_WIN32 */
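/*
 * A standalone sketch of the MAP_FIXED remap trick used by
 * qemu_ram_remap() above: unmap a damaged range and install a fresh
 * anonymous mapping at the same virtual address.
 */
#if 0
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
    size_t len = 4096;
    void *vaddr, *area;

    vaddr = mmap(0, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (vaddr == MAP_FAILED) {
        perror("mmap");
        return 1;
    }

    /* throw the old page away and reinstantiate one at the same address */
    munmap(vaddr, len);
    area = mmap(vaddr, len, PROT_READ | PROT_WRITE,
                MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (area != vaddr) {
        perror("remap");
        return 1;
    }
    printf("remapped page at %p\n", area);
    return 0;
}
#endif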
2714
pbrookdc828ca2009-04-09 22:21:07 +00002715/* Return a host pointer to ram allocated with qemu_ram_alloc.
pbrook5579c7f2009-04-11 14:47:08 +00002716 With the exception of the softmmu code in this file, this should
2717 only be used for local memory (e.g. video ram) that the device owns,
2718 and knows it isn't going to access beyond the end of the block.
2719
2720 It should not be used for general purpose DMA.
2721 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2722 */
Anthony Liguoric227f092009-10-01 16:12:16 -05002723void *qemu_get_ram_ptr(ram_addr_t addr)
pbrookdc828ca2009-04-09 22:21:07 +00002724{
pbrook94a6b542009-04-11 17:15:54 +00002725 RAMBlock *block;
2726
Alex Williamsonf471a172010-06-11 11:11:42 -06002727 QLIST_FOREACH(block, &ram_list.blocks, next) {
2728 if (addr - block->offset < block->length) {
Vincent Palatin7d82af32011-03-10 15:47:46 -05002729 /* Move this entry to the start of the list. */
2730 if (block != QLIST_FIRST(&ram_list.blocks)) {
2731 QLIST_REMOVE(block, next);
2732 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
2733 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002734 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01002735 /* We need to check if the requested address is in RAM
 2736 * because we don't want to map the entire guest memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01002737 * In that case, just map until the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01002738 */
2739 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002740 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01002741 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002742 block->host =
2743 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01002744 }
2745 }
Alex Williamsonf471a172010-06-11 11:11:42 -06002746 return block->host + (addr - block->offset);
2747 }
pbrook94a6b542009-04-11 17:15:54 +00002748 }
Alex Williamsonf471a172010-06-11 11:11:42 -06002749
2750 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2751 abort();
2752
2753 return NULL;
pbrookdc828ca2009-04-09 22:21:07 +00002754}
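/*
 * Sketch of the move-to-front heuristic above, using the plain BSD
 * <sys/queue.h> LIST macros (QEMU's QLIST_* macros are modeled on
 * them).  Frequently hit entries migrate to the list head, so the
 * linear scan usually succeeds on the first iteration.
 */
#if 0
#include <stddef.h>
#include <sys/queue.h>

struct node {
    int key;
    LIST_ENTRY(node) next;
};

static LIST_HEAD(, node) head = LIST_HEAD_INITIALIZER(head);

static struct node *lookup_mru(int key)
{
    struct node *n;

    LIST_FOREACH(n, &head, next) {
        if (n->key == key) {
            /* move this entry to the start of the list */
            if (n != LIST_FIRST(&head)) {
                LIST_REMOVE(n, next);
                LIST_INSERT_HEAD(&head, n, next);
            }
            return n;
        }
    }
    return NULL;
}
#endif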
2755
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02002756/* Return a host pointer to ram allocated with qemu_ram_alloc.
2757 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
2758 */
2759void *qemu_safe_ram_ptr(ram_addr_t addr)
2760{
2761 RAMBlock *block;
2762
2763 QLIST_FOREACH(block, &ram_list.blocks, next) {
2764 if (addr - block->offset < block->length) {
Jan Kiszka868bb332011-06-21 22:59:09 +02002765 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01002766 /* We need to check if the requested address is in RAM
 2767 * because we don't want to map the entire guest memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01002768 * In that case, just map until the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01002769 */
2770 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002771 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01002772 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002773 block->host =
2774 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01002775 }
2776 }
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02002777 return block->host + (addr - block->offset);
2778 }
2779 }
2780
2781 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2782 abort();
2783
2784 return NULL;
2785}
2786
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002787/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
2788 * but takes a size argument */
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002789void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002790{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002791 if (*size == 0) {
2792 return NULL;
2793 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002794 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002795 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02002796 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002797 RAMBlock *block;
2798
2799 QLIST_FOREACH(block, &ram_list.blocks, next) {
2800 if (addr - block->offset < block->length) {
2801 if (addr - block->offset + *size > block->length)
2802 *size = block->length - addr + block->offset;
2803 return block->host + (addr - block->offset);
2804 }
2805 }
2806
2807 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2808 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002809 }
2810}
2811
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002812void qemu_put_ram_ptr(void *addr)
2813{
2814 trace_qemu_put_ram_ptr(addr);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002815}
2816
Marcelo Tosattie8902612010-10-11 15:31:19 -03002817int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00002818{
pbrook94a6b542009-04-11 17:15:54 +00002819 RAMBlock *block;
2820 uint8_t *host = ptr;
2821
Jan Kiszka868bb332011-06-21 22:59:09 +02002822 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002823 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Stefano Stabellini712c2b42011-05-19 18:35:46 +01002824 return 0;
2825 }
2826
Alex Williamsonf471a172010-06-11 11:11:42 -06002827 QLIST_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01002828 /* This case happens when the block is not mapped. */
2829 if (block->host == NULL) {
2830 continue;
2831 }
Alex Williamsonf471a172010-06-11 11:11:42 -06002832 if (host - block->host < block->length) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03002833 *ram_addr = block->offset + (host - block->host);
2834 return 0;
Alex Williamsonf471a172010-06-11 11:11:42 -06002835 }
pbrook94a6b542009-04-11 17:15:54 +00002836 }
Jun Nakajima432d2682010-08-31 16:41:25 +01002837
Marcelo Tosattie8902612010-10-11 15:31:19 -03002838 return -1;
2839}
Alex Williamsonf471a172010-06-11 11:11:42 -06002840
Marcelo Tosattie8902612010-10-11 15:31:19 -03002841/* Some of the softmmu routines need to translate from a host pointer
2842 (typically a TLB entry) back to a ram offset. */
2843ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
2844{
2845 ram_addr_t ram_addr;
Alex Williamsonf471a172010-06-11 11:11:42 -06002846
Marcelo Tosattie8902612010-10-11 15:31:19 -03002847 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
2848 fprintf(stderr, "Bad ram pointer %p\n", ptr);
2849 abort();
2850 }
2851 return ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00002852}
2853
Avi Kivitya8170e52012-10-23 12:30:10 +02002854static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002855 unsigned size)
bellard33417e72003-08-10 21:47:01 +00002856{
pbrook67d3b952006-12-18 05:03:52 +00002857#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00002858 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
pbrook67d3b952006-12-18 05:03:52 +00002859#endif
Richard Henderson5b450402011-04-18 16:13:12 -07002860#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002861 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00002862#endif
2863 return 0;
2864}
2865
Avi Kivitya8170e52012-10-23 12:30:10 +02002866static void unassigned_mem_write(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002867 uint64_t val, unsigned size)
blueswir1e18231a2008-10-06 18:46:28 +00002868{
2869#ifdef DEBUG_UNASSIGNED
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002870 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
blueswir1e18231a2008-10-06 18:46:28 +00002871#endif
Richard Henderson5b450402011-04-18 16:13:12 -07002872#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002873 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00002874#endif
2875}
2876
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002877static const MemoryRegionOps unassigned_mem_ops = {
2878 .read = unassigned_mem_read,
2879 .write = unassigned_mem_write,
2880 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00002881};
2882
Avi Kivitya8170e52012-10-23 12:30:10 +02002883static uint64_t error_mem_read(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002884 unsigned size)
2885{
2886 abort();
2887}
2888
Avi Kivitya8170e52012-10-23 12:30:10 +02002889static void error_mem_write(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002890 uint64_t value, unsigned size)
2891{
2892 abort();
2893}
2894
2895static const MemoryRegionOps error_mem_ops = {
2896 .read = error_mem_read,
2897 .write = error_mem_write,
2898 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00002899};
2900
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002901static const MemoryRegionOps rom_mem_ops = {
2902 .read = error_mem_read,
2903 .write = unassigned_mem_write,
2904 .endianness = DEVICE_NATIVE_ENDIAN,
2905};
2906
Avi Kivitya8170e52012-10-23 12:30:10 +02002907static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002908 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00002909{
bellard3a7d9292005-08-21 09:26:42 +00002910 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002911 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00002912 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2913#if !defined(CONFIG_USER_ONLY)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002914 tb_invalidate_phys_page_fast(ram_addr, size);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002915 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00002916#endif
2917 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002918 switch (size) {
2919 case 1:
2920 stb_p(qemu_get_ram_ptr(ram_addr), val);
2921 break;
2922 case 2:
2923 stw_p(qemu_get_ram_ptr(ram_addr), val);
2924 break;
2925 case 4:
2926 stl_p(qemu_get_ram_ptr(ram_addr), val);
2927 break;
2928 default:
2929 abort();
2930 }
bellardf23db162005-08-21 19:12:28 +00002931 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002932 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00002933 /* we remove the notdirty callback only if the code has been
2934 flushed */
2935 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00002936 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00002937}
2938
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002939static const MemoryRegionOps notdirty_mem_ops = {
2940 .read = error_mem_read,
2941 .write = notdirty_mem_write,
2942 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00002943};
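/*
 * Sketch of the dirty-flag bookkeeping behind notdirty_mem_write():
 * each RAM page carries one byte of dirty flags, and CODE_DIRTY_FLAG
 * is the one bit deliberately left clear until translated code for
 * the page has been flushed.  The flag value mirrors cpu-all.h and is
 * illustrative here.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define CODE_DIRTY_FLAG 0x02

int main(void)
{
    uint8_t dirty = 0;                    /* the page starts clean */

    /* a guest store sets every flag except CODE_DIRTY_FLAG */
    dirty |= 0xff & ~CODE_DIRTY_FLAG;
    printf("after write: %#x\n", dirty);  /* -> 0xfd */

    /* once the page's translated code has been invalidated... */
    dirty |= CODE_DIRTY_FLAG;
    if (dirty == 0xff) {
        printf("all flags set: the notdirty callback can be dropped\n");
    }
    return 0;
}
#endif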
2944
pbrook0f459d12008-06-09 00:20:13 +00002945/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00002946static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00002947{
Andreas Färber9349b4f2012-03-14 01:38:32 +01002948 CPUArchState *env = cpu_single_env;
aliguori06d55cc2008-11-18 20:24:06 +00002949 target_ulong pc, cs_base;
2950 TranslationBlock *tb;
pbrook0f459d12008-06-09 00:20:13 +00002951 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00002952 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00002953 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00002954
aliguori06d55cc2008-11-18 20:24:06 +00002955 if (env->watchpoint_hit) {
2956 /* We re-entered the check after replacing the TB. Now raise
 2957 * the debug interrupt so that it will trigger after the
2958 * current instruction. */
2959 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2960 return;
2961 }
pbrook2e70f6e2008-06-29 01:03:05 +00002962 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002963 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00002964 if ((vaddr == (wp->vaddr & len_mask) ||
2965 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00002966 wp->flags |= BP_WATCHPOINT_HIT;
2967 if (!env->watchpoint_hit) {
2968 env->watchpoint_hit = wp;
2969 tb = tb_find_pc(env->mem_io_pc);
2970 if (!tb) {
2971 cpu_abort(env, "check_watchpoint: could not find TB for "
2972 "pc=%p", (void *)env->mem_io_pc);
2973 }
Stefan Weil618ba8e2011-04-18 06:39:53 +00002974 cpu_restore_state(tb, env, env->mem_io_pc);
aliguori6e140f22008-11-18 20:37:55 +00002975 tb_phys_invalidate(tb, -1);
2976 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2977 env->exception_index = EXCP_DEBUG;
Max Filippov488d6572012-01-29 02:24:39 +04002978 cpu_loop_exit(env);
aliguori6e140f22008-11-18 20:37:55 +00002979 } else {
2980 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2981 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
Max Filippov488d6572012-01-29 02:24:39 +04002982 cpu_resume_from_signal(env, NULL);
aliguori6e140f22008-11-18 20:37:55 +00002983 }
aliguori06d55cc2008-11-18 20:24:06 +00002984 }
aliguori6e140f22008-11-18 20:37:55 +00002985 } else {
2986 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00002987 }
2988 }
2989}
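/*
 * Sketch of the hit test used above.  A watchpoint of power-of-two
 * length len stores len_mask = ~(len - 1), so for a watchpoint whose
 * address is aligned to its length, (vaddr & len_mask) == wp->vaddr
 * holds exactly when the access falls inside the watched range.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t wp_vaddr = 0x1000;              /* watch 8 bytes at 0x1000 */
    uint64_t wp_len_mask = ~(uint64_t)(8 - 1);
    uint64_t vaddr;

    for (vaddr = 0xff8; vaddr <= 0x1008; vaddr += 8) {
        int hit = (vaddr & wp_len_mask) == wp_vaddr;
        printf("access at %#llx: %s\n",
               (unsigned long long)vaddr, hit ? "hit" : "miss");
    }
    return 0;
}
#endif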
2990
pbrook6658ffb2007-03-16 23:58:11 +00002991/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2992 so these check for a hit then pass through to the normal out-of-line
2993 phys routines. */
Avi Kivitya8170e52012-10-23 12:30:10 +02002994static uint64_t watch_mem_read(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02002995 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00002996{
Avi Kivity1ec9b902012-01-02 12:47:48 +02002997 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
2998 switch (size) {
2999 case 1: return ldub_phys(addr);
3000 case 2: return lduw_phys(addr);
3001 case 4: return ldl_phys(addr);
3002 default: abort();
3003 }
pbrook6658ffb2007-03-16 23:58:11 +00003004}
3005
Avi Kivitya8170e52012-10-23 12:30:10 +02003006static void watch_mem_write(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02003007 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00003008{
Avi Kivity1ec9b902012-01-02 12:47:48 +02003009 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
3010 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04003011 case 1:
3012 stb_phys(addr, val);
3013 break;
3014 case 2:
3015 stw_phys(addr, val);
3016 break;
3017 case 4:
3018 stl_phys(addr, val);
3019 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02003020 default: abort();
3021 }
pbrook6658ffb2007-03-16 23:58:11 +00003022}
3023
Avi Kivity1ec9b902012-01-02 12:47:48 +02003024static const MemoryRegionOps watch_mem_ops = {
3025 .read = watch_mem_read,
3026 .write = watch_mem_write,
3027 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00003028};
pbrook6658ffb2007-03-16 23:58:11 +00003029
Avi Kivitya8170e52012-10-23 12:30:10 +02003030static uint64_t subpage_read(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02003031 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003032{
Avi Kivity70c68e42012-01-02 12:32:48 +02003033 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003034 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02003035 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00003036#if defined(DEBUG_SUBPAGE)
3037 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3038 mmio, len, addr, idx);
3039#endif
blueswir1db7b5422007-05-26 17:36:03 +00003040
Avi Kivity5312bd82012-02-12 18:32:55 +02003041 section = &phys_sections[mmio->sub_section[idx]];
3042 addr += mmio->base;
3043 addr -= section->offset_within_address_space;
3044 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02003045 return io_mem_read(section->mr, addr, len);
blueswir1db7b5422007-05-26 17:36:03 +00003046}
3047
Avi Kivitya8170e52012-10-23 12:30:10 +02003048static void subpage_write(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02003049 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003050{
Avi Kivity70c68e42012-01-02 12:32:48 +02003051 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003052 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02003053 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00003054#if defined(DEBUG_SUBPAGE)
Avi Kivity70c68e42012-01-02 12:32:48 +02003055 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
3056 " idx %d value %"PRIx64"\n",
Richard Hendersonf6405242010-04-22 16:47:31 -07003057 __func__, mmio, len, addr, idx, value);
blueswir1db7b5422007-05-26 17:36:03 +00003058#endif
Richard Hendersonf6405242010-04-22 16:47:31 -07003059
Avi Kivity5312bd82012-02-12 18:32:55 +02003060 section = &phys_sections[mmio->sub_section[idx]];
3061 addr += mmio->base;
3062 addr -= section->offset_within_address_space;
3063 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02003064 io_mem_write(section->mr, addr, value, len);
blueswir1db7b5422007-05-26 17:36:03 +00003065}
3066
Avi Kivity70c68e42012-01-02 12:32:48 +02003067static const MemoryRegionOps subpage_ops = {
3068 .read = subpage_read,
3069 .write = subpage_write,
3070 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00003071};
3072
Avi Kivitya8170e52012-10-23 12:30:10 +02003073static uint64_t subpage_ram_read(void *opaque, hwaddr addr,
Avi Kivityde712f92012-01-02 12:41:07 +02003074 unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01003075{
3076 ram_addr_t raddr = addr;
3077 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02003078 switch (size) {
3079 case 1: return ldub_p(ptr);
3080 case 2: return lduw_p(ptr);
3081 case 4: return ldl_p(ptr);
3082 default: abort();
3083 }
Andreas Färber56384e82011-11-30 16:26:21 +01003084}
3085
Avi Kivitya8170e52012-10-23 12:30:10 +02003086static void subpage_ram_write(void *opaque, hwaddr addr,
Avi Kivityde712f92012-01-02 12:41:07 +02003087 uint64_t value, unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01003088{
3089 ram_addr_t raddr = addr;
3090 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02003091 switch (size) {
3092 case 1: return stb_p(ptr, value);
3093 case 2: return stw_p(ptr, value);
3094 case 4: return stl_p(ptr, value);
3095 default: abort();
3096 }
Andreas Färber56384e82011-11-30 16:26:21 +01003097}
3098
Avi Kivityde712f92012-01-02 12:41:07 +02003099static const MemoryRegionOps subpage_ram_ops = {
3100 .read = subpage_ram_read,
3101 .write = subpage_ram_write,
3102 .endianness = DEVICE_NATIVE_ENDIAN,
Andreas Färber56384e82011-11-30 16:26:21 +01003103};
3104
Anthony Liguoric227f092009-10-01 16:12:16 -05003105static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02003106 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00003107{
3108 int idx, eidx;
3109
3110 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3111 return -1;
3112 idx = SUBPAGE_IDX(start);
3113 eidx = SUBPAGE_IDX(end);
3114#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00003115 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", __func__,
blueswir1db7b5422007-05-26 17:36:03 +00003116 mmio, start, end, idx, eidx, section);
3117#endif
Avi Kivity5312bd82012-02-12 18:32:55 +02003118 if (memory_region_is_ram(phys_sections[section].mr)) {
3119 MemoryRegionSection new_section = phys_sections[section];
3120 new_section.mr = &io_mem_subpage_ram;
3121 section = phys_section_add(&new_section);
Andreas Färber56384e82011-11-30 16:26:21 +01003122 }
blueswir1db7b5422007-05-26 17:36:03 +00003123 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02003124 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00003125 }
3126
3127 return 0;
3128}
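/*
 * Sketch of the index math behind subpage_register().  SUBPAGE_IDX()
 * is defined earlier in this file as (addr) & ~TARGET_PAGE_MASK, i.e.
 * the byte offset within the page, so every byte of a partially
 * mapped page can carry its own section id.  A 4 KB page size is
 * assumed below.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define EX_PAGE_SIZE 4096u
#define EX_PAGE_MASK (~(EX_PAGE_SIZE - 1))
#define EX_SUBPAGE_IDX(addr) ((addr) & ~EX_PAGE_MASK)

int main(void)
{
    static uint16_t sub_section[EX_PAGE_SIZE];
    unsigned idx;

    /* register section 42 over bytes 0x100..0x1ff of the page */
    for (idx = EX_SUBPAGE_IDX(0x100u); idx <= EX_SUBPAGE_IDX(0x1ffu); idx++) {
        sub_section[idx] = 42;
    }
    printf("byte 0x180 -> section %u\n", sub_section[EX_SUBPAGE_IDX(0x180u)]);
    return 0;
}
#endif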
3129
Avi Kivitya8170e52012-10-23 12:30:10 +02003130static subpage_t *subpage_init(hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00003131{
Anthony Liguoric227f092009-10-01 16:12:16 -05003132 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00003133
Anthony Liguori7267c092011-08-20 22:09:37 -05003134 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00003135
3136 mmio->base = base;
Avi Kivity70c68e42012-01-02 12:32:48 +02003137 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
3138 "subpage", TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02003139 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00003140#if defined(DEBUG_SUBPAGE)
aliguori1eec6142009-02-05 22:06:18 +00003141 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
 3142 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00003143#endif
Avi Kivity0f0cb162012-02-13 17:14:32 +02003144 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
blueswir1db7b5422007-05-26 17:36:03 +00003145
3146 return mmio;
3147}
3148
Avi Kivity5312bd82012-02-12 18:32:55 +02003149static uint16_t dummy_section(MemoryRegion *mr)
3150{
3151 MemoryRegionSection section = {
3152 .mr = mr,
3153 .offset_within_address_space = 0,
3154 .offset_within_region = 0,
3155 .size = UINT64_MAX,
3156 };
3157
3158 return phys_section_add(&section);
3159}
3160
Avi Kivitya8170e52012-10-23 12:30:10 +02003161MemoryRegion *iotlb_to_region(hwaddr index)
Avi Kivityaa102232012-03-08 17:06:55 +02003162{
Avi Kivity37ec01d2012-03-08 18:08:35 +02003163 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02003164}
3165
Avi Kivitye9179ce2009-06-14 11:38:52 +03003166static void io_mem_init(void)
3167{
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003168 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003169 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
3170 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
3171 "unassigned", UINT64_MAX);
3172 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
3173 "notdirty", UINT64_MAX);
Avi Kivityde712f92012-01-02 12:41:07 +02003174 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
3175 "subpage-ram", UINT64_MAX);
Avi Kivity1ec9b902012-01-02 12:47:48 +02003176 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
3177 "watch", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003178}
3179
Avi Kivityac1970f2012-10-03 16:22:53 +02003180static void mem_begin(MemoryListener *listener)
3181{
3182 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
3183
3184 destroy_all_mappings(d);
3185 d->phys_map.ptr = PHYS_MAP_NODE_NIL;
3186}
3187
Avi Kivity50c1e142012-02-08 21:36:02 +02003188static void core_begin(MemoryListener *listener)
3189{
Avi Kivity5312bd82012-02-12 18:32:55 +02003190 phys_sections_clear();
3191 phys_section_unassigned = dummy_section(&io_mem_unassigned);
Avi Kivityaa102232012-03-08 17:06:55 +02003192 phys_section_notdirty = dummy_section(&io_mem_notdirty);
3193 phys_section_rom = dummy_section(&io_mem_rom);
3194 phys_section_watch = dummy_section(&io_mem_watch);
Avi Kivity50c1e142012-02-08 21:36:02 +02003195}
3196
Avi Kivity1d711482012-10-02 18:54:45 +02003197static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02003198{
Andreas Färber9349b4f2012-03-14 01:38:32 +01003199 CPUArchState *env;
Avi Kivity117712c2012-02-12 21:23:17 +02003200
3201 /* since each CPU stores ram addresses in its TLB cache, we must
3202 reset the modified entries */
3203 /* XXX: slow ! */
3204 for(env = first_cpu; env != NULL; env = env->next_cpu) {
3205 tlb_flush(env, 1);
3206 }
Avi Kivity50c1e142012-02-08 21:36:02 +02003207}
3208
Avi Kivity93632742012-02-08 16:54:16 +02003209static void core_log_global_start(MemoryListener *listener)
3210{
3211 cpu_physical_memory_set_dirty_tracking(1);
3212}
3213
3214static void core_log_global_stop(MemoryListener *listener)
3215{
3216 cpu_physical_memory_set_dirty_tracking(0);
3217}
3218
Avi Kivity4855d412012-02-08 21:16:05 +02003219static void io_region_add(MemoryListener *listener,
3220 MemoryRegionSection *section)
3221{
Avi Kivitya2d33522012-03-05 17:40:12 +02003222 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
3223
3224 mrio->mr = section->mr;
3225 mrio->offset = section->offset_within_region;
3226 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
Avi Kivity4855d412012-02-08 21:16:05 +02003227 section->offset_within_address_space, section->size);
Avi Kivitya2d33522012-03-05 17:40:12 +02003228 ioport_register(&mrio->iorange);
Avi Kivity4855d412012-02-08 21:16:05 +02003229}
3230
3231static void io_region_del(MemoryListener *listener,
3232 MemoryRegionSection *section)
3233{
3234 isa_unassign_ioport(section->offset_within_address_space, section->size);
3235}
3236
Avi Kivity93632742012-02-08 16:54:16 +02003237static MemoryListener core_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02003238 .begin = core_begin,
Avi Kivity93632742012-02-08 16:54:16 +02003239 .log_global_start = core_log_global_start,
3240 .log_global_stop = core_log_global_stop,
Avi Kivityac1970f2012-10-03 16:22:53 +02003241 .priority = 1,
Avi Kivity93632742012-02-08 16:54:16 +02003242};
3243
Avi Kivity4855d412012-02-08 21:16:05 +02003244static MemoryListener io_memory_listener = {
3245 .region_add = io_region_add,
3246 .region_del = io_region_del,
Avi Kivity4855d412012-02-08 21:16:05 +02003247 .priority = 0,
3248};
3249
Avi Kivity1d711482012-10-02 18:54:45 +02003250static MemoryListener tcg_memory_listener = {
3251 .commit = tcg_commit,
3252};
3253
Avi Kivityac1970f2012-10-03 16:22:53 +02003254void address_space_init_dispatch(AddressSpace *as)
3255{
3256 AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
3257
3258 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
3259 d->listener = (MemoryListener) {
3260 .begin = mem_begin,
3261 .region_add = mem_add,
3262 .region_nop = mem_add,
3263 .priority = 0,
3264 };
3265 as->dispatch = d;
3266 memory_listener_register(&d->listener, as);
3267}
3268
Avi Kivity83f3c252012-10-07 12:59:55 +02003269void address_space_destroy_dispatch(AddressSpace *as)
3270{
3271 AddressSpaceDispatch *d = as->dispatch;
3272
3273 memory_listener_unregister(&d->listener);
3274 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
3275 g_free(d);
3276 as->dispatch = NULL;
3277}
3278
Avi Kivity62152b82011-07-26 14:26:14 +03003279static void memory_map_init(void)
3280{
Anthony Liguori7267c092011-08-20 22:09:37 -05003281 system_memory = g_malloc(sizeof(*system_memory));
Avi Kivity8417ceb2011-08-03 11:56:14 +03003282 memory_region_init(system_memory, "system", INT64_MAX);
Avi Kivity2673a5d2012-10-02 18:49:28 +02003283 address_space_init(&address_space_memory, system_memory);
3284 address_space_memory.name = "memory";
Avi Kivity309cb472011-08-08 16:09:03 +03003285
Anthony Liguori7267c092011-08-20 22:09:37 -05003286 system_io = g_malloc(sizeof(*system_io));
Avi Kivity309cb472011-08-08 16:09:03 +03003287 memory_region_init(system_io, "io", 65536);
Avi Kivity2673a5d2012-10-02 18:49:28 +02003288 address_space_init(&address_space_io, system_io);
3289 address_space_io.name = "I/O";
Avi Kivity93632742012-02-08 16:54:16 +02003290
Avi Kivityf6790af2012-10-02 20:13:51 +02003291 memory_listener_register(&core_memory_listener, &address_space_memory);
3292 memory_listener_register(&io_memory_listener, &address_space_io);
3293 memory_listener_register(&tcg_memory_listener, &address_space_memory);
Avi Kivity62152b82011-07-26 14:26:14 +03003294}
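/*
 * Hedged sketch of a MemoryListener, registered the same way as the
 * core/io/tcg listeners above.  The callback body and the priority
 * value are illustrative only; listeners are invoked in priority
 * order.
 */
#if 0
static void example_region_add(MemoryListener *listener,
                               MemoryRegionSection *section)
{
    /* react to a new section appearing in the address space */
}

static MemoryListener example_listener = {
    .region_add = example_region_add,
    .priority = 10,
};

static void example_register(void)
{
    memory_listener_register(&example_listener, &address_space_memory);
}
#endif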
3295
3296MemoryRegion *get_system_memory(void)
3297{
3298 return system_memory;
3299}
3300
Avi Kivity309cb472011-08-08 16:09:03 +03003301MemoryRegion *get_system_io(void)
3302{
3303 return system_io;
3304}
3305
pbrooke2eef172008-06-08 01:09:01 +00003306#endif /* !defined(CONFIG_USER_ONLY) */
3307
bellard13eb76e2004-01-24 15:23:36 +00003308/* physical memory access (slow version, mainly for debug) */
3309#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +01003310int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00003311 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003312{
3313 int l, flags;
3314 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00003315 void * p;
bellard13eb76e2004-01-24 15:23:36 +00003316
3317 while (len > 0) {
3318 page = addr & TARGET_PAGE_MASK;
3319 l = (page + TARGET_PAGE_SIZE) - addr;
3320 if (l > len)
3321 l = len;
3322 flags = page_get_flags(page);
3323 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00003324 return -1;
bellard13eb76e2004-01-24 15:23:36 +00003325 if (is_write) {
3326 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00003327 return -1;
bellard579a97f2007-11-11 14:26:47 +00003328 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003329 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00003330 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003331 memcpy(p, buf, l);
3332 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00003333 } else {
3334 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00003335 return -1;
bellard579a97f2007-11-11 14:26:47 +00003336 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003337 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00003338 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003339 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00003340 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00003341 }
3342 len -= l;
3343 buf += l;
3344 addr += l;
3345 }
Paul Brooka68fe892010-03-01 00:08:59 +00003346 return 0;
bellard13eb76e2004-01-24 15:23:36 +00003347}
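/*
 * A standalone sketch of the page-chunking loop shared by the rw
 * routines in this file: each pass is clamped to the end of the
 * current page, so a buffer crossing page boundaries is handled
 * piecewise.  A 4 KB page size is assumed.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define EX_PAGE_SIZE 4096u

int main(void)
{
    uint64_t addr = 0x0ff0;
    int len = 0x30;

    while (len > 0) {
        uint64_t page = addr & ~(uint64_t)(EX_PAGE_SIZE - 1);
        int l = (int)((page + EX_PAGE_SIZE) - addr);  /* bytes left in page */
        if (l > len) {
            l = len;
        }
        printf("chunk: addr=%#llx len=%#x\n", (unsigned long long)addr, l);
        len -= l;
        addr += l;
    }
    return 0;
}
#endif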
bellard8df1cd02005-01-28 22:37:22 +00003348
bellard13eb76e2004-01-24 15:23:36 +00003349#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00003350
Avi Kivitya8170e52012-10-23 12:30:10 +02003351static void invalidate_and_set_dirty(hwaddr addr,
3352 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00003353{
3354 if (!cpu_physical_memory_is_dirty(addr)) {
3355 /* invalidate code */
3356 tb_invalidate_phys_page_range(addr, addr + length, 0);
3357 /* set dirty bit */
3358 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
3359 }
Anthony PERARDe2269392012-10-03 13:49:22 +00003360 xen_modified_memory(addr, length);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00003361}
3362
Avi Kivitya8170e52012-10-23 12:30:10 +02003363void address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02003364 int len, bool is_write)
bellard13eb76e2004-01-24 15:23:36 +00003365{
Avi Kivityac1970f2012-10-03 16:22:53 +02003366 AddressSpaceDispatch *d = as->dispatch;
Avi Kivity37ec01d2012-03-08 18:08:35 +02003367 int l;
bellard13eb76e2004-01-24 15:23:36 +00003368 uint8_t *ptr;
3369 uint32_t val;
Avi Kivitya8170e52012-10-23 12:30:10 +02003370 hwaddr page;
Avi Kivityf3705d52012-03-08 16:16:34 +02003371 MemoryRegionSection *section;
ths3b46e622007-09-17 08:09:54 +00003372
bellard13eb76e2004-01-24 15:23:36 +00003373 while (len > 0) {
3374 page = addr & TARGET_PAGE_MASK;
3375 l = (page + TARGET_PAGE_SIZE) - addr;
3376 if (l > len)
3377 l = len;
Avi Kivityac1970f2012-10-03 16:22:53 +02003378 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003379
bellard13eb76e2004-01-24 15:23:36 +00003380 if (is_write) {
Avi Kivityf3705d52012-03-08 16:16:34 +02003381 if (!memory_region_is_ram(section->mr)) {
Avi Kivitya8170e52012-10-23 12:30:10 +02003382 hwaddr addr1;
Blue Swirlcc5bea62012-04-14 14:56:48 +00003383 addr1 = memory_region_section_addr(section, addr);
bellard6a00d602005-11-21 23:25:50 +00003384 /* XXX: could force cpu_single_env to NULL to avoid
3385 potential bugs */
aurel326c2934d2009-02-18 21:37:17 +00003386 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003387 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003388 val = ldl_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003389 io_mem_write(section->mr, addr1, val, 4);
bellard13eb76e2004-01-24 15:23:36 +00003390 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003391 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003392 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003393 val = lduw_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003394 io_mem_write(section->mr, addr1, val, 2);
bellard13eb76e2004-01-24 15:23:36 +00003395 l = 2;
3396 } else {
bellard1c213d12005-09-03 10:49:04 +00003397 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003398 val = ldub_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003399 io_mem_write(section->mr, addr1, val, 1);
bellard13eb76e2004-01-24 15:23:36 +00003400 l = 1;
3401 }
Avi Kivityf3705d52012-03-08 16:16:34 +02003402 } else if (!section->readonly) {
Anthony PERARD8ca56922011-07-15 04:32:53 +00003403 ram_addr_t addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02003404 addr1 = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003405 + memory_region_section_addr(section, addr);
bellard13eb76e2004-01-24 15:23:36 +00003406 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003407 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00003408 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00003409 invalidate_and_set_dirty(addr1, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003410 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00003411 }
3412 } else {
Blue Swirlcc5bea62012-04-14 14:56:48 +00003413 if (!(memory_region_is_ram(section->mr) ||
3414 memory_region_is_romd(section->mr))) {
Avi Kivitya8170e52012-10-23 12:30:10 +02003415 hwaddr addr1;
bellard13eb76e2004-01-24 15:23:36 +00003416 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00003417 addr1 = memory_region_section_addr(section, addr);
aurel326c2934d2009-02-18 21:37:17 +00003418 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003419 /* 32 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02003420 val = io_mem_read(section->mr, addr1, 4);
bellardc27004e2005-01-03 23:35:10 +00003421 stl_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003422 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003423 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003424 /* 16 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02003425 val = io_mem_read(section->mr, addr1, 2);
bellardc27004e2005-01-03 23:35:10 +00003426 stw_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003427 l = 2;
3428 } else {
bellard1c213d12005-09-03 10:49:04 +00003429 /* 8 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02003430 val = io_mem_read(section->mr, addr1, 1);
bellardc27004e2005-01-03 23:35:10 +00003431 stb_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003432 l = 1;
3433 }
3434 } else {
3435 /* RAM case */
Anthony PERARD0a1b3572012-03-19 15:54:34 +00003436 ptr = qemu_get_ram_ptr(section->mr->ram_addr
Blue Swirlcc5bea62012-04-14 14:56:48 +00003437 + memory_region_section_addr(section,
3438 addr));
Avi Kivityf3705d52012-03-08 16:16:34 +02003439 memcpy(buf, ptr, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003440 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00003441 }
3442 }
3443 len -= l;
3444 buf += l;
3445 addr += l;
3446 }
3447}
bellard8df1cd02005-01-28 22:37:22 +00003448
Avi Kivitya8170e52012-10-23 12:30:10 +02003449void address_space_write(AddressSpace *as, hwaddr addr,
Avi Kivityac1970f2012-10-03 16:22:53 +02003450 const uint8_t *buf, int len)
3451{
3452 address_space_rw(as, addr, (uint8_t *)buf, len, true);
3453}
3454
3455/**
3456 * address_space_read: read from an address space.
3457 *
3458 * @as: #AddressSpace to be accessed
3459 * @addr: address within that address space
 3460 * @buf: buffer with the data transferred
 * @len: length of the transfer in bytes
3461 */
Avi Kivitya8170e52012-10-23 12:30:10 +02003462void address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02003463{
3464 address_space_rw(as, addr, buf, len, false);
3465}
3466
3467
Avi Kivitya8170e52012-10-23 12:30:10 +02003468void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02003469 int len, int is_write)
3470{
3471 return address_space_rw(&address_space_memory, addr, buf, len, is_write);
3472}
3473
bellardd0ecd2a2006-04-23 17:14:48 +00003474/* used for ROM loading : can write in RAM and ROM */
Avi Kivitya8170e52012-10-23 12:30:10 +02003475void cpu_physical_memory_write_rom(hwaddr addr,
bellardd0ecd2a2006-04-23 17:14:48 +00003476 const uint8_t *buf, int len)
3477{
Avi Kivityac1970f2012-10-03 16:22:53 +02003478 AddressSpaceDispatch *d = address_space_memory.dispatch;
bellardd0ecd2a2006-04-23 17:14:48 +00003479 int l;
3480 uint8_t *ptr;
Avi Kivitya8170e52012-10-23 12:30:10 +02003481 hwaddr page;
Avi Kivityf3705d52012-03-08 16:16:34 +02003482 MemoryRegionSection *section;
ths3b46e622007-09-17 08:09:54 +00003483
bellardd0ecd2a2006-04-23 17:14:48 +00003484 while (len > 0) {
3485 page = addr & TARGET_PAGE_MASK;
3486 l = (page + TARGET_PAGE_SIZE) - addr;
3487 if (l > len)
3488 l = len;
Avi Kivityac1970f2012-10-03 16:22:53 +02003489 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003490
Blue Swirlcc5bea62012-04-14 14:56:48 +00003491 if (!(memory_region_is_ram(section->mr) ||
3492 memory_region_is_romd(section->mr))) {
bellardd0ecd2a2006-04-23 17:14:48 +00003493 /* do nothing */
3494 } else {
3495 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02003496 addr1 = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003497 + memory_region_section_addr(section, addr);
bellardd0ecd2a2006-04-23 17:14:48 +00003498 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003499 ptr = qemu_get_ram_ptr(addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00003500 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00003501 invalidate_and_set_dirty(addr1, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003502 qemu_put_ram_ptr(ptr);
bellardd0ecd2a2006-04-23 17:14:48 +00003503 }
3504 len -= l;
3505 buf += l;
3506 addr += l;
3507 }
3508}
3509
aliguori6d16c2f2009-01-22 16:59:11 +00003510typedef struct {
3511 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02003512 hwaddr addr;
3513 hwaddr len;
aliguori6d16c2f2009-01-22 16:59:11 +00003514} BounceBuffer;
3515
3516static BounceBuffer bounce;
3517
aliguoriba223c22009-01-22 16:59:16 +00003518typedef struct MapClient {
3519 void *opaque;
3520 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00003521 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00003522} MapClient;
3523
Blue Swirl72cf2d42009-09-12 07:36:22 +00003524static QLIST_HEAD(map_client_list, MapClient) map_client_list
3525 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003526
3527void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3528{
Anthony Liguori7267c092011-08-20 22:09:37 -05003529 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00003530
3531 client->opaque = opaque;
3532 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003533 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00003534 return client;
3535}
3536
3537void cpu_unregister_map_client(void *_client)
3538{
3539 MapClient *client = (MapClient *)_client;
3540
Blue Swirl72cf2d42009-09-12 07:36:22 +00003541 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05003542 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00003543}
3544
3545static void cpu_notify_map_clients(void)
3546{
3547 MapClient *client;
3548
Blue Swirl72cf2d42009-09-12 07:36:22 +00003549 while (!QLIST_EMPTY(&map_client_list)) {
3550 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003551 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09003552 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00003553 }
3554}
3555
aliguori6d16c2f2009-01-22 16:59:11 +00003556/* Map a physical memory region into a host virtual address.
3557 * May map a subset of the requested range, given by and returned in *plen.
3558 * May return NULL if resources needed to perform the mapping are exhausted.
3559 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00003560 * Use cpu_register_map_client() to know when retrying the map operation is
3561 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00003562 */
Avi Kivityac1970f2012-10-03 16:22:53 +02003563void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02003564 hwaddr addr,
3565 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02003566 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00003567{
Avi Kivityac1970f2012-10-03 16:22:53 +02003568 AddressSpaceDispatch *d = as->dispatch;
Avi Kivitya8170e52012-10-23 12:30:10 +02003569 hwaddr len = *plen;
3570 hwaddr todo = 0;
aliguori6d16c2f2009-01-22 16:59:11 +00003571 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02003572 hwaddr page;
Avi Kivityf3705d52012-03-08 16:16:34 +02003573 MemoryRegionSection *section;
Anthony PERARDf15fbc42011-07-20 08:17:42 +00003574 ram_addr_t raddr = RAM_ADDR_MAX;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003575 ram_addr_t rlen;
3576 void *ret;
aliguori6d16c2f2009-01-22 16:59:11 +00003577
3578 while (len > 0) {
3579 page = addr & TARGET_PAGE_MASK;
3580 l = (page + TARGET_PAGE_SIZE) - addr;
3581 if (l > len)
3582 l = len;
Avi Kivityac1970f2012-10-03 16:22:53 +02003583 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
aliguori6d16c2f2009-01-22 16:59:11 +00003584
Avi Kivityf3705d52012-03-08 16:16:34 +02003585 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003586 if (todo || bounce.buffer) {
aliguori6d16c2f2009-01-22 16:59:11 +00003587 break;
3588 }
3589 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3590 bounce.addr = addr;
3591 bounce.len = l;
3592 if (!is_write) {
Avi Kivityac1970f2012-10-03 16:22:53 +02003593 address_space_read(as, addr, bounce.buffer, l);
aliguori6d16c2f2009-01-22 16:59:11 +00003594 }
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003595
3596 *plen = l;
3597 return bounce.buffer;
aliguori6d16c2f2009-01-22 16:59:11 +00003598 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003599 if (!todo) {
Avi Kivityf3705d52012-03-08 16:16:34 +02003600 raddr = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003601 + memory_region_section_addr(section, addr);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003602 }
aliguori6d16c2f2009-01-22 16:59:11 +00003603
3604 len -= l;
3605 addr += l;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003606 todo += l;
aliguori6d16c2f2009-01-22 16:59:11 +00003607 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003608 rlen = todo;
3609 ret = qemu_ram_ptr_length(raddr, &rlen);
3610 *plen = rlen;
3611 return ret;
aliguori6d16c2f2009-01-22 16:59:11 +00003612}
3613
Avi Kivityac1970f2012-10-03 16:22:53 +02003614/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00003615 * Will also mark the memory as dirty if is_write == 1. access_len gives
3616 * the amount of memory that was actually read or written by the caller.
3617 */
Avi Kivitya8170e52012-10-23 12:30:10 +02003618void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
3619 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00003620{
3621 if (buffer != bounce.buffer) {
3622 if (is_write) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03003623 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00003624 while (access_len) {
3625 unsigned l;
3626 l = TARGET_PAGE_SIZE;
3627 if (l > access_len)
3628 l = access_len;
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00003629 invalidate_and_set_dirty(addr1, l);
aliguori6d16c2f2009-01-22 16:59:11 +00003630 addr1 += l;
3631 access_len -= l;
3632 }
3633 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003634 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003635 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003636 }
aliguori6d16c2f2009-01-22 16:59:11 +00003637 return;
3638 }
3639 if (is_write) {
Avi Kivityac1970f2012-10-03 16:22:53 +02003640 address_space_write(as, bounce.addr, bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00003641 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00003642 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00003643 bounce.buffer = NULL;
aliguoriba223c22009-01-22 16:59:16 +00003644 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00003645}
bellardd0ecd2a2006-04-23 17:14:48 +00003646
Avi Kivitya8170e52012-10-23 12:30:10 +02003647void *cpu_physical_memory_map(hwaddr addr,
3648 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02003649 int is_write)
3650{
3651 return address_space_map(&address_space_memory, addr, plen, is_write);
3652}
3653
Avi Kivitya8170e52012-10-23 12:30:10 +02003654void cpu_physical_memory_unmap(void *buffer, hwaddr len,
3655 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02003656{
3657 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
3658}
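/*
 * Hedged usage sketch for the map/unmap pair above, as a DMA path
 * might use it.  The address and length are made up, and a real
 * caller must cope with *plen coming back smaller than requested.
 */
#if 0
static void example_dma_read(hwaddr addr, hwaddr len)
{
    hwaddr plen = len;
    void *buf = cpu_physical_memory_map(addr, &plen, 0 /* read */);

    if (!buf) {
        /* resources exhausted; retry via cpu_register_map_client() */
        return;
    }
    /* ... consume plen bytes at buf ... */
    cpu_physical_memory_unmap(buf, plen, 0, plen);
}
#endif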

/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint32_t val;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
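
/* Usage sketch (illustrative only, not part of the original file): a
 * device model reading a guest descriptor whose layout is defined as
 * little-endian regardless of target byte order; desc_addr is a
 * hypothetical guest-physical address.
 *
 *     uint32_t len_field = ldl_le_phys(desc_addr);      // fixed LE field
 *     uint32_t native    = ldl_phys(desc_addr + 4);     // target-native
 *
 * The _le/_be variants only byte-swap when the requested endianness
 * differs from the target's, which is exactly what the bswap32() paths
 * in ldl_phys_internal() implement for the I/O case.
 */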

/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);

        /* XXX This is broken when device endian != cpu endian.
           Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = io_mem_read(section->mr, addr, 4) << 32;
        val |= io_mem_read(section->mr, addr + 4, 4);
#else
        val = io_mem_read(section->mr, addr, 4);
        val |= io_mem_read(section->mr, addr + 4, 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
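
/* Worked example (illustrative only): on a little-endian target, the
 * 64-bit MMIO path above issues two 4-byte reads.  If
 * io_mem_read(mr, addr, 4) returns 0x11223344 and
 * io_mem_read(mr, addr + 4, 4) returns 0x55667788, then
 * val = 0x11223344 | (0x55667788ULL << 32) = 0x5566778811223344,
 * i.e. the low half comes from the lower address, per the #else branch.
 */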

/* XXX: optimize */
uint32_t ldub_phys(hwaddr addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(hwaddr addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint32_t val;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned.  The RAM page is not marked dirty and
   the code inside it is not invalidated.  This is useful when the dirty
   bits are used to track modified PTEs. */
void stl_phys_notdirty(hwaddr addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
                               & TARGET_PAGE_MASK)
                              + memory_region_section_addr(section, addr);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
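
/* Usage sketch (illustrative only): a software page-table walker setting
 * the "accessed" bit in a guest PTE.  A plain stl_phys() would mark the
 * page dirty and invalidate any translated code on it; the _notdirty
 * variant avoids that, which matters when the dirty bits themselves are
 * being used to track PTE modifications.  pte_addr and PTE_ACCESSED are
 * hypothetical names.
 *
 *     uint32_t pte = ldl_phys(pte_addr);
 *     if (!(pte & PTE_ACCESSED)) {
 *         stl_phys_notdirty(pte_addr, pte | PTE_ACCESSED);
 *     }
 */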

void stq_phys_notdirty(hwaddr addr, uint64_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write(section->mr, addr, val >> 32, 4);
        io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
#else
        io_mem_write(section->mr, addr, (uint32_t)val, 4);
        io_mem_write(section->mr, addr + 4, val >> 32, 4);
#endif
    } else {
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
static inline void stl_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
                + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
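
/* Usage sketch (illustrative only): a device model posting a 32-bit
 * little-endian status word into guest RAM.  The RAM path of
 * stl_phys_internal() ends with invalidate_and_set_dirty(), so any
 * translated code on that page is discarded and migration sees the page
 * as dirty.  status_addr and STATUS_DONE are hypothetical names.
 *
 *     stl_le_phys(status_addr, STATUS_DONE);
 */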

/* XXX: optimize */
void stb_phys(hwaddr addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 2);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
                + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif
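
/* Usage sketch (illustrative only): cpu_memory_rw_debug() is the kind of
 * primitive a gdbstub uses to read guest *virtual* memory without
 * disturbing the guest; env and vaddr are assumed to be in scope.
 *
 *     uint8_t insn[4];
 *     if (cpu_memory_rw_debug(env, vaddr, insn, sizeof(insn), 0) < 0) {
 *         // vaddr has no mapping in the guest page tables
 *     }
 */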

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc(retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred. */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn. */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch. */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen. */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok. */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB. */
    cpu_resume_from_signal(env, NULL);
}
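
/* Worked example (illustrative only): suppose the TB holds 5 guest
 * instructions (tb->icount == 5) and the I/O access trapped in the 3rd.
 * The subtraction above then yields n == 2 completed instructions, n++
 * accounts for the I/O insn itself, and tb_gen_code() is called with
 * cflags = 3 | CF_LAST_IO, so the regenerated TB ends exactly on the
 * I/O instruction.
 */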

#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size %td/%zd\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ?
                    (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
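
/* Usage sketch (illustrative only): in QEMU this dump is normally
 * reached from the human monitor's "info jit" command, roughly:
 *
 *     dump_exec_info((FILE *)mon, monitor_fprintf);
 *
 * but any (stream, fprintf-like) pair will do, e.g. (stderr, fprintf).
 */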

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#endif

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             phys_addr >> TARGET_PAGE_BITS);

    return !(memory_region_is_ram(section->mr) ||
             memory_region_is_romd(section->mr));
}
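
/* Usage sketch (illustrative only): guest-memory dump code can use this
 * predicate to skip MMIO pages, where a read may have side effects on
 * the device.  paddr and buf are hypothetical names.
 *
 *     if (!cpu_physical_memory_is_io(paddr)) {
 *         cpu_physical_memory_read(paddr, buf, TARGET_PAGE_SIZE);
 *     }
 */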
#endif