/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32) && !defined(_WIN64)
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

#define P_L2_LEVELS \
    (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)

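/*
 * Worked example (illustrative annotation, not part of the original
 * file): on a 64-bit host with TARGET_PAGE_BITS == 12 and L2_BITS == 10,
 * L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS == 52 bits must be mapped.
 * Then V_L1_BITS_REM == 52 % 10 == 2, which is < 4, so the remainder is
 * folded into the top level: V_L1_BITS == 12, V_L1_SIZE == 4096 entries,
 * and V_L1_SHIFT == 40.  The remaining 40 bits are resolved by four
 * further 1024-entry levels (see page_find_alloc() below).
 */
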
uintptr_t qemu_real_host_page_size;
uintptr_t qemu_host_page_size;
uintptr_t qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageEntry PhysPageEntry;

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
    /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to MemoryRegionSections.  */
static PhysPageEntry phys_map = { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };

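/*
 * Illustrative note (added annotation, not upstream text): each
 * PhysPageEntry packs a tag bit and an index into 16 bits (typically two
 * bytes), so a node or section index may not exceed 15 bits.
 * PHYS_MAP_NODE_NIL expands to 0x7fff, the largest 15-bit value, which
 * is why phys_map_node_alloc() below asserts that the node counter never
 * reaches it:
 *
 *   PhysPageEntry e = { .is_leaf = 1, .ptr = phys_section_rom };
 *   // at most 0x7fff nodes/sections are addressable through .ptr
 */
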
static void io_mem_init(void);
static void memory_map_init(void);

static MemoryRegion io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

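/*
 * Illustrative sketch (added annotation, not upstream text): with the
 * example constants worked out above, looking up a page number walks
 * the radix tree as
 *
 *   l1_map[(index >> 40) & (V_L1_SIZE - 1)]   -> intermediate table
 *    table[(index >> 30) & (L2_SIZE - 1)]     -> intermediate table
 *    table[(index >> 20) & (L2_SIZE - 1)]     -> intermediate table
 *    table[(index >> 10) & (L2_SIZE - 1)]     -> PageDesc[L2_SIZE] leaf
 *     leaf + (index & (L2_SIZE - 1))          -> the PageDesc itself
 *
 * Intermediate tables are only materialized when alloc != 0.
 */
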
static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}


static void phys_page_set_level(PhysPageEntry *lp, target_phys_addr_t *index,
                                target_phys_addr_t *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    target_phys_addr_t step = (target_phys_addr_t)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(target_phys_addr_t index, target_phys_addr_t nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

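/*
 * Illustrative sketch (added annotation, not upstream text): a range
 * that is aligned to a level's step size is recorded as a single leaf
 * entry at that level instead of being expanded downwards.  E.g.
 * registering a section over pages [0x400, 0x800) with L2_BITS == 10:
 *
 *   phys_page_set(0x400, 0x400, section_index);
 *   // 0x400 pages == one level-1 step, so a single entry with
 *   // is_leaf == 1 covers the range; no level-0 node is allocated.
 */
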
static MemoryRegionSection *phys_page_find(target_phys_addr_t index)
{
    PhysPageEntry lp = phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}

static target_phys_addr_t section_addr(MemoryRegionSection *section,
                                       target_phys_addr_t addr)
{
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    return addr;
}

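/*
 * Worked example (illustrative, values assumed): for a section whose
 * offset_within_address_space is 0x10000000 and whose
 * offset_within_region is 0x2000, guest-physical address 0x10001000
 * maps to offset 0x3000 inside the backing MemoryRegion:
 *
 *   0x10001000 - 0x10000000 + 0x2000 == 0x3000
 */
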
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUArchState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. This will change when a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Keep the buffer no bigger than 16MB to branch between blocks */
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

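/*
 * Note on the sizing above (added annotation, my reading rather than an
 * upstream comment): code_gen_buffer_max_size deliberately stops short
 * of the real end of the buffer by TCG_MAX_OP_SIZE * OPC_BUF_SIZE
 * bytes.  tb_alloc() only compares code_gen_ptr against this reduced
 * limit before translating, so even a worst-case translation block
 * started just under the limit still fits inside the mapping.
 */
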
630/* Must be called before using the QEMU cpus. 'tb_size' is the size
631 (in bytes) allocated to the translation buffer. Zero means default
632 size. */
Jan Kiszkad5ab9712011-08-02 16:10:21 +0200633void tcg_exec_init(unsigned long tb_size)
bellard26a5f132008-05-28 12:30:31 +0000634{
bellard26a5f132008-05-28 12:30:31 +0000635 cpu_gen_init();
636 code_gen_alloc(tb_size);
637 code_gen_ptr = code_gen_buffer;
Richard Henderson813da622012-03-19 12:25:11 -0700638 tcg_register_jit(code_gen_buffer, code_gen_buffer_size);
bellard43694152008-05-29 09:35:57 +0000639 page_init();
Richard Henderson9002ec72010-05-06 08:50:41 -0700640#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
641 /* There's no guest base to take into account, so go ahead and
642 initialize the prologue now. */
643 tcg_prologue_init(&tcg_ctx);
644#endif
bellard26a5f132008-05-28 12:30:31 +0000645}
646
Jan Kiszkad5ab9712011-08-02 16:10:21 +0200647bool tcg_enabled(void)
648{
649 return code_gen_buffer != NULL;
650}
651
652void cpu_exec_init_all(void)
653{
654#if !defined(CONFIG_USER_ONLY)
655 memory_map_init();
656 io_mem_init();
657#endif
658}
659
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUArchState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUArchState),
        VMSTATE_UINT32(interrupt_request, CPUArchState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUArchState *qemu_get_cpu(int cpu)
{
    CPUArchState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUArchState *env)
{
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated. */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

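/*
 * Design note (added annotation, not an upstream comment): TBs are
 * carved out of the flat tbs[] array and host code is bump-allocated at
 * code_gen_ptr, so there is no general free list.  tb_free() can only
 * reclaim the most recently allocated TB; anything older is recovered
 * wholesale by tb_flush().
 */
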
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

800/* flush all the translation blocks */
bellardd4e81642003-05-25 16:46:15 +0000801/* XXX: tb_flush is currently not thread safe */
Andreas Färber9349b4f2012-03-14 01:38:32 +0100802void tb_flush(CPUArchState *env1)
bellardfd6ce8f2003-05-14 19:00:11 +0000803{
Andreas Färber9349b4f2012-03-14 01:38:32 +0100804 CPUArchState *env;
bellard01243112004-01-04 15:48:17 +0000805#if defined(DEBUG_FLUSH)
blueswir1ab3d1722007-11-04 07:31:40 +0000806 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
807 (unsigned long)(code_gen_ptr - code_gen_buffer),
808 nb_tbs, nb_tbs > 0 ?
809 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
bellardfd6ce8f2003-05-14 19:00:11 +0000810#endif
bellard26a5f132008-05-28 12:30:31 +0000811 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
pbrooka208e542008-03-31 17:07:36 +0000812 cpu_abort(env1, "Internal error: code buffer overflow\n");
813
bellardfd6ce8f2003-05-14 19:00:11 +0000814 nb_tbs = 0;
ths3b46e622007-09-17 08:09:54 +0000815
bellard6a00d602005-11-21 23:25:50 +0000816 for(env = first_cpu; env != NULL; env = env->next_cpu) {
817 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
818 }
bellard9fa3e852004-01-04 18:06:42 +0000819
bellard8a8a6082004-10-03 13:36:49 +0000820 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
bellardfd6ce8f2003-05-14 19:00:11 +0000821 page_flush_tb();
bellard9fa3e852004-01-04 18:06:42 +0000822
bellardfd6ce8f2003-05-14 19:00:11 +0000823 code_gen_ptr = code_gen_buffer;
bellardd4e81642003-05-25 16:46:15 +0000824 /* XXX: flush processor icache at this point if cache flush is
825 expensive */
bellarde3db7222005-01-26 22:00:47 +0000826 tb_flush_count++;
bellardfd6ce8f2003-05-14 19:00:11 +0000827}
828
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

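/*
 * Illustrative note on the encoding (added annotation, not upstream
 * text): the per-page TB lists store a tag in the two low bits of each
 * pointer, which are free because TranslationBlock structures are at
 * least 4-byte aligned.  For page_next the tag (0 or 1) says which of
 * the TB's up-to-two physical pages the link belongs to; in the jump
 * lists a tag of 2 marks the list head stored in jmp_first.
 *
 *   p->first_tb = (TranslationBlock *)((long)tb | n);   // encode
 *   n  = (long)tb & 3;                                  // decode tag
 *   tb = (TranslationBlock *)((long)tb & ~3);           // decode pointer
 */
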
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUArchState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

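/*
 * Worked example (illustrative annotation): set_bits(tab, 3, 7) marks
 * bits 3..9.  start and end fall in different bytes, so the else
 * branch runs:
 *   tab[0] |= 0xff << 3            -> 0xf8  (bits 3-7)
 *   no full 0xff bytes in between  (end1 == 8)
 *   tab[1] |= ~(0xff << (10 & 7))  -> 0x03  (bits 8-9)
 */
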
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

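/*
 * Note (added annotation, not an upstream comment): the bitmap has one
 * bit per byte of the guest page (TARGET_PAGE_SIZE / 8 bytes total),
 * set wherever translated code was taken from.  It lets
 * tb_invalidate_phys_page_fast() decide with a single mask test whether
 * a small write actually touched translated code, instead of walking
 * the TB list on every write to the page.
 */
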
TranslationBlock *tb_gen_code(CPUArchState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

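/*
 * Illustrative note (added annotation, assumed values): a TB whose
 * guest code crosses a page boundary is registered under both physical
 * pages.  E.g. with 4K pages, a 32-byte block starting at pc == 0xffe8
 * ends on the next page, so virt_page2 == 0x10000 and tb_link_page()
 * links the TB into the page lists of both pages; a write to either
 * page will then invalidate it.
 */
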
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUArchState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    uintptr_t pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUArchState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

1282/* add the tb in the target page and protect it if necessary */
ths5fafdf22007-09-16 21:08:06 +00001283static inline void tb_alloc_page(TranslationBlock *tb,
Paul Brook41c1b1c2010-03-12 16:54:58 +00001284 unsigned int n, tb_page_addr_t page_addr)
bellardfd6ce8f2003-05-14 19:00:11 +00001285{
1286 PageDesc *p;
Juan Quintela4429ab42011-06-02 01:53:44 +00001287#ifndef CONFIG_USER_ONLY
1288 bool page_already_protected;
1289#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001290
bellard9fa3e852004-01-04 18:06:42 +00001291 tb->page_addr[n] = page_addr;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001292 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
bellard9fa3e852004-01-04 18:06:42 +00001293 tb->page_next[n] = p->first_tb;
Juan Quintela4429ab42011-06-02 01:53:44 +00001294#ifndef CONFIG_USER_ONLY
1295 page_already_protected = p->first_tb != NULL;
1296#endif
bellard9fa3e852004-01-04 18:06:42 +00001297 p->first_tb = (TranslationBlock *)((long)tb | n);
1298 invalidate_page_bitmap(p);
1299
bellard107db442004-06-22 18:48:46 +00001300#if defined(TARGET_HAS_SMC) || 1
bellardd720b932004-04-25 17:57:43 +00001301
bellard9fa3e852004-01-04 18:06:42 +00001302#if defined(CONFIG_USER_ONLY)
bellardfd6ce8f2003-05-14 19:00:11 +00001303 if (p->flags & PAGE_WRITE) {
pbrook53a59602006-03-25 19:31:22 +00001304 target_ulong addr;
1305 PageDesc *p2;
bellard9fa3e852004-01-04 18:06:42 +00001306 int prot;
1307
bellardfd6ce8f2003-05-14 19:00:11 +00001308 /* force the host page to be non-writable (writes will incur a
 1309 page fault + mprotect overhead) */
pbrook53a59602006-03-25 19:31:22 +00001310 page_addr &= qemu_host_page_mask;
bellardfd6ce8f2003-05-14 19:00:11 +00001311 prot = 0;
pbrook53a59602006-03-25 19:31:22 +00001312 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1313 addr += TARGET_PAGE_SIZE) {
1314
1315 p2 = page_find (addr >> TARGET_PAGE_BITS);
1316 if (!p2)
1317 continue;
1318 prot |= p2->flags;
1319 p2->flags &= ~PAGE_WRITE;
pbrook53a59602006-03-25 19:31:22 +00001320 }
ths5fafdf22007-09-16 21:08:06 +00001321 mprotect(g2h(page_addr), qemu_host_page_size,
bellardfd6ce8f2003-05-14 19:00:11 +00001322 (prot & PAGE_BITS) & ~PAGE_WRITE);
1323#ifdef DEBUG_TB_INVALIDATE
blueswir1ab3d1722007-11-04 07:31:40 +00001324 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
pbrook53a59602006-03-25 19:31:22 +00001325 page_addr);
bellardfd6ce8f2003-05-14 19:00:11 +00001326#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001327 }
bellard9fa3e852004-01-04 18:06:42 +00001328#else
1329 /* if some code is already present, then the pages are already
1330 protected. So we handle the case where only the first TB is
1331 allocated in a physical page */
Juan Quintela4429ab42011-06-02 01:53:44 +00001332 if (!page_already_protected) {
bellard6a00d602005-11-21 23:25:50 +00001333 tlb_protect_code(page_addr);
bellard9fa3e852004-01-04 18:06:42 +00001334 }
1335#endif
bellardd720b932004-04-25 17:57:43 +00001336
1337#endif /* TARGET_HAS_SMC */
bellardfd6ce8f2003-05-14 19:00:11 +00001338}
1339
bellard9fa3e852004-01-04 18:06:42 +00001340/* add a new TB and link it to the physical page tables. phys_page2 is
1341 (-1) to indicate that only one page contains the TB. */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001342void tb_link_page(TranslationBlock *tb,
1343 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
bellardd4e81642003-05-25 16:46:15 +00001344{
bellard9fa3e852004-01-04 18:06:42 +00001345 unsigned int h;
1346 TranslationBlock **ptb;
1347
pbrookc8a706f2008-06-02 16:16:42 +00001348 /* Grab the mmap lock to stop another thread invalidating this TB
1349 before we are done. */
1350 mmap_lock();
bellard9fa3e852004-01-04 18:06:42 +00001351 /* add in the physical hash table */
1352 h = tb_phys_hash_func(phys_pc);
1353 ptb = &tb_phys_hash[h];
1354 tb->phys_hash_next = *ptb;
1355 *ptb = tb;
bellardfd6ce8f2003-05-14 19:00:11 +00001356
1357 /* add in the page list */
bellard9fa3e852004-01-04 18:06:42 +00001358 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1359 if (phys_page2 != -1)
1360 tb_alloc_page(tb, 1, phys_page2);
1361 else
1362 tb->page_addr[1] = -1;
bellard9fa3e852004-01-04 18:06:42 +00001363
bellardd4e81642003-05-25 16:46:15 +00001364 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1365 tb->jmp_next[0] = NULL;
1366 tb->jmp_next[1] = NULL;
1367
1368 /* init original jump addresses */
1369 if (tb->tb_next_offset[0] != 0xffff)
1370 tb_reset_jump(tb, 0);
1371 if (tb->tb_next_offset[1] != 0xffff)
1372 tb_reset_jump(tb, 1);
bellard8a40a182005-11-20 10:35:40 +00001373
1374#ifdef DEBUG_TB_CHECK
1375 tb_page_check();
1376#endif
pbrookc8a706f2008-06-02 16:16:42 +00001377 mmap_unlock();
bellardfd6ce8f2003-05-14 19:00:11 +00001378}
1379
bellarda513fe12003-05-27 23:29:48 +00001380/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1381 tb[1].tc_ptr. Return NULL if not found */
Stefan Weil6375e092012-04-06 22:26:15 +02001382TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
bellarda513fe12003-05-27 23:29:48 +00001383{
1384 int m_min, m_max, m;
1385 unsigned long v;
1386 TranslationBlock *tb;
1387
1388 if (nb_tbs <= 0)
1389 return NULL;
1390 if (tc_ptr < (unsigned long)code_gen_buffer ||
1391 tc_ptr >= (unsigned long)code_gen_ptr)
1392 return NULL;
1393 /* binary search (cf Knuth) */
1394 m_min = 0;
1395 m_max = nb_tbs - 1;
1396 while (m_min <= m_max) {
1397 m = (m_min + m_max) >> 1;
1398 tb = &tbs[m];
1399 v = (unsigned long)tb->tc_ptr;
1400 if (v == tc_ptr)
1401 return tb;
1402 else if (tc_ptr < v) {
1403 m_max = m - 1;
1404 } else {
1405 m_min = m + 1;
1406 }
ths5fafdf22007-09-16 21:08:06 +00001407 }
bellarda513fe12003-05-27 23:29:48 +00001408 return &tbs[m_max];
1409}
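
/* Illustrative sketch (not built): the search above relies on tbs[] being
   sorted by tc_ptr (translated code is allocated linearly out of
   code_gen_buffer), so on a miss m_max is left at the last entry whose
   tc_ptr is <= tc_ptr, i.e. the block containing the address.  The same
   "rightmost element <= key" search over a plain sorted array: */
#if 0
static int find_le(const unsigned long *a, int n, unsigned long key)
{
    int lo = 0, hi = n - 1;

    while (lo <= hi) {
        int mid = (lo + hi) >> 1;
        if (a[mid] == key) {
            return mid;
        } else if (key < a[mid]) {
            hi = mid - 1;
        } else {
            lo = mid + 1;
        }
    }
    return hi;   /* rightmost index with a[hi] <= key, or -1 if none */
}
#endif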
bellard75012672003-06-21 13:11:07 +00001410
bellardea041c02003-06-25 16:16:50 +00001411static void tb_reset_jump_recursive(TranslationBlock *tb);
1412
1413static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1414{
1415 TranslationBlock *tb1, *tb_next, **ptb;
1416 unsigned int n1;
1417
1418 tb1 = tb->jmp_next[n];
1419 if (tb1 != NULL) {
1420 /* find head of list */
1421 for(;;) {
1422 n1 = (long)tb1 & 3;
1423 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1424 if (n1 == 2)
1425 break;
1426 tb1 = tb1->jmp_next[n1];
1427 }
 1428 /* we are now sure that tb jumps to tb1 */
1429 tb_next = tb1;
1430
1431 /* remove tb from the jmp_first list */
1432 ptb = &tb_next->jmp_first;
1433 for(;;) {
1434 tb1 = *ptb;
1435 n1 = (long)tb1 & 3;
1436 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1437 if (n1 == n && tb1 == tb)
1438 break;
1439 ptb = &tb1->jmp_next[n1];
1440 }
1441 *ptb = tb->jmp_next[n];
1442 tb->jmp_next[n] = NULL;
ths3b46e622007-09-17 08:09:54 +00001443
bellardea041c02003-06-25 16:16:50 +00001444 /* suppress the jump to next tb in generated code */
1445 tb_reset_jump(tb, n);
1446
bellard01243112004-01-04 15:48:17 +00001447 /* suppress jumps in the tb we could have jumped to */
bellardea041c02003-06-25 16:16:50 +00001448 tb_reset_jump_recursive(tb_next);
1449 }
1450}
1451
1452static void tb_reset_jump_recursive(TranslationBlock *tb)
1453{
1454 tb_reset_jump_recursive2(tb, 0);
1455 tb_reset_jump_recursive2(tb, 1);
1456}
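
/* Illustrative sketch (not built): jmp_first/jmp_next form a circular
   singly linked list whose pointers carry a tag in the low two bits --
   0 or 1 meaning "reached via jmp_next[n]", 2 marking the TB that owns
   the list.  Walking to the head therefore strips the tag on each hop
   and stops at tag 2, exactly as the loops above do: */
#if 0
static TranslationBlock *jmp_list_head(TranslationBlock *tb1)
{
    unsigned int n1;

    for (;;) {
        n1 = (long)tb1 & 3;                          /* low-bit tag */
        tb1 = (TranslationBlock *)((long)tb1 & ~3);  /* real pointer */
        if (n1 == 2) {
            return tb1;          /* tag 2: the owner of the list */
        }
        tb1 = tb1->jmp_next[n1];
    }
}
#endif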
1457
bellard1fddef42005-04-17 19:16:13 +00001458#if defined(TARGET_HAS_ICE)
Paul Brook94df27f2010-02-28 23:47:45 +00001459#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +01001460static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
Paul Brook94df27f2010-02-28 23:47:45 +00001461{
1462 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1463}
1464#else
Max Filippov1e7855a2012-04-10 02:48:17 +04001465void tb_invalidate_phys_addr(target_phys_addr_t addr)
bellardd720b932004-04-25 17:57:43 +00001466{
Anthony Liguoric227f092009-10-01 16:12:16 -05001467 ram_addr_t ram_addr;
Avi Kivityf3705d52012-03-08 16:16:34 +02001468 MemoryRegionSection *section;
bellardd720b932004-04-25 17:57:43 +00001469
Avi Kivity06ef3522012-02-13 16:11:22 +02001470 section = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf3705d52012-03-08 16:16:34 +02001471 if (!(memory_region_is_ram(section->mr)
1472 || (section->mr->rom_device && section->mr->readable))) {
Avi Kivity06ef3522012-02-13 16:11:22 +02001473 return;
1474 }
Avi Kivityf3705d52012-03-08 16:16:34 +02001475 ram_addr = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
1476 + section_addr(section, addr);
pbrook706cd4b2006-04-08 17:36:21 +00001477 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
bellardd720b932004-04-25 17:57:43 +00001478}
Max Filippov1e7855a2012-04-10 02:48:17 +04001479
1480static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
1481{
1482 tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc));
1483}
bellardc27004e2005-01-03 23:35:10 +00001484#endif
Paul Brook94df27f2010-02-28 23:47:45 +00001485#endif /* TARGET_HAS_ICE */
bellardd720b932004-04-25 17:57:43 +00001486
Paul Brookc527ee82010-03-01 03:31:14 +00001487#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +01001488void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
1490{
1491}
1492
Andreas Färber9349b4f2012-03-14 01:38:32 +01001493int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
Paul Brookc527ee82010-03-01 03:31:14 +00001494 int flags, CPUWatchpoint **watchpoint)
1495{
1496 return -ENOSYS;
1497}
1498#else
pbrook6658ffb2007-03-16 23:58:11 +00001499/* Add a watchpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001500int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
aliguoria1d1bb32008-11-18 20:07:32 +00001501 int flags, CPUWatchpoint **watchpoint)
pbrook6658ffb2007-03-16 23:58:11 +00001502{
aliguorib4051332008-11-18 20:14:20 +00001503 target_ulong len_mask = ~(len - 1);
aliguoric0ce9982008-11-25 22:13:57 +00001504 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001505
aliguorib4051332008-11-18 20:14:20 +00001506 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
Max Filippov0dc23822012-01-29 03:15:23 +04001507 if ((len & (len - 1)) || (addr & ~len_mask) ||
1508 len == 0 || len > TARGET_PAGE_SIZE) {
aliguorib4051332008-11-18 20:14:20 +00001509 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1510 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1511 return -EINVAL;
1512 }
Anthony Liguori7267c092011-08-20 22:09:37 -05001513 wp = g_malloc(sizeof(*wp));
pbrook6658ffb2007-03-16 23:58:11 +00001514
aliguoria1d1bb32008-11-18 20:07:32 +00001515 wp->vaddr = addr;
aliguorib4051332008-11-18 20:14:20 +00001516 wp->len_mask = len_mask;
aliguoria1d1bb32008-11-18 20:07:32 +00001517 wp->flags = flags;
1518
aliguori2dc9f412008-11-18 20:56:59 +00001519 /* keep all GDB-injected watchpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001520 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001521 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001522 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001523 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001524
pbrook6658ffb2007-03-16 23:58:11 +00001525 tlb_flush_page(env, addr);
aliguoria1d1bb32008-11-18 20:07:32 +00001526
1527 if (watchpoint)
1528 *watchpoint = wp;
1529 return 0;
pbrook6658ffb2007-03-16 23:58:11 +00001530}
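
/* Hypothetical usage sketch (not built): lengths must be powers of two no
   larger than TARGET_PAGE_SIZE, and the address must be aligned to the
   length, or the sanity check above rejects the request.  Watching a
   naturally aligned 32-bit variable for writes would look like this: */
#if 0
static int watch_u32_writes(CPUArchState *env, target_ulong addr)
{
    CPUWatchpoint *wp;

    /* addr is assumed 4-byte aligned here, i.e. (addr & 3) == 0 */
    return cpu_watchpoint_insert(env, addr, 4, BP_MEM_WRITE, &wp);
}
#endif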
1531
aliguoria1d1bb32008-11-18 20:07:32 +00001532/* Remove a specific watchpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001533int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
aliguoria1d1bb32008-11-18 20:07:32 +00001534 int flags)
pbrook6658ffb2007-03-16 23:58:11 +00001535{
aliguorib4051332008-11-18 20:14:20 +00001536 target_ulong len_mask = ~(len - 1);
aliguoria1d1bb32008-11-18 20:07:32 +00001537 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001538
Blue Swirl72cf2d42009-09-12 07:36:22 +00001539 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001540 if (addr == wp->vaddr && len_mask == wp->len_mask
aliguori6e140f22008-11-18 20:37:55 +00001541 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
aliguoria1d1bb32008-11-18 20:07:32 +00001542 cpu_watchpoint_remove_by_ref(env, wp);
pbrook6658ffb2007-03-16 23:58:11 +00001543 return 0;
1544 }
1545 }
aliguoria1d1bb32008-11-18 20:07:32 +00001546 return -ENOENT;
pbrook6658ffb2007-03-16 23:58:11 +00001547}
1548
aliguoria1d1bb32008-11-18 20:07:32 +00001549/* Remove a specific watchpoint by reference. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001550void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
aliguoria1d1bb32008-11-18 20:07:32 +00001551{
Blue Swirl72cf2d42009-09-12 07:36:22 +00001552 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
edgar_igl7d03f822008-05-17 18:58:29 +00001553
aliguoria1d1bb32008-11-18 20:07:32 +00001554 tlb_flush_page(env, watchpoint->vaddr);
1555
Anthony Liguori7267c092011-08-20 22:09:37 -05001556 g_free(watchpoint);
edgar_igl7d03f822008-05-17 18:58:29 +00001557}
1558
aliguoria1d1bb32008-11-18 20:07:32 +00001559/* Remove all matching watchpoints. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001560void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
aliguoria1d1bb32008-11-18 20:07:32 +00001561{
aliguoric0ce9982008-11-25 22:13:57 +00001562 CPUWatchpoint *wp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001563
Blue Swirl72cf2d42009-09-12 07:36:22 +00001564 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001565 if (wp->flags & mask)
1566 cpu_watchpoint_remove_by_ref(env, wp);
aliguoric0ce9982008-11-25 22:13:57 +00001567 }
aliguoria1d1bb32008-11-18 20:07:32 +00001568}
Paul Brookc527ee82010-03-01 03:31:14 +00001569#endif
aliguoria1d1bb32008-11-18 20:07:32 +00001570
1571/* Add a breakpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001572int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
aliguoria1d1bb32008-11-18 20:07:32 +00001573 CPUBreakpoint **breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001574{
bellard1fddef42005-04-17 19:16:13 +00001575#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001576 CPUBreakpoint *bp;
ths3b46e622007-09-17 08:09:54 +00001577
Anthony Liguori7267c092011-08-20 22:09:37 -05001578 bp = g_malloc(sizeof(*bp));
aliguoria1d1bb32008-11-18 20:07:32 +00001579
1580 bp->pc = pc;
1581 bp->flags = flags;
1582
aliguori2dc9f412008-11-18 20:56:59 +00001583 /* keep all GDB-injected breakpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001584 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001585 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001586 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001587 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001588
1589 breakpoint_invalidate(env, pc);
1590
1591 if (breakpoint)
1592 *breakpoint = bp;
1593 return 0;
1594#else
1595 return -ENOSYS;
1596#endif
1597}
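
/* Hypothetical usage sketch (not built): a gdbstub-style client keeps the
   returned reference so the breakpoint can later be removed without
   rescanning the list: */
#if 0
static void run_with_breakpoint(CPUArchState *env, target_ulong pc)
{
    CPUBreakpoint *bp;

    if (cpu_breakpoint_insert(env, pc, BP_GDB, &bp) == 0) {
        /* ... resume the guest, wait for EXCP_DEBUG ... */
        cpu_breakpoint_remove_by_ref(env, bp);
    }
}
#endif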
1598
1599/* Remove a specific breakpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001600int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
aliguoria1d1bb32008-11-18 20:07:32 +00001601{
1602#if defined(TARGET_HAS_ICE)
1603 CPUBreakpoint *bp;
1604
Blue Swirl72cf2d42009-09-12 07:36:22 +00001605 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00001606 if (bp->pc == pc && bp->flags == flags) {
1607 cpu_breakpoint_remove_by_ref(env, bp);
bellard4c3a88a2003-07-26 12:06:08 +00001608 return 0;
aliguoria1d1bb32008-11-18 20:07:32 +00001609 }
bellard4c3a88a2003-07-26 12:06:08 +00001610 }
aliguoria1d1bb32008-11-18 20:07:32 +00001611 return -ENOENT;
bellard4c3a88a2003-07-26 12:06:08 +00001612#else
aliguoria1d1bb32008-11-18 20:07:32 +00001613 return -ENOSYS;
bellard4c3a88a2003-07-26 12:06:08 +00001614#endif
1615}
1616
aliguoria1d1bb32008-11-18 20:07:32 +00001617/* Remove a specific breakpoint by reference. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001618void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001619{
bellard1fddef42005-04-17 19:16:13 +00001620#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001621 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
bellardd720b932004-04-25 17:57:43 +00001622
aliguoria1d1bb32008-11-18 20:07:32 +00001623 breakpoint_invalidate(env, breakpoint->pc);
1624
Anthony Liguori7267c092011-08-20 22:09:37 -05001625 g_free(breakpoint);
aliguoria1d1bb32008-11-18 20:07:32 +00001626#endif
1627}
1628
1629/* Remove all matching breakpoints. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001630void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
aliguoria1d1bb32008-11-18 20:07:32 +00001631{
1632#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001633 CPUBreakpoint *bp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001634
Blue Swirl72cf2d42009-09-12 07:36:22 +00001635 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001636 if (bp->flags & mask)
1637 cpu_breakpoint_remove_by_ref(env, bp);
aliguoric0ce9982008-11-25 22:13:57 +00001638 }
bellard4c3a88a2003-07-26 12:06:08 +00001639#endif
1640}
1641
bellardc33a3462003-07-29 20:50:33 +00001642/* enable or disable single step mode. EXCP_DEBUG is returned by the
1643 CPU loop after each instruction */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001644void cpu_single_step(CPUArchState *env, int enabled)
bellardc33a3462003-07-29 20:50:33 +00001645{
bellard1fddef42005-04-17 19:16:13 +00001646#if defined(TARGET_HAS_ICE)
bellardc33a3462003-07-29 20:50:33 +00001647 if (env->singlestep_enabled != enabled) {
1648 env->singlestep_enabled = enabled;
aliguorie22a25c2009-03-12 20:12:48 +00001649 if (kvm_enabled())
1650 kvm_update_guest_debug(env, 0);
1651 else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01001652 /* must flush all the translated code to avoid inconsistencies */
aliguorie22a25c2009-03-12 20:12:48 +00001653 /* XXX: only flush what is necessary */
1654 tb_flush(env);
1655 }
bellardc33a3462003-07-29 20:50:33 +00001656 }
1657#endif
1658}
1659
bellard34865132003-10-05 14:28:56 +00001660/* enable or disable low levels log */
1661void cpu_set_log(int log_flags)
1662{
1663 loglevel = log_flags;
1664 if (loglevel && !logfile) {
pbrook11fcfab2007-07-01 18:21:11 +00001665 logfile = fopen(logfilename, log_append ? "a" : "w");
bellard34865132003-10-05 14:28:56 +00001666 if (!logfile) {
1667 perror(logfilename);
1668 _exit(1);
1669 }
bellard9fa3e852004-01-04 18:06:42 +00001670#if !defined(CONFIG_SOFTMMU)
1671 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1672 {
blueswir1b55266b2008-09-20 08:07:15 +00001673 static char logfile_buf[4096];
bellard9fa3e852004-01-04 18:06:42 +00001674 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1675 }
Stefan Weildaf767b2011-12-03 22:32:37 +01001676#elif defined(_WIN32)
1677 /* Win32 doesn't support line-buffering, so use unbuffered output. */
1678 setvbuf(logfile, NULL, _IONBF, 0);
1679#else
bellard34865132003-10-05 14:28:56 +00001680 setvbuf(logfile, NULL, _IOLBF, 0);
bellard9fa3e852004-01-04 18:06:42 +00001681#endif
pbrooke735b912007-06-30 13:53:24 +00001682 log_append = 1;
1683 }
1684 if (!loglevel && logfile) {
1685 fclose(logfile);
1686 logfile = NULL;
bellard34865132003-10-05 14:28:56 +00001687 }
1688}
1689
1690void cpu_set_log_filename(const char *filename)
1691{
1692 logfilename = strdup(filename);
pbrooke735b912007-06-30 13:53:24 +00001693 if (logfile) {
1694 fclose(logfile);
1695 logfile = NULL;
1696 }
1697 cpu_set_log(loglevel);
bellard34865132003-10-05 14:28:56 +00001698}
bellardc33a3462003-07-29 20:50:33 +00001699
Andreas Färber9349b4f2012-03-14 01:38:32 +01001700static void cpu_unlink_tb(CPUArchState *env)
bellardea041c02003-06-25 16:16:50 +00001701{
pbrookd5975362008-06-07 20:50:51 +00001702 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1703 problem and hope the cpu will stop of its own accord. For userspace
1704 emulation this often isn't actually as bad as it sounds. Often
1705 signals are used primarily to interrupt blocking syscalls. */
aurel323098dba2009-03-07 21:28:24 +00001706 TranslationBlock *tb;
Anthony Liguoric227f092009-10-01 16:12:16 -05001707 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
aurel323098dba2009-03-07 21:28:24 +00001708
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001709 spin_lock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001710 tb = env->current_tb;
1711 /* if the cpu is currently executing code, we must unlink it and
1712 all the potentially executing TB */
Riku Voipiof76cfe52009-12-04 15:16:30 +02001713 if (tb) {
aurel323098dba2009-03-07 21:28:24 +00001714 env->current_tb = NULL;
1715 tb_reset_jump_recursive(tb);
aurel323098dba2009-03-07 21:28:24 +00001716 }
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001717 spin_unlock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001718}
1719
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001720#ifndef CONFIG_USER_ONLY
aurel323098dba2009-03-07 21:28:24 +00001721/* mask must never be zero, except for A20 change call */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001722static void tcg_handle_interrupt(CPUArchState *env, int mask)
aurel323098dba2009-03-07 21:28:24 +00001723{
1724 int old_mask;
1725
1726 old_mask = env->interrupt_request;
1727 env->interrupt_request |= mask;
1728
aliguori8edac962009-04-24 18:03:45 +00001729 /*
1730 * If called from iothread context, wake the target cpu in
 1731 * case it's halted.
1732 */
Jan Kiszkab7680cb2011-03-12 17:43:51 +01001733 if (!qemu_cpu_is_self(env)) {
aliguori8edac962009-04-24 18:03:45 +00001734 qemu_cpu_kick(env);
1735 return;
1736 }
aliguori8edac962009-04-24 18:03:45 +00001737
pbrook2e70f6e2008-06-29 01:03:05 +00001738 if (use_icount) {
pbrook266910c2008-07-09 15:31:50 +00001739 env->icount_decr.u16.high = 0xffff;
pbrook2e70f6e2008-06-29 01:03:05 +00001740 if (!can_do_io(env)
aurel32be214e62009-03-06 21:48:00 +00001741 && (mask & ~old_mask) != 0) {
pbrook2e70f6e2008-06-29 01:03:05 +00001742 cpu_abort(env, "Raised interrupt while not in I/O function");
1743 }
pbrook2e70f6e2008-06-29 01:03:05 +00001744 } else {
aurel323098dba2009-03-07 21:28:24 +00001745 cpu_unlink_tb(env);
bellardea041c02003-06-25 16:16:50 +00001746 }
1747}
1748
Jan Kiszkaec6959d2011-04-13 01:32:56 +02001749CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1750
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001751#else /* CONFIG_USER_ONLY */
1752
Andreas Färber9349b4f2012-03-14 01:38:32 +01001753void cpu_interrupt(CPUArchState *env, int mask)
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001754{
1755 env->interrupt_request |= mask;
1756 cpu_unlink_tb(env);
1757}
1758#endif /* CONFIG_USER_ONLY */
1759
Andreas Färber9349b4f2012-03-14 01:38:32 +01001760void cpu_reset_interrupt(CPUArchState *env, int mask)
bellardb54ad042004-05-20 13:42:52 +00001761{
1762 env->interrupt_request &= ~mask;
1763}
1764
Andreas Färber9349b4f2012-03-14 01:38:32 +01001765void cpu_exit(CPUArchState *env)
aurel323098dba2009-03-07 21:28:24 +00001766{
1767 env->exit_request = 1;
1768 cpu_unlink_tb(env);
1769}
1770
blueswir1c7cd6a32008-10-02 18:27:46 +00001771const CPULogItem cpu_log_items[] = {
ths5fafdf22007-09-16 21:08:06 +00001772 { CPU_LOG_TB_OUT_ASM, "out_asm",
bellardf193c792004-03-21 17:06:25 +00001773 "show generated host assembly code for each compiled TB" },
1774 { CPU_LOG_TB_IN_ASM, "in_asm",
1775 "show target assembly code for each compiled TB" },
ths5fafdf22007-09-16 21:08:06 +00001776 { CPU_LOG_TB_OP, "op",
bellard57fec1f2008-02-01 10:50:11 +00001777 "show micro ops for each compiled TB" },
bellardf193c792004-03-21 17:06:25 +00001778 { CPU_LOG_TB_OP_OPT, "op_opt",
blueswir1e01a1152008-03-14 17:37:11 +00001779 "show micro ops "
1780#ifdef TARGET_I386
1781 "before eflags optimization and "
bellardf193c792004-03-21 17:06:25 +00001782#endif
blueswir1e01a1152008-03-14 17:37:11 +00001783 "after liveness analysis" },
bellardf193c792004-03-21 17:06:25 +00001784 { CPU_LOG_INT, "int",
1785 "show interrupts/exceptions in short format" },
1786 { CPU_LOG_EXEC, "exec",
1787 "show trace before each executed TB (lots of logs)" },
bellard9fddaa02004-05-21 12:59:32 +00001788 { CPU_LOG_TB_CPU, "cpu",
thse91c8a72007-06-03 13:35:16 +00001789 "show CPU state before block translation" },
bellardf193c792004-03-21 17:06:25 +00001790#ifdef TARGET_I386
1791 { CPU_LOG_PCALL, "pcall",
1792 "show protected mode far calls/returns/exceptions" },
aliguorieca1bdf2009-01-26 19:54:31 +00001793 { CPU_LOG_RESET, "cpu_reset",
1794 "show CPU state before CPU resets" },
bellardf193c792004-03-21 17:06:25 +00001795#endif
bellard8e3a9fd2004-10-09 17:32:58 +00001796#ifdef DEBUG_IOPORT
bellardfd872592004-05-12 19:11:15 +00001797 { CPU_LOG_IOPORT, "ioport",
1798 "show all i/o ports accesses" },
bellard8e3a9fd2004-10-09 17:32:58 +00001799#endif
bellardf193c792004-03-21 17:06:25 +00001800 { 0, NULL, NULL },
1801};
1802
1803static int cmp1(const char *s1, int n, const char *s2)
1804{
1805 if (strlen(s2) != n)
1806 return 0;
1807 return memcmp(s1, s2, n) == 0;
1808}
ths3b46e622007-09-17 08:09:54 +00001809
bellardf193c792004-03-21 17:06:25 +00001810/* takes a comma separated list of log masks. Return 0 if error. */
1811int cpu_str_to_log_mask(const char *str)
1812{
blueswir1c7cd6a32008-10-02 18:27:46 +00001813 const CPULogItem *item;
bellardf193c792004-03-21 17:06:25 +00001814 int mask;
1815 const char *p, *p1;
1816
1817 p = str;
1818 mask = 0;
1819 for(;;) {
1820 p1 = strchr(p, ',');
1821 if (!p1)
1822 p1 = p + strlen(p);
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001823 if (cmp1(p, p1 - p, "all")) {
1824 for(item = cpu_log_items; item->mask != 0; item++) {
1825 mask |= item->mask;
1826 }
1827 } else {
1828 for(item = cpu_log_items; item->mask != 0; item++) {
1829 if (cmp1(p, p1 - p, item->name))
1830 goto found;
1831 }
1832 return 0;
bellardf193c792004-03-21 17:06:25 +00001833 }
bellardf193c792004-03-21 17:06:25 +00001834 found:
1835 mask |= item->mask;
1836 if (*p1 != ',')
1837 break;
1838 p = p1 + 1;
1839 }
1840 return mask;
1841}
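
/* Hypothetical usage sketch (not built): the -d command line option boils
   down to this -- parse the comma-separated names against cpu_log_items
   and enable whatever matched: */
#if 0
static int enable_logging(const char *arg)
{
    int mask = cpu_str_to_log_mask(arg);   /* e.g. "in_asm,exec" */

    if (!mask) {
        return -1;   /* unknown log item name */
    }
    cpu_set_log(mask);
    return 0;
}
#endif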
bellardea041c02003-06-25 16:16:50 +00001842
Andreas Färber9349b4f2012-03-14 01:38:32 +01001843void cpu_abort(CPUArchState *env, const char *fmt, ...)
bellard75012672003-06-21 13:11:07 +00001844{
1845 va_list ap;
pbrook493ae1f2007-11-23 16:53:59 +00001846 va_list ap2;
bellard75012672003-06-21 13:11:07 +00001847
1848 va_start(ap, fmt);
pbrook493ae1f2007-11-23 16:53:59 +00001849 va_copy(ap2, ap);
bellard75012672003-06-21 13:11:07 +00001850 fprintf(stderr, "qemu: fatal: ");
1851 vfprintf(stderr, fmt, ap);
1852 fprintf(stderr, "\n");
1853#ifdef TARGET_I386
bellard7fe48482004-10-09 18:08:01 +00001854 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1855#else
1856 cpu_dump_state(env, stderr, fprintf, 0);
bellard75012672003-06-21 13:11:07 +00001857#endif
aliguori93fcfe32009-01-15 22:34:14 +00001858 if (qemu_log_enabled()) {
1859 qemu_log("qemu: fatal: ");
1860 qemu_log_vprintf(fmt, ap2);
1861 qemu_log("\n");
j_mayerf9373292007-09-29 12:18:20 +00001862#ifdef TARGET_I386
aliguori93fcfe32009-01-15 22:34:14 +00001863 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
j_mayerf9373292007-09-29 12:18:20 +00001864#else
aliguori93fcfe32009-01-15 22:34:14 +00001865 log_cpu_state(env, 0);
j_mayerf9373292007-09-29 12:18:20 +00001866#endif
aliguori31b1a7b2009-01-15 22:35:09 +00001867 qemu_log_flush();
aliguori93fcfe32009-01-15 22:34:14 +00001868 qemu_log_close();
balrog924edca2007-06-10 14:07:13 +00001869 }
pbrook493ae1f2007-11-23 16:53:59 +00001870 va_end(ap2);
j_mayerf9373292007-09-29 12:18:20 +00001871 va_end(ap);
Riku Voipiofd052bf2010-01-25 14:30:49 +02001872#if defined(CONFIG_USER_ONLY)
1873 {
1874 struct sigaction act;
1875 sigfillset(&act.sa_mask);
1876 act.sa_handler = SIG_DFL;
1877 sigaction(SIGABRT, &act, NULL);
1878 }
1879#endif
bellard75012672003-06-21 13:11:07 +00001880 abort();
1881}
1882
Andreas Färber9349b4f2012-03-14 01:38:32 +01001883CPUArchState *cpu_copy(CPUArchState *env)
thsc5be9f02007-02-28 20:20:53 +00001884{
Andreas Färber9349b4f2012-03-14 01:38:32 +01001885 CPUArchState *new_env = cpu_init(env->cpu_model_str);
1886 CPUArchState *next_cpu = new_env->next_cpu;
thsc5be9f02007-02-28 20:20:53 +00001887 int cpu_index = new_env->cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001888#if defined(TARGET_HAS_ICE)
1889 CPUBreakpoint *bp;
1890 CPUWatchpoint *wp;
1891#endif
1892
Andreas Färber9349b4f2012-03-14 01:38:32 +01001893 memcpy(new_env, env, sizeof(CPUArchState));
aliguori5a38f082009-01-15 20:16:51 +00001894
1895 /* Preserve chaining and index. */
thsc5be9f02007-02-28 20:20:53 +00001896 new_env->next_cpu = next_cpu;
1897 new_env->cpu_index = cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001898
1899 /* Clone all break/watchpoints.
1900 Note: Once we support ptrace with hw-debug register access, make sure
1901 BP_CPU break/watchpoints are handled correctly on clone. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00001902 QTAILQ_INIT(&env->breakpoints);
1903 QTAILQ_INIT(&env->watchpoints);
aliguori5a38f082009-01-15 20:16:51 +00001904#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001905 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001906 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1907 }
Blue Swirl72cf2d42009-09-12 07:36:22 +00001908 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001909 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1910 wp->flags, NULL);
1911 }
1912#endif
1913
thsc5be9f02007-02-28 20:20:53 +00001914 return new_env;
1915}
1916
bellard01243112004-01-04 15:48:17 +00001917#if !defined(CONFIG_USER_ONLY)
1918
Andreas Färber9349b4f2012-03-14 01:38:32 +01001919static inline void tlb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
edgar_igl5c751e92008-05-06 08:44:21 +00001920{
1921 unsigned int i;
1922
1923 /* Discard jump cache entries for any tb which might potentially
1924 overlap the flushed page. */
1925 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1926 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001927 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001928
1929 i = tb_jmp_cache_hash_page(addr);
1930 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001931 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001932}
1933
Igor Kovalenko08738982009-07-12 02:15:40 +04001934static CPUTLBEntry s_cputlb_empty_entry = {
1935 .addr_read = -1,
1936 .addr_write = -1,
1937 .addr_code = -1,
1938 .addend = -1,
1939};
1940
Peter Maydell771124e2012-01-17 13:23:13 +00001941/* NOTE:
1942 * If flush_global is true (the usual case), flush all tlb entries.
1943 * If flush_global is false, flush (at least) all tlb entries not
1944 * marked global.
1945 *
1946 * Since QEMU doesn't currently implement a global/not-global flag
1947 * for tlb entries, at the moment tlb_flush() will also flush all
1948 * tlb entries in the flush_global == false case. This is OK because
1949 * CPU architectures generally permit an implementation to drop
1950 * entries from the TLB at any time, so flushing more entries than
1951 * required is only an efficiency issue, not a correctness issue.
1952 */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001953void tlb_flush(CPUArchState *env, int flush_global)
bellard33417e72003-08-10 21:47:01 +00001954{
bellard33417e72003-08-10 21:47:01 +00001955 int i;
bellard01243112004-01-04 15:48:17 +00001956
bellard9fa3e852004-01-04 18:06:42 +00001957#if defined(DEBUG_TLB)
1958 printf("tlb_flush:\n");
1959#endif
bellard01243112004-01-04 15:48:17 +00001960 /* must reset current TB so that interrupts cannot modify the
1961 links while we are modifying them */
1962 env->current_tb = NULL;
1963
bellard33417e72003-08-10 21:47:01 +00001964 for(i = 0; i < CPU_TLB_SIZE; i++) {
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001965 int mmu_idx;
1966 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
Igor Kovalenko08738982009-07-12 02:15:40 +04001967 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001968 }
bellard33417e72003-08-10 21:47:01 +00001969 }
bellard9fa3e852004-01-04 18:06:42 +00001970
bellard8a40a182005-11-20 10:35:40 +00001971 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
bellard9fa3e852004-01-04 18:06:42 +00001972
Paul Brookd4c430a2010-03-17 02:14:28 +00001973 env->tlb_flush_addr = -1;
1974 env->tlb_flush_mask = 0;
bellarde3db7222005-01-26 22:00:47 +00001975 tlb_flush_count++;
bellard33417e72003-08-10 21:47:01 +00001976}
1977
bellard274da6b2004-05-20 21:56:27 +00001978static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
bellard61382a52003-10-27 21:22:23 +00001979{
ths5fafdf22007-09-16 21:08:06 +00001980 if (addr == (tlb_entry->addr_read &
bellard84b7b8e2005-11-28 21:19:04 +00001981 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00001982 addr == (tlb_entry->addr_write &
bellard84b7b8e2005-11-28 21:19:04 +00001983 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00001984 addr == (tlb_entry->addr_code &
bellard84b7b8e2005-11-28 21:19:04 +00001985 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
Igor Kovalenko08738982009-07-12 02:15:40 +04001986 *tlb_entry = s_cputlb_empty_entry;
bellard84b7b8e2005-11-28 21:19:04 +00001987 }
bellard61382a52003-10-27 21:22:23 +00001988}
1989
Andreas Färber9349b4f2012-03-14 01:38:32 +01001990void tlb_flush_page(CPUArchState *env, target_ulong addr)
bellard33417e72003-08-10 21:47:01 +00001991{
bellard8a40a182005-11-20 10:35:40 +00001992 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001993 int mmu_idx;
bellard01243112004-01-04 15:48:17 +00001994
bellard9fa3e852004-01-04 18:06:42 +00001995#if defined(DEBUG_TLB)
bellard108c49b2005-07-24 12:55:09 +00001996 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
bellard9fa3e852004-01-04 18:06:42 +00001997#endif
Paul Brookd4c430a2010-03-17 02:14:28 +00001998 /* Check if we need to flush due to large pages. */
1999 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
2000#if defined(DEBUG_TLB)
2001 printf("tlb_flush_page: forced full flush ("
2002 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
2003 env->tlb_flush_addr, env->tlb_flush_mask);
2004#endif
2005 tlb_flush(env, 1);
2006 return;
2007 }
bellard01243112004-01-04 15:48:17 +00002008 /* must reset current TB so that interrupts cannot modify the
2009 links while we are modifying them */
2010 env->current_tb = NULL;
bellard33417e72003-08-10 21:47:01 +00002011
bellard61382a52003-10-27 21:22:23 +00002012 addr &= TARGET_PAGE_MASK;
bellard33417e72003-08-10 21:47:01 +00002013 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002014 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2015 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
bellard01243112004-01-04 15:48:17 +00002016
edgar_igl5c751e92008-05-06 08:44:21 +00002017 tlb_flush_jmp_cache(env, addr);
bellard9fa3e852004-01-04 18:06:42 +00002018}
2019
bellard9fa3e852004-01-04 18:06:42 +00002020/* update the TLBs so that writes to code in the virtual page 'addr'
2021 can be detected */
Anthony Liguoric227f092009-10-01 16:12:16 -05002022static void tlb_protect_code(ram_addr_t ram_addr)
bellard61382a52003-10-27 21:22:23 +00002023{
ths5fafdf22007-09-16 21:08:06 +00002024 cpu_physical_memory_reset_dirty(ram_addr,
bellard6a00d602005-11-21 23:25:50 +00002025 ram_addr + TARGET_PAGE_SIZE,
2026 CODE_DIRTY_FLAG);
bellard9fa3e852004-01-04 18:06:42 +00002027}
2028
bellard9fa3e852004-01-04 18:06:42 +00002029/* update the TLB so that writes in physical page 'phys_addr' are no longer
bellard3a7d9292005-08-21 09:26:42 +00002030 tested for self modifying code */
Andreas Färber9349b4f2012-03-14 01:38:32 +01002031static void tlb_unprotect_code_phys(CPUArchState *env, ram_addr_t ram_addr,
bellard3a7d9292005-08-21 09:26:42 +00002032 target_ulong vaddr)
bellard9fa3e852004-01-04 18:06:42 +00002033{
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002034 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
bellard1ccde1c2004-02-06 19:46:14 +00002035}
2036
Avi Kivity7859cc62012-03-14 16:19:39 +02002037static bool tlb_is_dirty_ram(CPUTLBEntry *tlbe)
2038{
2039 return (tlbe->addr_write & (TLB_INVALID_MASK|TLB_MMIO|TLB_NOTDIRTY)) == 0;
2040}
2041
ths5fafdf22007-09-16 21:08:06 +00002042static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
bellard1ccde1c2004-02-06 19:46:14 +00002043 unsigned long start, unsigned long length)
2044{
2045 unsigned long addr;
Avi Kivity7859cc62012-03-14 16:19:39 +02002046 if (tlb_is_dirty_ram(tlb_entry)) {
bellard84b7b8e2005-11-28 21:19:04 +00002047 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
bellard1ccde1c2004-02-06 19:46:14 +00002048 if ((addr - start) < length) {
Avi Kivity7859cc62012-03-14 16:19:39 +02002049 tlb_entry->addr_write |= TLB_NOTDIRTY;
bellard1ccde1c2004-02-06 19:46:14 +00002050 }
2051 }
2052}
2053
pbrook5579c7f2009-04-11 14:47:08 +00002054/* Note: start and end must be within the same ram block. */
Anthony Liguoric227f092009-10-01 16:12:16 -05002055void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
bellard0a962c02005-02-10 22:00:27 +00002056 int dirty_flags)
bellard1ccde1c2004-02-06 19:46:14 +00002057{
Andreas Färber9349b4f2012-03-14 01:38:32 +01002058 CPUArchState *env;
bellard4f2ac232004-04-26 19:44:02 +00002059 unsigned long length, start1;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002060 int i;
bellard1ccde1c2004-02-06 19:46:14 +00002061
2062 start &= TARGET_PAGE_MASK;
2063 end = TARGET_PAGE_ALIGN(end);
2064
2065 length = end - start;
2066 if (length == 0)
2067 return;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002068 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00002069
bellard1ccde1c2004-02-06 19:46:14 +00002070 /* we modify the TLB cache so that the dirty bit will be set again
2071 when accessing the range */
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02002072 start1 = (unsigned long)qemu_safe_ram_ptr(start);
Stefan Weila57d23e2011-04-30 22:49:26 +02002073 /* Check that we don't span multiple blocks - this breaks the
pbrook5579c7f2009-04-11 14:47:08 +00002074 address comparisons below. */
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02002075 if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
pbrook5579c7f2009-04-11 14:47:08 +00002076 != (end - 1) - start) {
2077 abort();
2078 }
2079
bellard6a00d602005-11-21 23:25:50 +00002080 for(env = first_cpu; env != NULL; env = env->next_cpu) {
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002081 int mmu_idx;
2082 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2083 for(i = 0; i < CPU_TLB_SIZE; i++)
2084 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2085 start1, length);
2086 }
bellard6a00d602005-11-21 23:25:50 +00002087 }
bellard1ccde1c2004-02-06 19:46:14 +00002088}
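
/* Illustrative note (not normative): after the reset above, the next guest
   store to a page in [start, end) misses the fast path because addr_write
   now carries TLB_NOTDIRTY; the slow path re-marks the page dirty and
   clears the flag, so only the first write per page pays the extra cost. */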
2089
aliguori74576192008-10-06 14:02:03 +00002090int cpu_physical_memory_set_dirty_tracking(int enable)
2091{
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002092 int ret = 0;
aliguori74576192008-10-06 14:02:03 +00002093 in_migration = enable;
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002094 return ret;
aliguori74576192008-10-06 14:02:03 +00002095}
2096
bellard3a7d9292005-08-21 09:26:42 +00002097static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2098{
Anthony Liguoric227f092009-10-01 16:12:16 -05002099 ram_addr_t ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00002100 void *p;
bellard3a7d9292005-08-21 09:26:42 +00002101
Avi Kivity7859cc62012-03-14 16:19:39 +02002102 if (tlb_is_dirty_ram(tlb_entry)) {
pbrook5579c7f2009-04-11 14:47:08 +00002103 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2104 + tlb_entry->addend);
Marcelo Tosattie8902612010-10-11 15:31:19 -03002105 ram_addr = qemu_ram_addr_from_host_nofail(p);
bellard3a7d9292005-08-21 09:26:42 +00002106 if (!cpu_physical_memory_is_dirty(ram_addr)) {
pbrook0f459d12008-06-09 00:20:13 +00002107 tlb_entry->addr_write |= TLB_NOTDIRTY;
bellard3a7d9292005-08-21 09:26:42 +00002108 }
2109 }
2110}
2111
2112/* update the TLB according to the current state of the dirty bits */
Andreas Färber9349b4f2012-03-14 01:38:32 +01002113void cpu_tlb_update_dirty(CPUArchState *env)
bellard3a7d9292005-08-21 09:26:42 +00002114{
2115 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002116 int mmu_idx;
2117 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2118 for(i = 0; i < CPU_TLB_SIZE; i++)
2119 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2120 }
bellard3a7d9292005-08-21 09:26:42 +00002121}
2122
pbrook0f459d12008-06-09 00:20:13 +00002123static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002124{
pbrook0f459d12008-06-09 00:20:13 +00002125 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2126 tlb_entry->addr_write = vaddr;
bellard1ccde1c2004-02-06 19:46:14 +00002127}
2128
pbrook0f459d12008-06-09 00:20:13 +00002129/* update the TLB corresponding to virtual page vaddr
2130 so that it is no longer dirty */
Andreas Färber9349b4f2012-03-14 01:38:32 +01002131static inline void tlb_set_dirty(CPUArchState *env, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002132{
bellard1ccde1c2004-02-06 19:46:14 +00002133 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002134 int mmu_idx;
bellard1ccde1c2004-02-06 19:46:14 +00002135
pbrook0f459d12008-06-09 00:20:13 +00002136 vaddr &= TARGET_PAGE_MASK;
bellard1ccde1c2004-02-06 19:46:14 +00002137 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002138 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2139 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
bellard9fa3e852004-01-04 18:06:42 +00002140}
2141
Paul Brookd4c430a2010-03-17 02:14:28 +00002142/* Our TLB does not support large pages, so remember the area covered by
2143 large pages and trigger a full TLB flush if these are invalidated. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01002144static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
Paul Brookd4c430a2010-03-17 02:14:28 +00002145 target_ulong size)
2146{
2147 target_ulong mask = ~(size - 1);
2148
2149 if (env->tlb_flush_addr == (target_ulong)-1) {
2150 env->tlb_flush_addr = vaddr & mask;
2151 env->tlb_flush_mask = mask;
2152 return;
2153 }
2154 /* Extend the existing region to include the new page.
2155 This is a compromise between unnecessary flushes and the cost
2156 of maintaining a full variable size TLB. */
2157 mask &= env->tlb_flush_mask;
2158 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2159 mask <<= 1;
2160 }
2161 env->tlb_flush_addr &= mask;
2162 env->tlb_flush_mask = mask;
2163}
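
/* Worked example for the loop above (illustrative numbers): with an
   existing 2MB region at 0x40000000 (tlb_flush_addr = 0x40000000,
   tlb_flush_mask = ~0x1fffff) and a new 2MB page at 0x40400000, the
   addresses differ in bit 22, so the mask is shifted left twice to
   ~0x7fffff; the tracked region becomes the 8MB range at 0x40000000,
   covering both pages at the cost of a coarser full-flush trigger. */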
2164
Avi Kivity06ef3522012-02-13 16:11:22 +02002165static bool is_ram_rom(MemoryRegionSection *s)
Avi Kivity1d393fa2012-01-01 21:15:42 +02002166{
Avi Kivity06ef3522012-02-13 16:11:22 +02002167 return memory_region_is_ram(s->mr);
Avi Kivity1d393fa2012-01-01 21:15:42 +02002168}
2169
Avi Kivity06ef3522012-02-13 16:11:22 +02002170static bool is_romd(MemoryRegionSection *s)
Avi Kivity75c578d2012-01-02 15:40:52 +02002171{
Avi Kivity06ef3522012-02-13 16:11:22 +02002172 MemoryRegion *mr = s->mr;
Avi Kivity75c578d2012-01-02 15:40:52 +02002173
Avi Kivity75c578d2012-01-02 15:40:52 +02002174 return mr->rom_device && mr->readable;
2175}
2176
Avi Kivity06ef3522012-02-13 16:11:22 +02002177static bool is_ram_rom_romd(MemoryRegionSection *s)
Avi Kivity1d393fa2012-01-01 21:15:42 +02002178{
Avi Kivity06ef3522012-02-13 16:11:22 +02002179 return is_ram_rom(s) || is_romd(s);
Avi Kivity1d393fa2012-01-01 21:15:42 +02002180}
2181
Paul Brookd4c430a2010-03-17 02:14:28 +00002182/* Add a new TLB entry. At most one entry for a given virtual address
 2183 is permitted. Only a single TARGET_PAGE_SIZE region is mapped; the
2184 supplied size is only used by tlb_flush_page. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01002185void tlb_set_page(CPUArchState *env, target_ulong vaddr,
Paul Brookd4c430a2010-03-17 02:14:28 +00002186 target_phys_addr_t paddr, int prot,
2187 int mmu_idx, target_ulong size)
bellard9fa3e852004-01-04 18:06:42 +00002188{
Avi Kivityf3705d52012-03-08 16:16:34 +02002189 MemoryRegionSection *section;
bellard9fa3e852004-01-04 18:06:42 +00002190 unsigned int index;
bellard4f2ac232004-04-26 19:44:02 +00002191 target_ulong address;
pbrook0f459d12008-06-09 00:20:13 +00002192 target_ulong code_address;
Paul Brook355b1942010-04-05 00:28:53 +01002193 unsigned long addend;
bellard84b7b8e2005-11-28 21:19:04 +00002194 CPUTLBEntry *te;
aliguoria1d1bb32008-11-18 20:07:32 +00002195 CPUWatchpoint *wp;
Anthony Liguoric227f092009-10-01 16:12:16 -05002196 target_phys_addr_t iotlb;
bellard9fa3e852004-01-04 18:06:42 +00002197
Paul Brookd4c430a2010-03-17 02:14:28 +00002198 assert(size >= TARGET_PAGE_SIZE);
2199 if (size != TARGET_PAGE_SIZE) {
2200 tlb_add_large_page(env, vaddr, size);
2201 }
Avi Kivity06ef3522012-02-13 16:11:22 +02002202 section = phys_page_find(paddr >> TARGET_PAGE_BITS);
bellard9fa3e852004-01-04 18:06:42 +00002203#if defined(DEBUG_TLB)
Stefan Weil7fd3f492010-09-30 22:39:51 +02002204 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
 2205 " prot=%x idx=%d\n",
 2206 vaddr, paddr, prot, mmu_idx);
bellard9fa3e852004-01-04 18:06:42 +00002207#endif
2208
pbrook0f459d12008-06-09 00:20:13 +00002209 address = vaddr;
Avi Kivityf3705d52012-03-08 16:16:34 +02002210 if (!is_ram_rom_romd(section)) {
pbrook0f459d12008-06-09 00:20:13 +00002211 /* IO memory case (romd handled later) */
2212 address |= TLB_MMIO;
2213 }
Avi Kivityf3705d52012-03-08 16:16:34 +02002214 if (is_ram_rom_romd(section)) {
2215 addend = (unsigned long)memory_region_get_ram_ptr(section->mr)
2216 + section_addr(section, paddr);
Avi Kivity06ef3522012-02-13 16:11:22 +02002217 } else {
2218 addend = 0;
2219 }
Avi Kivityf3705d52012-03-08 16:16:34 +02002220 if (is_ram_rom(section)) {
pbrook0f459d12008-06-09 00:20:13 +00002221 /* Normal RAM. */
Avi Kivityf3705d52012-03-08 16:16:34 +02002222 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
2223 + section_addr(section, paddr);
2224 if (!section->readonly)
Avi Kivityaa102232012-03-08 17:06:55 +02002225 iotlb |= phys_section_notdirty;
pbrook0f459d12008-06-09 00:20:13 +00002226 else
Avi Kivityaa102232012-03-08 17:06:55 +02002227 iotlb |= phys_section_rom;
pbrook0f459d12008-06-09 00:20:13 +00002228 } else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002229 /* IO handlers are currently passed a physical address.
pbrook0f459d12008-06-09 00:20:13 +00002230 It would be nice to pass an offset from the base address
2231 of that region. This would avoid having to special case RAM,
2232 and avoid full address decoding in every device.
 2233 Until then, the iotlb entry simply encodes which
 2234 MemoryRegionSection to use plus the offset into it. */
Avi Kivityaa102232012-03-08 17:06:55 +02002235 iotlb = section - phys_sections;
Avi Kivityf3705d52012-03-08 16:16:34 +02002236 iotlb += section_addr(section, paddr);
pbrook0f459d12008-06-09 00:20:13 +00002237 }
pbrook6658ffb2007-03-16 23:58:11 +00002238
pbrook0f459d12008-06-09 00:20:13 +00002239 code_address = address;
2240 /* Make accesses to pages with watchpoints go via the
2241 watchpoint trap routines. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00002242 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00002243 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
Jun Koibf298f82010-05-06 14:36:59 +09002244 /* Avoid trapping reads of pages with a write breakpoint. */
2245 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
Avi Kivityaa102232012-03-08 17:06:55 +02002246 iotlb = phys_section_watch + paddr;
Jun Koibf298f82010-05-06 14:36:59 +09002247 address |= TLB_MMIO;
2248 break;
2249 }
pbrook6658ffb2007-03-16 23:58:11 +00002250 }
pbrook0f459d12008-06-09 00:20:13 +00002251 }
balrogd79acba2007-06-26 20:01:13 +00002252
pbrook0f459d12008-06-09 00:20:13 +00002253 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2254 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2255 te = &env->tlb_table[mmu_idx][index];
2256 te->addend = addend - vaddr;
2257 if (prot & PAGE_READ) {
2258 te->addr_read = address;
2259 } else {
2260 te->addr_read = -1;
2261 }
edgar_igl5c751e92008-05-06 08:44:21 +00002262
pbrook0f459d12008-06-09 00:20:13 +00002263 if (prot & PAGE_EXEC) {
2264 te->addr_code = code_address;
2265 } else {
2266 te->addr_code = -1;
2267 }
2268 if (prot & PAGE_WRITE) {
Avi Kivityf3705d52012-03-08 16:16:34 +02002269 if ((memory_region_is_ram(section->mr) && section->readonly)
2270 || is_romd(section)) {
pbrook0f459d12008-06-09 00:20:13 +00002271 /* Write access calls the I/O callback. */
2272 te->addr_write = address | TLB_MMIO;
Avi Kivityf3705d52012-03-08 16:16:34 +02002273 } else if (memory_region_is_ram(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002274 && !cpu_physical_memory_is_dirty(
Avi Kivityf3705d52012-03-08 16:16:34 +02002275 section->mr->ram_addr
2276 + section_addr(section, paddr))) {
pbrook0f459d12008-06-09 00:20:13 +00002277 te->addr_write = address | TLB_NOTDIRTY;
bellard84b7b8e2005-11-28 21:19:04 +00002278 } else {
pbrook0f459d12008-06-09 00:20:13 +00002279 te->addr_write = address;
bellard9fa3e852004-01-04 18:06:42 +00002280 }
pbrook0f459d12008-06-09 00:20:13 +00002281 } else {
2282 te->addr_write = -1;
bellard9fa3e852004-01-04 18:06:42 +00002283 }
bellard9fa3e852004-01-04 18:06:42 +00002284}
2285
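/* Hypothetical usage sketch (not built): tlb_set_page() is normally
   reached from a target's tlb_fill() after the guest MMU walk.  The
   common page-sized case looks roughly like this; 'phys', 'prot' and
   'mmu_idx' come from the target's own translation code: */
#if 0
static void fill_one_page(CPUArchState *env, target_ulong vaddr,
                          target_phys_addr_t phys, int prot, int mmu_idx)
{
    tlb_set_page(env, vaddr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);
}
#endif
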
bellard01243112004-01-04 15:48:17 +00002286#else
2287
Andreas Färber9349b4f2012-03-14 01:38:32 +01002288void tlb_flush(CPUArchState *env, int flush_global)
bellard01243112004-01-04 15:48:17 +00002289{
2290}
2291
Andreas Färber9349b4f2012-03-14 01:38:32 +01002292void tlb_flush_page(CPUArchState *env, target_ulong addr)
bellard01243112004-01-04 15:48:17 +00002293{
2294}
2295
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002296/*
2297 * Walks guest process memory "regions" one by one
2298 * and calls callback function 'fn' for each region.
2299 */
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002300
2301struct walk_memory_regions_data
bellard9fa3e852004-01-04 18:06:42 +00002302{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002303 walk_memory_regions_fn fn;
2304 void *priv;
2305 unsigned long start;
2306 int prot;
2307};
bellard9fa3e852004-01-04 18:06:42 +00002308
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002309static int walk_memory_regions_end(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00002310 abi_ulong end, int new_prot)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002311{
2312 if (data->start != -1ul) {
2313 int rc = data->fn(data->priv, data->start, end, data->prot);
2314 if (rc != 0) {
2315 return rc;
bellard9fa3e852004-01-04 18:06:42 +00002316 }
bellard33417e72003-08-10 21:47:01 +00002317 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002318
2319 data->start = (new_prot ? end : -1ul);
2320 data->prot = new_prot;
2321
2322 return 0;
2323}
2324
2325static int walk_memory_regions_1(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00002326 abi_ulong base, int level, void **lp)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002327{
Paul Brookb480d9b2010-03-12 23:23:29 +00002328 abi_ulong pa;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002329 int i, rc;
2330
2331 if (*lp == NULL) {
2332 return walk_memory_regions_end(data, base, 0);
2333 }
2334
2335 if (level == 0) {
2336 PageDesc *pd = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00002337 for (i = 0; i < L2_SIZE; ++i) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002338 int prot = pd[i].flags;
2339
2340 pa = base | (i << TARGET_PAGE_BITS);
2341 if (prot != data->prot) {
2342 rc = walk_memory_regions_end(data, pa, prot);
2343 if (rc != 0) {
2344 return rc;
2345 }
2346 }
2347 }
2348 } else {
2349 void **pp = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00002350 for (i = 0; i < L2_SIZE; ++i) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002351 pa = base | ((abi_ulong)i <<
2352 (TARGET_PAGE_BITS + L2_BITS * level));
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002353 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2354 if (rc != 0) {
2355 return rc;
2356 }
2357 }
2358 }
2359
2360 return 0;
2361}
2362
2363int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2364{
2365 struct walk_memory_regions_data data;
2366 unsigned long i;
2367
2368 data.fn = fn;
2369 data.priv = priv;
2370 data.start = -1ul;
2371 data.prot = 0;
2372
2373 for (i = 0; i < V_L1_SIZE; i++) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002374 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002375 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2376 if (rc != 0) {
2377 return rc;
2378 }
2379 }
2380
2381 return walk_memory_regions_end(&data, 0, 0);
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002382}
2383
Paul Brookb480d9b2010-03-12 23:23:29 +00002384static int dump_region(void *priv, abi_ulong start,
2385 abi_ulong end, unsigned long prot)
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002386{
2387 FILE *f = (FILE *)priv;
2388
Paul Brookb480d9b2010-03-12 23:23:29 +00002389 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2390 " "TARGET_ABI_FMT_lx" %c%c%c\n",
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002391 start, end, end - start,
2392 ((prot & PAGE_READ) ? 'r' : '-'),
2393 ((prot & PAGE_WRITE) ? 'w' : '-'),
2394 ((prot & PAGE_EXEC) ? 'x' : '-'));
2395
2396 return (0);
2397}
2398
2399/* dump memory mappings */
2400void page_dump(FILE *f)
2401{
2402 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2403 "start", "end", "size", "prot");
2404 walk_memory_regions(f, dump_region);
bellard33417e72003-08-10 21:47:01 +00002405}
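
/* Hypothetical usage sketch (not built): page_dump() above is just one
   walk_memory_regions() client; any (priv, start, end, prot) callback
   works, e.g. summing the executable footprint of the guest image: */
#if 0
static int count_exec(void *priv, abi_ulong start, abi_ulong end,
                      unsigned long prot)
{
    abi_ulong *total = priv;

    if (prot & PAGE_EXEC) {
        *total += end - start;
    }
    return 0;   /* a non-zero return would abort the walk */
}
#endif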
2406
pbrook53a59602006-03-25 19:31:22 +00002407int page_get_flags(target_ulong address)
bellard33417e72003-08-10 21:47:01 +00002408{
bellard9fa3e852004-01-04 18:06:42 +00002409 PageDesc *p;
2410
2411 p = page_find(address >> TARGET_PAGE_BITS);
bellard33417e72003-08-10 21:47:01 +00002412 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00002413 return 0;
2414 return p->flags;
bellard33417e72003-08-10 21:47:01 +00002415}
2416
Richard Henderson376a7902010-03-10 15:57:04 -08002417/* Modify the flags of a page and invalidate the code if necessary.
2418 The flag PAGE_WRITE_ORG is positioned automatically depending
2419 on PAGE_WRITE. The mmap_lock should already be held. */
pbrook53a59602006-03-25 19:31:22 +00002420void page_set_flags(target_ulong start, target_ulong end, int flags)
bellard9fa3e852004-01-04 18:06:42 +00002421{
Richard Henderson376a7902010-03-10 15:57:04 -08002422 target_ulong addr, len;
bellard9fa3e852004-01-04 18:06:42 +00002423
Richard Henderson376a7902010-03-10 15:57:04 -08002424 /* This function should never be called with addresses outside the
2425 guest address space. If this assert fires, it probably indicates
2426 a missing call to h2g_valid. */
Paul Brookb480d9b2010-03-12 23:23:29 +00002427#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2428 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002429#endif
2430 assert(start < end);
2431
bellard9fa3e852004-01-04 18:06:42 +00002432 start = start & TARGET_PAGE_MASK;
2433 end = TARGET_PAGE_ALIGN(end);
Richard Henderson376a7902010-03-10 15:57:04 -08002434
2435 if (flags & PAGE_WRITE) {
bellard9fa3e852004-01-04 18:06:42 +00002436 flags |= PAGE_WRITE_ORG;
Richard Henderson376a7902010-03-10 15:57:04 -08002437 }
2438
2439 for (addr = start, len = end - start;
2440 len != 0;
2441 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2442 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2443
2444 /* If the write protection bit is set, then we invalidate
2445 the code inside. */
ths5fafdf22007-09-16 21:08:06 +00002446 if (!(p->flags & PAGE_WRITE) &&
bellard9fa3e852004-01-04 18:06:42 +00002447 (flags & PAGE_WRITE) &&
2448 p->first_tb) {
bellardd720b932004-04-25 17:57:43 +00002449 tb_invalidate_phys_page(addr, 0, NULL);
bellard9fa3e852004-01-04 18:06:42 +00002450 }
2451 p->flags = flags;
2452 }
bellard9fa3e852004-01-04 18:06:42 +00002453}
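
/* A minimal usage sketch (user-mode emulation; addresses hypothetical):
 * after mapping a new guest region, the caller marks it valid and
 * accessible so that later page_check_range() calls succeed:
 *
 *     target_ulong start = 0x400000, len = 0x2000;
 *     mmap_lock();
 *     page_set_flags(start, start + len,
 *                    PAGE_VALID | PAGE_READ | PAGE_WRITE);
 *     mmap_unlock();
 *
 * Clearing PAGE_WRITE later write-protects the range, so guest writes
 * fault into page_unprotect() below. */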
2454
ths3d97b402007-11-02 19:02:07 +00002455int page_check_range(target_ulong start, target_ulong len, int flags)
2456{
2457 PageDesc *p;
2458 target_ulong end;
2459 target_ulong addr;
2460
Richard Henderson376a7902010-03-10 15:57:04 -08002461 /* This function should never be called with addresses outside the
2462 guest address space. If this assert fires, it probably indicates
2463 a missing call to h2g_valid. */
Blue Swirl338e9e62010-03-13 09:48:08 +00002464#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2465 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002466#endif
2467
Richard Henderson3e0650a2010-03-29 10:54:42 -07002468 if (len == 0) {
2469 return 0;
2470 }
Richard Henderson376a7902010-03-10 15:57:04 -08002471 if (start + len - 1 < start) {
2472 /* We've wrapped around. */
balrog55f280c2008-10-28 10:24:11 +00002473 return -1;
Richard Henderson376a7902010-03-10 15:57:04 -08002474 }
balrog55f280c2008-10-28 10:24:11 +00002475
ths3d97b402007-11-02 19:02:07 +00002476 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2477 start = start & TARGET_PAGE_MASK;
2478
Richard Henderson376a7902010-03-10 15:57:04 -08002479 for (addr = start, len = end - start;
2480 len != 0;
2481 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
ths3d97b402007-11-02 19:02:07 +00002482 p = page_find(addr >> TARGET_PAGE_BITS);
2483 if( !p )
2484 return -1;
2485 if( !(p->flags & PAGE_VALID) )
2486 return -1;
2487
bellarddae32702007-11-14 10:51:00 +00002488 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
ths3d97b402007-11-02 19:02:07 +00002489 return -1;
bellarddae32702007-11-14 10:51:00 +00002490 if (flags & PAGE_WRITE) {
2491 if (!(p->flags & PAGE_WRITE_ORG))
2492 return -1;
2493 /* unprotect the page if it was put read-only because it
2494 contains translated code */
2495 if (!(p->flags & PAGE_WRITE)) {
2496 if (!page_unprotect(addr, 0, NULL))
2497 return -1;
2498 }
2499 return 0;
2500 }
ths3d97b402007-11-02 19:02:07 +00002501 }
2502 return 0;
2503}
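
/* Sketch: how syscall emulation might validate a guest buffer before
 * copying from it (error code name assumed from the linux-user layer):
 *
 *     if (page_check_range(guest_addr, size, PAGE_READ) < 0) {
 *         return -TARGET_EFAULT;    // not readable in the guest map
 *     }
 *     // the range is mapped and readable; safe to lock_user() it
 */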
2504
bellard9fa3e852004-01-04 18:06:42 +00002505/* called from signal handler: invalidate the code and unprotect the
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002506 page. Return TRUE if the fault was successfully handled. */
Stefan Weil6375e092012-04-06 22:26:15 +02002507int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00002508{
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002509 unsigned int prot;
2510 PageDesc *p;
pbrook53a59602006-03-25 19:31:22 +00002511 target_ulong host_start, host_end, addr;
bellard9fa3e852004-01-04 18:06:42 +00002512
pbrookc8a706f2008-06-02 16:16:42 +00002513 /* Technically this isn't safe inside a signal handler. However we
2514 know this only ever happens in a synchronous SEGV handler, so in
2515 practice it seems to be ok. */
2516 mmap_lock();
2517
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002518 p = page_find(address >> TARGET_PAGE_BITS);
2519 if (!p) {
pbrookc8a706f2008-06-02 16:16:42 +00002520 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002521 return 0;
pbrookc8a706f2008-06-02 16:16:42 +00002522 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002523
bellard9fa3e852004-01-04 18:06:42 +00002524 /* if the page was really writable, then we change its
2525 protection back to writable */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002526 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2527 host_start = address & qemu_host_page_mask;
2528 host_end = host_start + qemu_host_page_size;
2529
2530 prot = 0;
2531 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2532 p = page_find(addr >> TARGET_PAGE_BITS);
2533 p->flags |= PAGE_WRITE;
2534 prot |= p->flags;
2535
bellard9fa3e852004-01-04 18:06:42 +00002536 /* and since the content will be modified, we must invalidate
2537 the corresponding translated code. */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002538 tb_invalidate_phys_page(addr, pc, puc);
bellard9fa3e852004-01-04 18:06:42 +00002539#ifdef DEBUG_TB_CHECK
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002540 tb_invalidate_check(addr);
bellard9fa3e852004-01-04 18:06:42 +00002541#endif
bellard9fa3e852004-01-04 18:06:42 +00002542 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002543 mprotect((void *)g2h(host_start), qemu_host_page_size,
2544 prot & PAGE_BITS);
2545
2546 mmap_unlock();
2547 return 1;
bellard9fa3e852004-01-04 18:06:42 +00002548 }
pbrookc8a706f2008-06-02 16:16:42 +00002549 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002550 return 0;
2551}
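
/* Caller-side sketch (the host SEGV handler, heavily simplified;
 * handle_cpu_signal() and h2g() live elsewhere in the tree):
 *
 *     if (page_unprotect(h2g(fault_host_addr), pc, puc)) {
 *         return 1;    // fault was our TB write-protection; retry insn
 *     }
 *     // otherwise the fault is genuine and is delivered to the guest
 */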
2552
Andreas Färber9349b4f2012-03-14 01:38:32 +01002553static inline void tlb_set_dirty(CPUArchState *env,
bellard6a00d602005-11-21 23:25:50 +00002554 unsigned long addr, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002555{
2556}
bellard9fa3e852004-01-04 18:06:42 +00002557#endif /* defined(CONFIG_USER_ONLY) */
2558
pbrooke2eef172008-06-08 01:09:01 +00002559#if !defined(CONFIG_USER_ONLY)
pbrook8da3ff12008-12-01 18:59:50 +00002560
Paul Brookc04b2b72010-03-01 03:31:14 +00002561#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2562typedef struct subpage_t {
Avi Kivity70c68e42012-01-02 12:32:48 +02002563 MemoryRegion iomem;
Paul Brookc04b2b72010-03-01 03:31:14 +00002564 target_phys_addr_t base;
Avi Kivity5312bd82012-02-12 18:32:55 +02002565 uint16_t sub_section[TARGET_PAGE_SIZE];
Paul Brookc04b2b72010-03-01 03:31:14 +00002566} subpage_t;
2567
Anthony Liguoric227f092009-10-01 16:12:16 -05002568static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002569 uint16_t section);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002570static subpage_t *subpage_init(target_phys_addr_t base);
Avi Kivity5312bd82012-02-12 18:32:55 +02002571static void destroy_page_desc(uint16_t section_index)
Avi Kivity54688b12012-02-09 17:34:32 +02002572{
Avi Kivity5312bd82012-02-12 18:32:55 +02002573 MemoryRegionSection *section = &phys_sections[section_index];
2574 MemoryRegion *mr = section->mr;
Avi Kivity54688b12012-02-09 17:34:32 +02002575
2576 if (mr->subpage) {
2577 subpage_t *subpage = container_of(mr, subpage_t, iomem);
2578 memory_region_destroy(&subpage->iomem);
2579 g_free(subpage);
2580 }
2581}
2582
Avi Kivity4346ae32012-02-10 17:00:01 +02002583static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
Avi Kivity54688b12012-02-09 17:34:32 +02002584{
2585 unsigned i;
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002586 PhysPageEntry *p;
Avi Kivity54688b12012-02-09 17:34:32 +02002587
Avi Kivityc19e8802012-02-13 20:25:31 +02002588 if (lp->ptr == PHYS_MAP_NODE_NIL) {
Avi Kivity54688b12012-02-09 17:34:32 +02002589 return;
2590 }
2591
Avi Kivityc19e8802012-02-13 20:25:31 +02002592 p = phys_map_nodes[lp->ptr];
Avi Kivity4346ae32012-02-10 17:00:01 +02002593 for (i = 0; i < L2_SIZE; ++i) {
Avi Kivity07f07b32012-02-13 20:45:32 +02002594 if (!p[i].is_leaf) {
Avi Kivity54688b12012-02-09 17:34:32 +02002595 destroy_l2_mapping(&p[i], level - 1);
Avi Kivity4346ae32012-02-10 17:00:01 +02002596 } else {
Avi Kivityc19e8802012-02-13 20:25:31 +02002597 destroy_page_desc(p[i].ptr);
Avi Kivity54688b12012-02-09 17:34:32 +02002598 }
Avi Kivity54688b12012-02-09 17:34:32 +02002599 }
Avi Kivity07f07b32012-02-13 20:45:32 +02002600 lp->is_leaf = 0;
Avi Kivityc19e8802012-02-13 20:25:31 +02002601 lp->ptr = PHYS_MAP_NODE_NIL;
Avi Kivity54688b12012-02-09 17:34:32 +02002602}
2603
2604static void destroy_all_mappings(void)
2605{
Avi Kivity3eef53d2012-02-10 14:57:31 +02002606 destroy_l2_mapping(&phys_map, P_L2_LEVELS - 1);
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002607 phys_map_nodes_reset();
Avi Kivity54688b12012-02-09 17:34:32 +02002608}
2609
Avi Kivity5312bd82012-02-12 18:32:55 +02002610static uint16_t phys_section_add(MemoryRegionSection *section)
2611{
2612 if (phys_sections_nb == phys_sections_nb_alloc) {
2613 phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
2614 phys_sections = g_renew(MemoryRegionSection, phys_sections,
2615 phys_sections_nb_alloc);
2616 }
2617 phys_sections[phys_sections_nb] = *section;
2618 return phys_sections_nb++;
2619}
2620
2621static void phys_sections_clear(void)
2622{
2623 phys_sections_nb = 0;
2624}
2625
Michael S. Tsirkin8f2498f2009-09-29 18:53:16 +02002626/* register physical memory.
 2627 For RAM, 'size' must be a multiple of the target page size.
 2628 A MemoryRegionSection describes the part of the address space
pbrook8da3ff12008-12-01 18:59:50 +00002629 covered by one MemoryRegion. Sections that start or end in the
 2630 middle of a target page are routed through subpages; page-aligned
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002631 spans go straight into the physical page map. The address used
pbrook8da3ff12008-12-01 18:59:50 +00002632 when calling the I/O callbacks is the offset from the start of
 2633 the region. */
Avi Kivity0f0cb162012-02-13 17:14:32 +02002634static void register_subpage(MemoryRegionSection *section)
2635{
2636 subpage_t *subpage;
2637 target_phys_addr_t base = section->offset_within_address_space
2638 & TARGET_PAGE_MASK;
Avi Kivityf3705d52012-03-08 16:16:34 +02002639 MemoryRegionSection *existing = phys_page_find(base >> TARGET_PAGE_BITS);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002640 MemoryRegionSection subsection = {
2641 .offset_within_address_space = base,
2642 .size = TARGET_PAGE_SIZE,
2643 };
Avi Kivity0f0cb162012-02-13 17:14:32 +02002644 target_phys_addr_t start, end;
2645
Avi Kivityf3705d52012-03-08 16:16:34 +02002646 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002647
Avi Kivityf3705d52012-03-08 16:16:34 +02002648 if (!(existing->mr->subpage)) {
Avi Kivity0f0cb162012-02-13 17:14:32 +02002649 subpage = subpage_init(base);
2650 subsection.mr = &subpage->iomem;
Avi Kivity29990972012-02-13 20:21:20 +02002651 phys_page_set(base >> TARGET_PAGE_BITS, 1,
2652 phys_section_add(&subsection));
Avi Kivity0f0cb162012-02-13 17:14:32 +02002653 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02002654 subpage = container_of(existing->mr, subpage_t, iomem);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002655 }
2656 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
2657 end = start + section->size;
2658 subpage_register(subpage, start, end, phys_section_add(section));
2659}
2660
2661
2662static void register_multipage(MemoryRegionSection *section)
bellard33417e72003-08-10 21:47:01 +00002663{
Avi Kivitydd811242012-01-02 12:17:03 +02002664 target_phys_addr_t start_addr = section->offset_within_address_space;
2665 ram_addr_t size = section->size;
Avi Kivity29990972012-02-13 20:21:20 +02002666 target_phys_addr_t addr;
Avi Kivity5312bd82012-02-12 18:32:55 +02002667 uint16_t section_index = phys_section_add(section);
Avi Kivitydd811242012-01-02 12:17:03 +02002668
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002669 assert(size);
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002670
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002671 addr = start_addr;
Avi Kivity29990972012-02-13 20:21:20 +02002672 phys_page_set(addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
2673 section_index);
bellard33417e72003-08-10 21:47:01 +00002674}
2675
Avi Kivity0f0cb162012-02-13 17:14:32 +02002676void cpu_register_physical_memory_log(MemoryRegionSection *section,
2677 bool readonly)
2678{
2679 MemoryRegionSection now = *section, remain = *section;
2680
2681 if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
2682 || (now.size < TARGET_PAGE_SIZE)) {
2683 now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
2684 - now.offset_within_address_space,
2685 now.size);
2686 register_subpage(&now);
2687 remain.size -= now.size;
2688 remain.offset_within_address_space += now.size;
2689 remain.offset_within_region += now.size;
2690 }
2691 now = remain;
2692 now.size &= TARGET_PAGE_MASK;
2693 if (now.size) {
2694 register_multipage(&now);
2695 remain.size -= now.size;
2696 remain.offset_within_address_space += now.size;
2697 remain.offset_within_region += now.size;
2698 }
2699 now = remain;
2700 if (now.size) {
2701 register_subpage(&now);
2702 }
2703}
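
/* Worked example (hypothetical numbers, TARGET_PAGE_SIZE == 0x1000):
 * a section covering [0x1000800, 0x1003000) is split into
 *
 *     [0x1000800, 0x1001000)  unaligned head -> register_subpage()
 *     [0x1001000, 0x1003000)  aligned body   -> register_multipage()
 *
 * and a section ending mid-page would get a subpage tail as well.
 * Only the unaligned edges pay the subpage indirection cost. */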
2704
2705
Anthony Liguoric227f092009-10-01 16:12:16 -05002706void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002707{
2708 if (kvm_enabled())
2709 kvm_coalesce_mmio_region(addr, size);
2710}
2711
Anthony Liguoric227f092009-10-01 16:12:16 -05002712void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002713{
2714 if (kvm_enabled())
2715 kvm_uncoalesce_mmio_region(addr, size);
2716}
2717
Sheng Yang62a27442010-01-26 19:21:16 +08002718void qemu_flush_coalesced_mmio_buffer(void)
2719{
2720 if (kvm_enabled())
2721 kvm_flush_coalesced_mmio_buffer();
2722}
2723
Marcelo Tosattic9027602010-03-01 20:25:08 -03002724#if defined(__linux__) && !defined(TARGET_S390X)
2725
2726#include <sys/vfs.h>
2727
2728#define HUGETLBFS_MAGIC 0x958458f6
2729
2730static long gethugepagesize(const char *path)
2731{
2732 struct statfs fs;
2733 int ret;
2734
2735 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002736 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002737 } while (ret != 0 && errno == EINTR);
2738
2739 if (ret != 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002740 perror(path);
2741 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002742 }
2743
2744 if (fs.f_type != HUGETLBFS_MAGIC)
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002745 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002746
2747 return fs.f_bsize;
2748}
2749
Alex Williamson04b16652010-07-02 11:13:17 -06002750static void *file_ram_alloc(RAMBlock *block,
2751 ram_addr_t memory,
2752 const char *path)
Marcelo Tosattic9027602010-03-01 20:25:08 -03002753{
2754 char *filename;
2755 void *area;
2756 int fd;
2757#ifdef MAP_POPULATE
2758 int flags;
2759#endif
2760 unsigned long hpagesize;
2761
2762 hpagesize = gethugepagesize(path);
2763 if (!hpagesize) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002764 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002765 }
2766
2767 if (memory < hpagesize) {
2768 return NULL;
2769 }
2770
2771 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2772 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2773 return NULL;
2774 }
2775
2776 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002777 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002778 }
2779
2780 fd = mkstemp(filename);
2781 if (fd < 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002782 perror("unable to create backing store for hugepages");
2783 free(filename);
2784 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002785 }
2786 unlink(filename);
2787 free(filename);
2788
2789 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2790
2791 /*
 2792 * ftruncate is not supported by hugetlbfs on older
 2793 * hosts, so don't bother bailing out on errors.
2794 * If anything goes wrong with it under other filesystems,
2795 * mmap will fail.
2796 */
2797 if (ftruncate(fd, memory))
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002798 perror("ftruncate");
Marcelo Tosattic9027602010-03-01 20:25:08 -03002799
2800#ifdef MAP_POPULATE
 2801 /* NB: MAP_POPULATE won't exhaustively allocate all physical pages when
 2802 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2803 * to sidestep this quirk.
2804 */
2805 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2806 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2807#else
2808 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2809#endif
2810 if (area == MAP_FAILED) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002811 perror("file_ram_alloc: can't mmap RAM pages");
2812 close(fd);
2813 return (NULL);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002814 }
Alex Williamson04b16652010-07-02 11:13:17 -06002815 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002816 return area;
2817}
2818#endif
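
/* The path comes from the -mem-path command line option, e.g.
 *
 *     qemu-system-x86_64 -m 1024 -mem-path /dev/hugepages
 *
 * (assuming a hugetlbfs mount at /dev/hugepages), which backs guest
 * RAM with huge pages instead of anonymous memory. */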
2819
Alex Williamsond17b5282010-06-25 11:08:38 -06002820static ram_addr_t find_ram_offset(ram_addr_t size)
2821{
Alex Williamson04b16652010-07-02 11:13:17 -06002822 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06002823 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002824
2825 if (QLIST_EMPTY(&ram_list.blocks))
2826 return 0;
2827
2828 QLIST_FOREACH(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002829 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002830
2831 end = block->offset + block->length;
2832
2833 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2834 if (next_block->offset >= end) {
2835 next = MIN(next, next_block->offset);
2836 }
2837 }
2838 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06002839 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06002840 mingap = next - end;
2841 }
2842 }
Alex Williamson3e837b22011-10-31 08:54:09 -06002843
2844 if (offset == RAM_ADDR_MAX) {
2845 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2846 (uint64_t)size);
2847 abort();
2848 }
2849
Alex Williamson04b16652010-07-02 11:13:17 -06002850 return offset;
2851}
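
/* Worked example (hypothetical layout): with blocks at [0, 0x8000000)
 * and [0xc000000, 0x10000000), a request for 0x2000000 sees the gaps
 * [0x8000000, 0xc000000) and [0x10000000, ...); both fit, but the
 * finite gap is smaller, so the new block lands at 0x8000000. Taking
 * the tightest fit keeps the ram_addr_t space compact. */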
2852
2853static ram_addr_t last_ram_offset(void)
2854{
Alex Williamsond17b5282010-06-25 11:08:38 -06002855 RAMBlock *block;
2856 ram_addr_t last = 0;
2857
2858 QLIST_FOREACH(block, &ram_list.blocks, next)
2859 last = MAX(last, block->offset + block->length);
2860
2861 return last;
2862}
2863
Avi Kivityc5705a72011-12-20 15:59:12 +02002864void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
Cam Macdonell84b89d72010-07-26 18:10:57 -06002865{
2866 RAMBlock *new_block, *block;
2867
Avi Kivityc5705a72011-12-20 15:59:12 +02002868 new_block = NULL;
2869 QLIST_FOREACH(block, &ram_list.blocks, next) {
2870 if (block->offset == addr) {
2871 new_block = block;
2872 break;
2873 }
2874 }
2875 assert(new_block);
2876 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002877
2878 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2879 char *id = dev->parent_bus->info->get_dev_path(dev);
2880 if (id) {
2881 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05002882 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002883 }
2884 }
2885 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2886
2887 QLIST_FOREACH(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02002888 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06002889 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2890 new_block->idstr);
2891 abort();
2892 }
2893 }
Avi Kivityc5705a72011-12-20 15:59:12 +02002894}
2895
2896ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2897 MemoryRegion *mr)
2898{
2899 RAMBlock *new_block;
2900
2901 size = TARGET_PAGE_ALIGN(size);
2902 new_block = g_malloc0(sizeof(*new_block));
Cam Macdonell84b89d72010-07-26 18:10:57 -06002903
Avi Kivity7c637362011-12-21 13:09:49 +02002904 new_block->mr = mr;
Jun Nakajima432d2682010-08-31 16:41:25 +01002905 new_block->offset = find_ram_offset(size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002906 if (host) {
2907 new_block->host = host;
Huang Yingcd19cfa2011-03-02 08:56:19 +01002908 new_block->flags |= RAM_PREALLOC_MASK;
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002909 } else {
2910 if (mem_path) {
2911#if defined (__linux__) && !defined(TARGET_S390X)
2912 new_block->host = file_ram_alloc(new_block, size, mem_path);
2913 if (!new_block->host) {
2914 new_block->host = qemu_vmalloc(size);
Andreas Färbere78815a2010-09-25 11:26:05 +00002915 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002916 }
2917#else
2918 fprintf(stderr, "-mem-path option unsupported\n");
2919 exit(1);
2920#endif
2921 } else {
2922#if defined(TARGET_S390X) && defined(CONFIG_KVM)
Christian Borntraegerff836782011-05-10 14:49:10 +02002923 /* S390 KVM requires the topmost vma of the RAM to be smaller than
 2924 a system-defined value, which is at least 256GB. Larger systems
 2925 have larger values. We put the guest between the end of the data
 2926 segment (system break) and this value. We use 32GB as a base to
2927 have enough room for the system break to grow. */
2928 new_block->host = mmap((void*)0x800000000, size,
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002929 PROT_EXEC|PROT_READ|PROT_WRITE,
Christian Borntraegerff836782011-05-10 14:49:10 +02002930 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
Alexander Graffb8b2732011-05-20 17:33:28 +02002931 if (new_block->host == MAP_FAILED) {
2932 fprintf(stderr, "Allocating RAM failed\n");
2933 abort();
2934 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002935#else
Jan Kiszka868bb332011-06-21 22:59:09 +02002936 if (xen_enabled()) {
Avi Kivityfce537d2011-12-18 15:48:55 +02002937 xen_ram_alloc(new_block->offset, size, mr);
Jun Nakajima432d2682010-08-31 16:41:25 +01002938 } else {
2939 new_block->host = qemu_vmalloc(size);
2940 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002941#endif
Andreas Färbere78815a2010-09-25 11:26:05 +00002942 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002943 }
2944 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06002945 new_block->length = size;
2946
2947 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2948
Anthony Liguori7267c092011-08-20 22:09:37 -05002949 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
Cam Macdonell84b89d72010-07-26 18:10:57 -06002950 last_ram_offset() >> TARGET_PAGE_BITS);
2951 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2952 0xff, size >> TARGET_PAGE_BITS);
2953
2954 if (kvm_enabled())
2955 kvm_setup_guest_memory(new_block->host, size);
2956
2957 return new_block->offset;
2958}
2959
Avi Kivityc5705a72011-12-20 15:59:12 +02002960ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
pbrook94a6b542009-04-11 17:15:54 +00002961{
Avi Kivityc5705a72011-12-20 15:59:12 +02002962 return qemu_ram_alloc_from_ptr(size, NULL, mr);
pbrook94a6b542009-04-11 17:15:54 +00002963}
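
/* Typical caller (sketch): devices normally reach this through the
 * memory API rather than calling it directly; roughly, assuming the
 * memory_region_init_ram() signature of this era:
 *
 *     MemoryRegion *mr = g_malloc0(sizeof(*mr));
 *     memory_region_init_ram(mr, "example.vram", 8 * 1024 * 1024);
 *     memory_region_add_subregion(get_system_memory(), 0xa0000000, mr);
 *
 * memory_region_init_ram() ends up in qemu_ram_alloc() above, and the
 * returned offset is the block's position in ram_addr_t space. */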
bellarde9a1ab12007-02-08 23:08:38 +00002964
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002965void qemu_ram_free_from_ptr(ram_addr_t addr)
2966{
2967 RAMBlock *block;
2968
2969 QLIST_FOREACH(block, &ram_list.blocks, next) {
2970 if (addr == block->offset) {
2971 QLIST_REMOVE(block, next);
Anthony Liguori7267c092011-08-20 22:09:37 -05002972 g_free(block);
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002973 return;
2974 }
2975 }
2976}
2977
Anthony Liguoric227f092009-10-01 16:12:16 -05002978void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00002979{
Alex Williamson04b16652010-07-02 11:13:17 -06002980 RAMBlock *block;
2981
2982 QLIST_FOREACH(block, &ram_list.blocks, next) {
2983 if (addr == block->offset) {
2984 QLIST_REMOVE(block, next);
Huang Yingcd19cfa2011-03-02 08:56:19 +01002985 if (block->flags & RAM_PREALLOC_MASK) {
2986 ;
2987 } else if (mem_path) {
Alex Williamson04b16652010-07-02 11:13:17 -06002988#if defined (__linux__) && !defined(TARGET_S390X)
2989 if (block->fd) {
2990 munmap(block->host, block->length);
2991 close(block->fd);
2992 } else {
2993 qemu_vfree(block->host);
2994 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01002995#else
2996 abort();
Alex Williamson04b16652010-07-02 11:13:17 -06002997#endif
2998 } else {
2999#if defined(TARGET_S390X) && defined(CONFIG_KVM)
3000 munmap(block->host, block->length);
3001#else
Jan Kiszka868bb332011-06-21 22:59:09 +02003002 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003003 xen_invalidate_map_cache_entry(block->host);
Jun Nakajima432d2682010-08-31 16:41:25 +01003004 } else {
3005 qemu_vfree(block->host);
3006 }
Alex Williamson04b16652010-07-02 11:13:17 -06003007#endif
3008 }
Anthony Liguori7267c092011-08-20 22:09:37 -05003009 g_free(block);
Alex Williamson04b16652010-07-02 11:13:17 -06003010 return;
3011 }
3012 }
3013
bellarde9a1ab12007-02-08 23:08:38 +00003014}
3015
Huang Yingcd19cfa2011-03-02 08:56:19 +01003016#ifndef _WIN32
3017void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
3018{
3019 RAMBlock *block;
3020 ram_addr_t offset;
3021 int flags;
3022 void *area, *vaddr;
3023
3024 QLIST_FOREACH(block, &ram_list.blocks, next) {
3025 offset = addr - block->offset;
3026 if (offset < block->length) {
3027 vaddr = block->host + offset;
3028 if (block->flags & RAM_PREALLOC_MASK) {
3029 ;
3030 } else {
3031 flags = MAP_FIXED;
3032 munmap(vaddr, length);
3033 if (mem_path) {
3034#if defined(__linux__) && !defined(TARGET_S390X)
3035 if (block->fd) {
3036#ifdef MAP_POPULATE
3037 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
3038 MAP_PRIVATE;
3039#else
3040 flags |= MAP_PRIVATE;
3041#endif
3042 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3043 flags, block->fd, offset);
3044 } else {
3045 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3046 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3047 flags, -1, 0);
3048 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01003049#else
3050 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01003051#endif
3052 } else {
3053#if defined(TARGET_S390X) && defined(CONFIG_KVM)
3054 flags |= MAP_SHARED | MAP_ANONYMOUS;
3055 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
3056 flags, -1, 0);
3057#else
3058 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3059 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3060 flags, -1, 0);
3061#endif
3062 }
3063 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00003064 fprintf(stderr, "Could not remap addr: "
3065 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01003066 length, addr);
3067 exit(1);
3068 }
3069 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
3070 }
3071 return;
3072 }
3073 }
3074}
3075#endif /* !_WIN32 */
3076
pbrookdc828ca2009-04-09 22:21:07 +00003077/* Return a host pointer to ram allocated with qemu_ram_alloc.
pbrook5579c7f2009-04-11 14:47:08 +00003078 With the exception of the softmmu code in this file, this should
3079 only be used for local memory (e.g. video ram) that the device owns,
3080 and knows it isn't going to access beyond the end of the block.
3081
3082 It should not be used for general purpose DMA.
3083 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
3084 */
Anthony Liguoric227f092009-10-01 16:12:16 -05003085void *qemu_get_ram_ptr(ram_addr_t addr)
pbrookdc828ca2009-04-09 22:21:07 +00003086{
pbrook94a6b542009-04-11 17:15:54 +00003087 RAMBlock *block;
3088
Alex Williamsonf471a172010-06-11 11:11:42 -06003089 QLIST_FOREACH(block, &ram_list.blocks, next) {
3090 if (addr - block->offset < block->length) {
Vincent Palatin7d82af32011-03-10 15:47:46 -05003091 /* Move this entry to the start of the list. */
3092 if (block != QLIST_FIRST(&ram_list.blocks)) {
3093 QLIST_REMOVE(block, next);
3094 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
3095 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003096 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003097 /* We need to check if the requested address is in RAM
 3098 * because we don't want to map the entire guest memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003099 * In that case, just map to the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01003100 */
3101 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003102 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01003103 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003104 block->host =
3105 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01003106 }
3107 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003108 return block->host + (addr - block->offset);
3109 }
pbrook94a6b542009-04-11 17:15:54 +00003110 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003111
3112 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3113 abort();
3114
3115 return NULL;
pbrookdc828ca2009-04-09 22:21:07 +00003116}
3117
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02003118/* Return a host pointer to ram allocated with qemu_ram_alloc.
3119 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3120 */
3121void *qemu_safe_ram_ptr(ram_addr_t addr)
3122{
3123 RAMBlock *block;
3124
3125 QLIST_FOREACH(block, &ram_list.blocks, next) {
3126 if (addr - block->offset < block->length) {
Jan Kiszka868bb332011-06-21 22:59:09 +02003127 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003128 /* We need to check if the requested address is in RAM
 3129 * because we don't want to map the entire guest memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003130 * In that case, just map to the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01003131 */
3132 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003133 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01003134 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003135 block->host =
3136 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01003137 }
3138 }
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02003139 return block->host + (addr - block->offset);
3140 }
3141 }
3142
3143 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3144 abort();
3145
3146 return NULL;
3147}
3148
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003149/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
3150 * but takes a size argument */
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003151void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003152{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003153 if (*size == 0) {
3154 return NULL;
3155 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003156 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003157 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02003158 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003159 RAMBlock *block;
3160
3161 QLIST_FOREACH(block, &ram_list.blocks, next) {
3162 if (addr - block->offset < block->length) {
3163 if (addr - block->offset + *size > block->length)
3164 *size = block->length - addr + block->offset;
3165 return block->host + (addr - block->offset);
3166 }
3167 }
3168
3169 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3170 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003171 }
3172}
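
/* Sketch of the in/out size contract (hypothetical values):
 *
 *     ram_addr_t sz = 0x3000;
 *     void *p = qemu_ram_ptr_length(addr, &sz);
 *     // if addr is only 0x1000 bytes before the end of its block,
 *     // sz comes back clamped to 0x1000 and the caller must loop
 *     // (or bounce-buffer) for the remainder
 */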
3173
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003174void qemu_put_ram_ptr(void *addr)
3175{
3176 trace_qemu_put_ram_ptr(addr);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003177}
3178
Marcelo Tosattie8902612010-10-11 15:31:19 -03003179int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00003180{
pbrook94a6b542009-04-11 17:15:54 +00003181 RAMBlock *block;
3182 uint8_t *host = ptr;
3183
Jan Kiszka868bb332011-06-21 22:59:09 +02003184 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003185 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003186 return 0;
3187 }
3188
Alex Williamsonf471a172010-06-11 11:11:42 -06003189 QLIST_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003190 /* This case can happen when the block is not mapped. */
3191 if (block->host == NULL) {
3192 continue;
3193 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003194 if (host - block->host < block->length) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03003195 *ram_addr = block->offset + (host - block->host);
3196 return 0;
Alex Williamsonf471a172010-06-11 11:11:42 -06003197 }
pbrook94a6b542009-04-11 17:15:54 +00003198 }
Jun Nakajima432d2682010-08-31 16:41:25 +01003199
Marcelo Tosattie8902612010-10-11 15:31:19 -03003200 return -1;
3201}
Alex Williamsonf471a172010-06-11 11:11:42 -06003202
Marcelo Tosattie8902612010-10-11 15:31:19 -03003203/* Some of the softmmu routines need to translate from a host pointer
3204 (typically a TLB entry) back to a ram offset. */
3205ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3206{
3207 ram_addr_t ram_addr;
Alex Williamsonf471a172010-06-11 11:11:42 -06003208
Marcelo Tosattie8902612010-10-11 15:31:19 -03003209 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3210 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3211 abort();
3212 }
3213 return ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00003214}
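
/* The two translations are inverses for any address inside a block
 * (outside Xen, where the mapcache is consulted instead):
 *
 *     void *host = qemu_get_ram_ptr(ram_addr);
 *     assert(qemu_ram_addr_from_host_nofail(host) == ram_addr);
 */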
3215
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003216static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
3217 unsigned size)
bellard33417e72003-08-10 21:47:01 +00003218{
pbrook67d3b952006-12-18 05:03:52 +00003219#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00003220 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
pbrook67d3b952006-12-18 05:03:52 +00003221#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003222#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003223 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00003224#endif
3225 return 0;
3226}
3227
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003228static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
3229 uint64_t val, unsigned size)
blueswir1e18231a2008-10-06 18:46:28 +00003230{
3231#ifdef DEBUG_UNASSIGNED
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003232 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
blueswir1e18231a2008-10-06 18:46:28 +00003233#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003234#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003235 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00003236#endif
3237}
3238
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003239static const MemoryRegionOps unassigned_mem_ops = {
3240 .read = unassigned_mem_read,
3241 .write = unassigned_mem_write,
3242 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00003243};
3244
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003245static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
3246 unsigned size)
3247{
3248 abort();
3249}
3250
3251static void error_mem_write(void *opaque, target_phys_addr_t addr,
3252 uint64_t value, unsigned size)
3253{
3254 abort();
3255}
3256
3257static const MemoryRegionOps error_mem_ops = {
3258 .read = error_mem_read,
3259 .write = error_mem_write,
3260 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00003261};
3262
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003263static const MemoryRegionOps rom_mem_ops = {
3264 .read = error_mem_read,
3265 .write = unassigned_mem_write,
3266 .endianness = DEVICE_NATIVE_ENDIAN,
3267};
3268
3269static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
3270 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00003271{
bellard3a7d9292005-08-21 09:26:42 +00003272 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003273 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003274 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3275#if !defined(CONFIG_USER_ONLY)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003276 tb_invalidate_phys_page_fast(ram_addr, size);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003277 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003278#endif
3279 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003280 switch (size) {
3281 case 1:
3282 stb_p(qemu_get_ram_ptr(ram_addr), val);
3283 break;
3284 case 2:
3285 stw_p(qemu_get_ram_ptr(ram_addr), val);
3286 break;
3287 case 4:
3288 stl_p(qemu_get_ram_ptr(ram_addr), val);
3289 break;
3290 default:
3291 abort();
3292 }
bellardf23db162005-08-21 19:12:28 +00003293 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003294 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00003295 /* we remove the notdirty callback only if the code has been
3296 flushed */
3297 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00003298 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00003299}
3300
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003301static const MemoryRegionOps notdirty_mem_ops = {
3302 .read = error_mem_read,
3303 .write = notdirty_mem_write,
3304 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00003305};
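
/* Sketch of how the dirty bits set above are consumed (simplified;
 * e.g. by migration or display code, flag usage assumed from this file):
 *
 *     int flags = cpu_physical_memory_get_dirty_flags(ram_addr);
 *     if (flags & ~CODE_DIRTY_FLAG) {
 *         // page written since the last sync: process, then clear
 *     }
 */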
3306
pbrook0f459d12008-06-09 00:20:13 +00003307/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00003308static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00003309{
Andreas Färber9349b4f2012-03-14 01:38:32 +01003310 CPUArchState *env = cpu_single_env;
aliguori06d55cc2008-11-18 20:24:06 +00003311 target_ulong pc, cs_base;
3312 TranslationBlock *tb;
pbrook0f459d12008-06-09 00:20:13 +00003313 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00003314 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00003315 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00003316
aliguori06d55cc2008-11-18 20:24:06 +00003317 if (env->watchpoint_hit) {
3318 /* We re-entered the check after replacing the TB. Now raise
 3319 * the debug interrupt so that it will trigger after the
3320 * current instruction. */
3321 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3322 return;
3323 }
pbrook2e70f6e2008-06-29 01:03:05 +00003324 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003325 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00003326 if ((vaddr == (wp->vaddr & len_mask) ||
3327 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00003328 wp->flags |= BP_WATCHPOINT_HIT;
3329 if (!env->watchpoint_hit) {
3330 env->watchpoint_hit = wp;
3331 tb = tb_find_pc(env->mem_io_pc);
3332 if (!tb) {
3333 cpu_abort(env, "check_watchpoint: could not find TB for "
3334 "pc=%p", (void *)env->mem_io_pc);
3335 }
Stefan Weil618ba8e2011-04-18 06:39:53 +00003336 cpu_restore_state(tb, env, env->mem_io_pc);
aliguori6e140f22008-11-18 20:37:55 +00003337 tb_phys_invalidate(tb, -1);
3338 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3339 env->exception_index = EXCP_DEBUG;
Max Filippov488d6572012-01-29 02:24:39 +04003340 cpu_loop_exit(env);
aliguori6e140f22008-11-18 20:37:55 +00003341 } else {
3342 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3343 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
Max Filippov488d6572012-01-29 02:24:39 +04003344 cpu_resume_from_signal(env, NULL);
aliguori6e140f22008-11-18 20:37:55 +00003345 }
aliguori06d55cc2008-11-18 20:24:06 +00003346 }
aliguori6e140f22008-11-18 20:37:55 +00003347 } else {
3348 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00003349 }
3350 }
3351}
3352
pbrook6658ffb2007-03-16 23:58:11 +00003353/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3354 so these check for a hit then pass through to the normal out-of-line
3355 phys routines. */
Avi Kivity1ec9b902012-01-02 12:47:48 +02003356static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
3357 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00003358{
Avi Kivity1ec9b902012-01-02 12:47:48 +02003359 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
3360 switch (size) {
3361 case 1: return ldub_phys(addr);
3362 case 2: return lduw_phys(addr);
3363 case 4: return ldl_phys(addr);
3364 default: abort();
3365 }
pbrook6658ffb2007-03-16 23:58:11 +00003366}
3367
Avi Kivity1ec9b902012-01-02 12:47:48 +02003368static void watch_mem_write(void *opaque, target_phys_addr_t addr,
3369 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00003370{
Avi Kivity1ec9b902012-01-02 12:47:48 +02003371 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
3372 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04003373 case 1:
3374 stb_phys(addr, val);
3375 break;
3376 case 2:
3377 stw_phys(addr, val);
3378 break;
3379 case 4:
3380 stl_phys(addr, val);
3381 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02003382 default: abort();
3383 }
pbrook6658ffb2007-03-16 23:58:11 +00003384}
3385
Avi Kivity1ec9b902012-01-02 12:47:48 +02003386static const MemoryRegionOps watch_mem_ops = {
3387 .read = watch_mem_read,
3388 .write = watch_mem_write,
3389 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00003390};
pbrook6658ffb2007-03-16 23:58:11 +00003391
Avi Kivity70c68e42012-01-02 12:32:48 +02003392static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
3393 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003394{
Avi Kivity70c68e42012-01-02 12:32:48 +02003395 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003396 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02003397 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00003398#if defined(DEBUG_SUBPAGE)
3399 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3400 mmio, len, addr, idx);
3401#endif
blueswir1db7b5422007-05-26 17:36:03 +00003402
Avi Kivity5312bd82012-02-12 18:32:55 +02003403 section = &phys_sections[mmio->sub_section[idx]];
3404 addr += mmio->base;
3405 addr -= section->offset_within_address_space;
3406 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02003407 return io_mem_read(section->mr, addr, len);
blueswir1db7b5422007-05-26 17:36:03 +00003408}
3409
Avi Kivity70c68e42012-01-02 12:32:48 +02003410static void subpage_write(void *opaque, target_phys_addr_t addr,
3411 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003412{
Avi Kivity70c68e42012-01-02 12:32:48 +02003413 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003414 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02003415 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00003416#if defined(DEBUG_SUBPAGE)
Avi Kivity70c68e42012-01-02 12:32:48 +02003417 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
3418 " idx %d value %"PRIx64"\n",
Richard Hendersonf6405242010-04-22 16:47:31 -07003419 __func__, mmio, len, addr, idx, value);
blueswir1db7b5422007-05-26 17:36:03 +00003420#endif
Richard Hendersonf6405242010-04-22 16:47:31 -07003421
Avi Kivity5312bd82012-02-12 18:32:55 +02003422 section = &phys_sections[mmio->sub_section[idx]];
3423 addr += mmio->base;
3424 addr -= section->offset_within_address_space;
3425 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02003426 io_mem_write(section->mr, addr, value, len);
blueswir1db7b5422007-05-26 17:36:03 +00003427}
3428
Avi Kivity70c68e42012-01-02 12:32:48 +02003429static const MemoryRegionOps subpage_ops = {
3430 .read = subpage_read,
3431 .write = subpage_write,
3432 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00003433};
3434
Avi Kivityde712f92012-01-02 12:41:07 +02003435static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
3436 unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01003437{
3438 ram_addr_t raddr = addr;
3439 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02003440 switch (size) {
3441 case 1: return ldub_p(ptr);
3442 case 2: return lduw_p(ptr);
3443 case 4: return ldl_p(ptr);
3444 default: abort();
3445 }
Andreas Färber56384e82011-11-30 16:26:21 +01003446}
3447
Avi Kivityde712f92012-01-02 12:41:07 +02003448static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
3449 uint64_t value, unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01003450{
3451 ram_addr_t raddr = addr;
3452 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02003453 switch (size) {
3454 case 1: return stb_p(ptr, value);
3455 case 2: return stw_p(ptr, value);
3456 case 4: return stl_p(ptr, value);
3457 default: abort();
3458 }
Andreas Färber56384e82011-11-30 16:26:21 +01003459}
3460
Avi Kivityde712f92012-01-02 12:41:07 +02003461static const MemoryRegionOps subpage_ram_ops = {
3462 .read = subpage_ram_read,
3463 .write = subpage_ram_write,
3464 .endianness = DEVICE_NATIVE_ENDIAN,
Andreas Färber56384e82011-11-30 16:26:21 +01003465};
3466
Anthony Liguoric227f092009-10-01 16:12:16 -05003467static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02003468 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00003469{
3470 int idx, eidx;
3471
3472 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3473 return -1;
3474 idx = SUBPAGE_IDX(start);
3475 eidx = SUBPAGE_IDX(end);
3476#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00003477 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", __func__,
blueswir1db7b5422007-05-26 17:36:03 +00003478 mmio, start, end, idx, eidx, section);
3479#endif
Avi Kivity5312bd82012-02-12 18:32:55 +02003480 if (memory_region_is_ram(phys_sections[section].mr)) {
3481 MemoryRegionSection new_section = phys_sections[section];
3482 new_section.mr = &io_mem_subpage_ram;
3483 section = phys_section_add(&new_section);
Andreas Färber56384e82011-11-30 16:26:21 +01003484 }
blueswir1db7b5422007-05-26 17:36:03 +00003485 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02003486 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00003487 }
3488
3489 return 0;
3490}
3491
Avi Kivity0f0cb162012-02-13 17:14:32 +02003492static subpage_t *subpage_init(target_phys_addr_t base)
blueswir1db7b5422007-05-26 17:36:03 +00003493{
Anthony Liguoric227f092009-10-01 16:12:16 -05003494 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00003495
Anthony Liguori7267c092011-08-20 22:09:37 -05003496 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00003497
3498 mmio->base = base;
Avi Kivity70c68e42012-01-02 12:32:48 +02003499 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
3500 "subpage", TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02003501 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00003502#if defined(DEBUG_SUBPAGE)
aliguori1eec6142009-02-05 22:06:18 +00003503 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
 3504 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00003505#endif
Avi Kivity0f0cb162012-02-13 17:14:32 +02003506 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
blueswir1db7b5422007-05-26 17:36:03 +00003507
3508 return mmio;
3509}
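
/* Worked example (hypothetical numbers, TARGET_PAGE_SIZE == 0x1000):
 * a 0x100-byte device at physical 0x10000200 is registered as a
 * subpage with base == 0x10000000; sub_section[0x200..0x2ff] point at
 * the device's section, the rest at phys_section_unassigned. A read
 * at 0x10000240 enters subpage_read() with addr == 0x240, picks
 * idx == 0x240, and is forwarded to the device region at offset
 * 0x40 plus its offset_within_region. */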
3510
Avi Kivity5312bd82012-02-12 18:32:55 +02003511static uint16_t dummy_section(MemoryRegion *mr)
3512{
3513 MemoryRegionSection section = {
3514 .mr = mr,
3515 .offset_within_address_space = 0,
3516 .offset_within_region = 0,
3517 .size = UINT64_MAX,
3518 };
3519
3520 return phys_section_add(&section);
3521}
3522
Avi Kivity37ec01d2012-03-08 18:08:35 +02003523MemoryRegion *iotlb_to_region(target_phys_addr_t index)
Avi Kivityaa102232012-03-08 17:06:55 +02003524{
Avi Kivity37ec01d2012-03-08 18:08:35 +02003525 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02003526}
3527
Avi Kivitye9179ce2009-06-14 11:38:52 +03003528static void io_mem_init(void)
3529{
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003530 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003531 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
3532 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
3533 "unassigned", UINT64_MAX);
3534 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
3535 "notdirty", UINT64_MAX);
Avi Kivityde712f92012-01-02 12:41:07 +02003536 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
3537 "subpage-ram", UINT64_MAX);
Avi Kivity1ec9b902012-01-02 12:47:48 +02003538 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
3539 "watch", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003540}
3541
Avi Kivity50c1e142012-02-08 21:36:02 +02003542static void core_begin(MemoryListener *listener)
3543{
Avi Kivity54688b12012-02-09 17:34:32 +02003544 destroy_all_mappings();
Avi Kivity5312bd82012-02-12 18:32:55 +02003545 phys_sections_clear();
Avi Kivityc19e8802012-02-13 20:25:31 +02003546 phys_map.ptr = PHYS_MAP_NODE_NIL;
Avi Kivity5312bd82012-02-12 18:32:55 +02003547 phys_section_unassigned = dummy_section(&io_mem_unassigned);
Avi Kivityaa102232012-03-08 17:06:55 +02003548 phys_section_notdirty = dummy_section(&io_mem_notdirty);
3549 phys_section_rom = dummy_section(&io_mem_rom);
3550 phys_section_watch = dummy_section(&io_mem_watch);
Avi Kivity50c1e142012-02-08 21:36:02 +02003551}
3552
3553static void core_commit(MemoryListener *listener)
3554{
Andreas Färber9349b4f2012-03-14 01:38:32 +01003555 CPUArchState *env;
Avi Kivity117712c2012-02-12 21:23:17 +02003556
3557 /* since each CPU stores ram addresses in its TLB cache, we must
3558 reset the modified entries */
3559 /* XXX: slow ! */
3560 for(env = first_cpu; env != NULL; env = env->next_cpu) {
3561 tlb_flush(env, 1);
3562 }
Avi Kivity50c1e142012-02-08 21:36:02 +02003563}
3564
Avi Kivity93632742012-02-08 16:54:16 +02003565static void core_region_add(MemoryListener *listener,
3566 MemoryRegionSection *section)
3567{
Avi Kivity4855d412012-02-08 21:16:05 +02003568 cpu_register_physical_memory_log(section, section->readonly);
Avi Kivity93632742012-02-08 16:54:16 +02003569}
3570
3571static void core_region_del(MemoryListener *listener,
3572 MemoryRegionSection *section)
3573{
Avi Kivity93632742012-02-08 16:54:16 +02003574}
3575
Avi Kivity50c1e142012-02-08 21:36:02 +02003576static void core_region_nop(MemoryListener *listener,
3577 MemoryRegionSection *section)
3578{
Avi Kivity54688b12012-02-09 17:34:32 +02003579 cpu_register_physical_memory_log(section, section->readonly);
Avi Kivity50c1e142012-02-08 21:36:02 +02003580}
3581
Avi Kivity93632742012-02-08 16:54:16 +02003582static void core_log_start(MemoryListener *listener,
3583 MemoryRegionSection *section)
3584{
3585}
3586
3587static void core_log_stop(MemoryListener *listener,
3588 MemoryRegionSection *section)
3589{
3590}
3591
3592static void core_log_sync(MemoryListener *listener,
3593 MemoryRegionSection *section)
3594{
3595}
3596
3597static void core_log_global_start(MemoryListener *listener)
3598{
3599 cpu_physical_memory_set_dirty_tracking(1);
3600}
3601
3602static void core_log_global_stop(MemoryListener *listener)
3603{
3604 cpu_physical_memory_set_dirty_tracking(0);
3605}
3606
3607static void core_eventfd_add(MemoryListener *listener,
3608 MemoryRegionSection *section,
3609 bool match_data, uint64_t data, int fd)
3610{
3611}
3612
3613static void core_eventfd_del(MemoryListener *listener,
3614 MemoryRegionSection *section,
3615 bool match_data, uint64_t data, int fd)
3616{
3617}
3618
Avi Kivity50c1e142012-02-08 21:36:02 +02003619static void io_begin(MemoryListener *listener)
3620{
3621}
3622
3623static void io_commit(MemoryListener *listener)
3624{
3625}
3626
Avi Kivity4855d412012-02-08 21:16:05 +02003627static void io_region_add(MemoryListener *listener,
3628 MemoryRegionSection *section)
3629{
Avi Kivitya2d33522012-03-05 17:40:12 +02003630 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
3631
3632 mrio->mr = section->mr;
3633 mrio->offset = section->offset_within_region;
3634 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
Avi Kivity4855d412012-02-08 21:16:05 +02003635 section->offset_within_address_space, section->size);
Avi Kivitya2d33522012-03-05 17:40:12 +02003636 ioport_register(&mrio->iorange);
Avi Kivity4855d412012-02-08 21:16:05 +02003637}
3638
3639static void io_region_del(MemoryListener *listener,
3640 MemoryRegionSection *section)
3641{
3642 isa_unassign_ioport(section->offset_within_address_space, section->size);
3643}
3644
Avi Kivity50c1e142012-02-08 21:36:02 +02003645static void io_region_nop(MemoryListener *listener,
3646 MemoryRegionSection *section)
3647{
3648}
3649
Avi Kivity4855d412012-02-08 21:16:05 +02003650static void io_log_start(MemoryListener *listener,
3651 MemoryRegionSection *section)
3652{
3653}
3654
3655static void io_log_stop(MemoryListener *listener,
3656 MemoryRegionSection *section)
3657{
3658}
3659
3660static void io_log_sync(MemoryListener *listener,
3661 MemoryRegionSection *section)
3662{
3663}
3664
3665static void io_log_global_start(MemoryListener *listener)
3666{
3667}
3668
3669static void io_log_global_stop(MemoryListener *listener)
3670{
3671}
3672
3673static void io_eventfd_add(MemoryListener *listener,
3674 MemoryRegionSection *section,
3675 bool match_data, uint64_t data, int fd)
3676{
3677}
3678
3679static void io_eventfd_del(MemoryListener *listener,
3680 MemoryRegionSection *section,
3681 bool match_data, uint64_t data, int fd)
3682{
3683}
3684
Avi Kivity93632742012-02-08 16:54:16 +02003685static MemoryListener core_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02003686 .begin = core_begin,
3687 .commit = core_commit,
Avi Kivity93632742012-02-08 16:54:16 +02003688 .region_add = core_region_add,
3689 .region_del = core_region_del,
Avi Kivity50c1e142012-02-08 21:36:02 +02003690 .region_nop = core_region_nop,
Avi Kivity93632742012-02-08 16:54:16 +02003691 .log_start = core_log_start,
3692 .log_stop = core_log_stop,
3693 .log_sync = core_log_sync,
3694 .log_global_start = core_log_global_start,
3695 .log_global_stop = core_log_global_stop,
3696 .eventfd_add = core_eventfd_add,
3697 .eventfd_del = core_eventfd_del,
3698 .priority = 0,
3699};
3700
Avi Kivity4855d412012-02-08 21:16:05 +02003701static MemoryListener io_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02003702 .begin = io_begin,
3703 .commit = io_commit,
Avi Kivity4855d412012-02-08 21:16:05 +02003704 .region_add = io_region_add,
3705 .region_del = io_region_del,
Avi Kivity50c1e142012-02-08 21:36:02 +02003706 .region_nop = io_region_nop,
Avi Kivity4855d412012-02-08 21:16:05 +02003707 .log_start = io_log_start,
3708 .log_stop = io_log_stop,
3709 .log_sync = io_log_sync,
3710 .log_global_start = io_log_global_start,
3711 .log_global_stop = io_log_global_stop,
3712 .eventfd_add = io_eventfd_add,
3713 .eventfd_del = io_eventfd_del,
3714 .priority = 0,
3715};
3716
Avi Kivity62152b82011-07-26 14:26:14 +03003717static void memory_map_init(void)
3718{
Anthony Liguori7267c092011-08-20 22:09:37 -05003719 system_memory = g_malloc(sizeof(*system_memory));
Avi Kivity8417ceb2011-08-03 11:56:14 +03003720 memory_region_init(system_memory, "system", INT64_MAX);
Avi Kivity62152b82011-07-26 14:26:14 +03003721 set_system_memory_map(system_memory);
Avi Kivity309cb472011-08-08 16:09:03 +03003722
Anthony Liguori7267c092011-08-20 22:09:37 -05003723 system_io = g_malloc(sizeof(*system_io));
Avi Kivity309cb472011-08-08 16:09:03 +03003724 memory_region_init(system_io, "io", 65536);
3725 set_system_io_map(system_io);
Avi Kivity93632742012-02-08 16:54:16 +02003726
Avi Kivity4855d412012-02-08 21:16:05 +02003727 memory_listener_register(&core_memory_listener, system_memory);
3728 memory_listener_register(&io_memory_listener, system_io);
Avi Kivity62152b82011-07-26 14:26:14 +03003729}
3730
3731MemoryRegion *get_system_memory(void)
3732{
3733 return system_memory;
3734}
3735
Avi Kivity309cb472011-08-08 16:09:03 +03003736MemoryRegion *get_system_io(void)
3737{
3738 return system_io;
3739}
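
/* Usage sketch: board code fetches the root regions and hangs device
 * regions off them (names and addresses hypothetical):
 *
 *     memory_region_init_io(&s->iomem, &mydev_ops, s, "mydev", 0x1000);
 *     memory_region_add_subregion(get_system_memory(), 0x10001000,
 *                                 &s->iomem);
 *
 * The core/io listeners registered in memory_map_init() then see the
 * new section and rebuild the physical page map around it. */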
3740
#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    MemoryRegionSection *section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        if (is_write) {
            if (!memory_region_is_ram(section->mr)) {
                target_phys_addr_t addr1;
                addr1 = section_addr(section, addr);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write(section->mr, addr1, val, 4);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write(section->mr, addr1, val, 2);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write(section->mr, addr1, val, 1);
                    l = 1;
                }
            } else if (!section->readonly) {
                ram_addr_t addr1;
                addr1 = memory_region_get_ram_addr(section->mr)
                    + section_addr(section, addr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                qemu_put_ram_ptr(ptr);
            }
        } else {
            if (!is_ram_rom_romd(section)) {
                target_phys_addr_t addr1;
                /* I/O case */
                addr1 = section_addr(section, addr);
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read(section->mr, addr1, 4);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read(section->mr, addr1, 2);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read(section->mr, addr1, 1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(section->mr->ram_addr
                                       + section_addr(section, addr));
                memcpy(buf, ptr, l);
                qemu_put_ram_ptr(ptr);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

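/* Illustrative sketch (not in the original file): a hypothetical device
 * model pulling a 16-byte descriptor out of guest physical memory with
 * cpu_physical_memory_rw(); "desc_paddr" is a made-up parameter. */
static void example_read_descriptor(target_phys_addr_t desc_paddr)
{
    uint8_t desc[16];

    /* is_write == 0: copy from guest memory into our local buffer. */
    cpu_physical_memory_rw(desc_paddr, desc, sizeof(desc), 0);
}
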
/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    MemoryRegionSection *section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        if (!is_ram_rom_romd(section)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = memory_region_get_ram_addr(section->mr)
                + section_addr(section, addr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            qemu_put_ram_ptr(ptr);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

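/* Illustrative sketch (not in the original file): a hypothetical firmware
 * loader pushing a blob into a ROM region; a plain cpu_physical_memory_rw()
 * write would be refused for read-only memory, so the ROM-capable variant
 * above is used instead. */
static void example_load_firmware(target_phys_addr_t rom_base,
                                  const uint8_t *blob, int blob_len)
{
    cpu_physical_memory_write_rom(rom_base, blob, blob_len);
}
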
typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}

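/* Illustrative sketch (not in the original file): a hypothetical device
 * whose cpu_physical_memory_map() attempt failed (e.g. the single bounce
 * buffer was busy) registers a map client so it is called back when a
 * retry is likely to succeed. */
static void example_map_retry_cb(void *opaque)
{
    /* re-issue the deferred cpu_physical_memory_map() for 'opaque' here */
}

static void example_defer_mapping(void *dev)
{
    cpu_register_map_client(dev, example_map_retry_cb);
}
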
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t todo = 0;
    int l;
    target_phys_addr_t page;
    MemoryRegionSection *section;
    ram_addr_t raddr = RAM_ADDR_MAX;
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_read(addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            raddr = memory_region_get_ram_addr(section->mr)
                + section_addr(section, addr);
        }

        len -= l;
        addr += l;
        todo += l;
    }
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}

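/* Illustrative sketch (not in the original file): the canonical
 * map/do-DMA/unmap pattern. Only *plen bytes are guaranteed to be mapped,
 * so a real caller would loop; "dma_addr" and "data" are made-up names. */
static void example_dma_write(target_phys_addr_t dma_addr,
                              const uint8_t *data, target_phys_addr_t size)
{
    target_phys_addr_t plen = size;
    void *host = cpu_physical_memory_map(dma_addr, &plen, 1);

    if (!host) {
        /* Out of resources: see cpu_register_map_client() above. */
        return;
    }
    memcpy(host, data, plen);
    /* access_len == plen: everything we mapped was actually written. */
    cpu_physical_memory_unmap(host, plen, 1, plen);
}
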
/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint32_t val;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!is_ram_rom_romd(section)) {
        /* I/O case */
        addr = section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

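/* Illustrative sketch (not in the original file): a hypothetical device
 * model reading a little-endian descriptor word from guest memory;
 * ldl_le_phys() yields the same value on big- and little-endian targets. */
static uint32_t example_read_ring_head(target_phys_addr_t ring_base)
{
    return ldl_le_phys(ring_base);
}
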
/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!is_ram_rom_romd(section)) {
        /* I/O case */
        addr = section_addr(section, addr);

        /* XXX This is broken when device endian != cpu endian.
           Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = io_mem_read(section->mr, addr, 4) << 32;
        val |= io_mem_read(section->mr, addr + 4, 4);
#else
        val = io_mem_read(section->mr, addr, 4);
        val |= io_mem_read(section->mr, addr + 4, 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!is_ram_rom_romd(section)) {
        /* I/O case */
        addr = section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
                               & TARGET_PAGE_MASK)
            + section_addr(section, addr);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}

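/* Illustrative sketch (not in the original file): a hypothetical target-MMU
 * helper setting the "accessed" bit in a 32-bit PTE. Using
 * stl_phys_notdirty() keeps the dirty bitmap tracking guest-visible
 * modifications only, as the comment above describes. */
static void example_set_pte_accessed(target_phys_addr_t pte_addr,
                                     uint32_t accessed_bit)
{
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & accessed_bit)) {
        stl_phys_notdirty(pte_addr, pte | accessed_bit);
    }
}
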
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write(section->mr, addr, val >> 32, 4);
        io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
#else
        io_mem_write(section->mr, addr, (uint32_t)val, 4);
        io_mem_write(section->mr, addr + 4, val >> 32, 4);
#endif
    } else {
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + section_addr(section, addr));
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 2);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif

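/* Illustrative sketch (not in the original file): how a hypothetical
 * debugger stub might plant a software breakpoint. cpu_memory_rw_debug()
 * works on virtual addresses and, unlike cpu_physical_memory_rw(), can
 * write into ROM; the 0xcc byte is just the x86 int3 example. */
static int example_insert_sw_breakpoint(CPUArchState *env, target_ulong pc)
{
    uint8_t bp_insn[1] = { 0xcc };

    return cpu_memory_rw_debug(env, pc, bp_insn, sizeof(bp_insn), 1);
}
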
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc(retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred. */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn. */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB. If this is not
       the first instruction in a TB then re-execute the preceding
       branch. */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen. */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception. In practice
       we have already translated the block once so it's probably ok. */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB. */
    cpu_resume_from_signal(env, NULL);
}

#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ?
                    (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

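/* Illustrative sketch (not in the original file): dumping the JIT
 * statistics to stderr from a hypothetical debugging hook; plain fprintf
 * matches the fprintf_function prototype expected here. */
static void example_dump_jit_stats(void)
{
    dump_exec_info(stderr, fprintf);
}
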
/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
   is a ram_addr_t offset into the RAM backing (historically, relative
   to phys_ram_base) */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    void *p;
    MemoryRegion *mr;

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1);
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
#ifdef CONFIG_TCG_PASS_AREG0
        cpu_ldub_code(env1, addr);
#else
        ldub_code(addr);
#endif
    }
    pd = env1->iotlb[mmu_idx][page_index] & ~TARGET_PAGE_MASK;
    mr = iotlb_to_region(pd);
    if (mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch) {
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC)
        cpu_unassigned_access(env1, addr, 0, 1, 0, 4);
#else
        cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x"
                  TARGET_FMT_lx "\n", addr);
#endif
    }
    p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}

/*
 * A helper function for the _utterly broken_ virtio device model to find out
 * if it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() ((uintptr_t)0)
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif