/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation. */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump.  ARM and Sparc64
   have limited branch ranges (possibly also PPC), so place it in a
   section close to the code segment. */
#define code_gen_section                            \
    __attribute__((__section__(".gen_code")))       \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                            \
    __attribute__((aligned (16)))
#else
#define code_gen_section                            \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting. */
int use_icount = 0;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* to optimize handling of self-modifying code, we count the number
       of write accesses to a given page; past a threshold a bitmap of
       the code present on the page is built */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses. */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables. */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

#define P_L2_LEVELS \
    (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)

/* The bits remaining after N lower levels of page tables. */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
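
/* Worked example (illustrative values, not from any particular build):
   if L1_MAP_ADDR_SPACE_BITS were 64 and TARGET_PAGE_BITS 12, then
   V_L1_BITS_REM = (64 - 12) % 10 = 2; since 2 < 4 the remainder is
   folded into the first level, giving V_L1_BITS = 12, V_L1_SIZE = 4096
   entries, and V_L1_SHIFT = 64 - 12 - 12 = 40, i.e. 40 / 10 = 4 levels
   of L2_SIZE-entry tables below the statically allocated l1_map. */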

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc. */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageEntry PhysPageEntry;

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;

struct PhysPageEntry {
    /* index into phys_sections (last level) or phys_map_nodes (others) */
    uint16_t ptr;
};

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL ((uint16_t)~0)

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to MemoryRegionSections. */
static PhysPageEntry phys_map = { .ptr = PHYS_MAP_NODE_NIL };
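
/* Note: entries refer to nodes and sections by 16-bit index rather
   than by pointer, which keeps each PhysPageEntry down to two bytes;
   the trade-off is a hard cap of 65535 map nodes and sections, with
   PHYS_MAP_NODE_NIL reserved as the "absent" marker. */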

static void io_mem_init(void);
static void memory_map_init(void);

/* io memory support */
MemoryRegion *io_mem_region[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static MemoryRegion io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
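
/* Example of the rounding above (illustrative numbers only): with
   4096-byte host pages, map_exec((void *)0x401100, 0x2000) widens the
   range to [0x401000, 0x404000) so that mprotect() is applied to the
   whole pages covering the requested region. */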

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}
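
/* Sketch of the walk above, under the same illustrative configuration
   as the V_L1_SHIFT comment (V_L1_SHIFT = 40, V_L1_BITS = 12): bits
   [51..40] of the page index select the l1_map slot, three successive
   10-bit fields select the intermediate tables, and the low 10 bits
   pick the PageDesc inside the leaf array. */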

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}

static void phys_page_set_level(PhysPageEntry *lp, target_phys_addr_t *index,
                                target_phys_addr_t *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if (level == 0) {
            lp->ptr = leaf;
            ++*index;
            --*nb;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(target_phys_addr_t index, target_phys_addr_t nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve((nb + L2_SIZE - 1) / L2_SIZE * P_L2_LEVELS);

    phys_page_set_level(&phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
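
/* Hypothetical usage sketch (section_idx stands for an index obtained
   when a MemoryRegionSection was appended to phys_sections):

       phys_page_set(start >> TARGET_PAGE_BITS,
                     size >> TARGET_PAGE_BITS, section_idx);

   phys_page_set_level() then descends from the root once per level,
   allocating nodes on demand and stamping 'leaf' into whole runs of
   level-0 entries. */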

static MemoryRegionSection phys_page_find(target_phys_addr_t index)
{
    PhysPageEntry lp = phys_map;
    PhysPageEntry *p;
    int i;
    MemoryRegionSection section;
    target_phys_addr_t delta;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    section = phys_sections[s_index];
    index <<= TARGET_PAGE_BITS;
    assert(section.offset_within_address_space <= index
           && index <= section.offset_within_address_space + section.size-1);
    delta = index - section.offset_within_address_space;
    section.offset_within_address_space += delta;
    section.offset_within_region += delta;
    section.size -= delta;
    return section;
}
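
/* Note: phys_page_find() returns the section by value, clipped so that
   it begins exactly at the looked-up page; the delta adjustment above
   advances both offsets and shrinks the size to match.  A page with no
   mapping falls through to the phys_section_unassigned entry. */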

static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode.  This will change when a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Keep the buffer no bigger than 16MB to branch between blocks */
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches. */
        /* We have a +- 4GB range on the branches; leave some slop. */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
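
/* Note the slack built in above: code_gen_buffer_max_size holds back
   TCG_MAX_OP_SIZE * OPC_BUF_SIZE bytes, so the translator can always
   finish the block it is currently emitting before tb_alloc() starts
   reporting the buffer as full. */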

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now. */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block.  Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated. */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}
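
/* TB storage is effectively a bump allocator over tbs[] and
   code_gen_buffer: tb_alloc() only appends, tb_free() can only undo
   the most recent allocation, and everything else is reclaimed
   wholesale by tb_flush(). */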

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}
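
/* The lists walked below keep a tag in the low two bits of each
   TranslationBlock pointer: in the per-page lists the tag is the page
   slot (0 or 1) whose page_next[] link continues the chain in the next
   TB, and in the jump lists the value 2 marks the anchor of the
   circular list.  Hence the recurring "(long)tb & 3" /
   "(long)tb & ~3" unpacking. */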

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
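
/* Worked example: set_bits(tab, 3, 7) marks bits 3..9.  start and end
   fall in different bytes, so the first byte is ORed with mask 0xf8
   (bits 3..7), no full 0xff bytes lie in between, and the trailing
   byte gets ~(0xff << 2) = 0x03 (bits 8..9). */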

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
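
/* A TB may straddle a target page boundary, which is why tb_gen_code()
   also computes phys_page2 for the page holding the last byte of the
   block; tb_link_page() then registers the TB on both pages so that a
   write to either one invalidates it. */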

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
1336
bellard9fa3e852004-01-04 18:06:42 +00001337/* add a new TB and link it to the physical page tables. phys_page2 is
1338 (-1) to indicate that only one page contains the TB. */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001339void tb_link_page(TranslationBlock *tb,
1340 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
bellardd4e81642003-05-25 16:46:15 +00001341{
bellard9fa3e852004-01-04 18:06:42 +00001342 unsigned int h;
1343 TranslationBlock **ptb;
1344
pbrookc8a706f2008-06-02 16:16:42 +00001345 /* Grab the mmap lock to stop another thread invalidating this TB
1346 before we are done. */
1347 mmap_lock();
bellard9fa3e852004-01-04 18:06:42 +00001348 /* add in the physical hash table */
1349 h = tb_phys_hash_func(phys_pc);
1350 ptb = &tb_phys_hash[h];
1351 tb->phys_hash_next = *ptb;
1352 *ptb = tb;
bellardfd6ce8f2003-05-14 19:00:11 +00001353
1354 /* add in the page list */
bellard9fa3e852004-01-04 18:06:42 +00001355 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1356 if (phys_page2 != -1)
1357 tb_alloc_page(tb, 1, phys_page2);
1358 else
1359 tb->page_addr[1] = -1;
bellard9fa3e852004-01-04 18:06:42 +00001360
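    /* An empty jump list is encoded as a pointer back to the TB itself
       with bit 1 set; list walkers stop when they find n1 == 2. */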
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
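    /* No exact match: tc_ptr falls inside the TB with the largest tc_ptr
       below it, which the search leaves in m_max. */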
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

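    /* If jump slot n is chained to another TB, unlink this TB from the
       target's circular jmp_first list, restore the direct jump, and
       recurse into the target. */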
    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    ram_addr_t ram_addr;
    MemoryRegionSection section;

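    /* Translate the virtual PC to a ram address so the TBs covering it
       can be invalidated; pages not backed by RAM or ROM hold no
       translated code, so nothing needs to be done for them. */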
    addr = cpu_get_phys_page_debug(env, pc);
    section = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!(memory_region_is_ram(section.mr)
          || (section.mr->rom_device && section.mr->readable))) {
        return;
    }
    ram_addr = (memory_region_get_ram_addr(section.mr)
                + section.offset_within_region) & TARGET_PAGE_MASK;
    ram_addr |= (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

/* enable or disable low-level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif defined(_WIN32)
        /* Win32 doesn't support line-buffering, so use unbuffered output. */
        setvbuf(logfile, NULL, _IONBF, 0);
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}

#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
        return;
    }

    if (use_icount) {
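        /* Make the next icount check fail immediately so the CPU leaves
           the translated-code loop without executing further guest
           instructions. */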
        env->icount_decr.u16.high = 0xffff;
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu_unlink_tb(env);
    }
}

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *env, int mask)
{
    env->interrupt_request |= mask;
    cpu_unlink_tb(env);
}
#endif /* CONFIG_USER_ONLY */

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}

const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma-separated list of log masks. Return 0 if error. */
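/* e.g. "exec,cpu" yields CPU_LOG_EXEC | CPU_LOG_TB_CPU; the special name
   "all" enables every mask listed in cpu_log_items. */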
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page. */
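    /* A TB starting on the preceding page may extend into the flushed
       page, so that page's bucket range is cleared as well. */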
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

static CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read  = -1,
    .addr_write = -1,
    .addr_code  = -1,
    .addend     = -1,
};

/* NOTE:
 * If flush_global is true (the usual case), flush all tlb entries.
 * If flush_global is false, flush (at least) all tlb entries not
 * marked global.
 *
 * Since QEMU doesn't currently implement a global/not-global flag
 * for tlb entries, at the moment tlb_flush() will also flush all
 * tlb entries in the flush_global == false case. This is OK because
 * CPU architectures generally permit an implementation to drop
 * entries from the TLB at any time, so flushing more entries than
 * required is only an efficiency issue, not a correctness issue.
 */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
        }
    }

    memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
    tlb_flush_count++;
}

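/* Reset one TLB entry, but only if it maps the flushed page; keeping
   TLB_INVALID_MASK in the masked compare means an already-invalid entry
   never matches a valid address. */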
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        *tlb_entry = s_cputlb_empty_entry;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* Check if we need to flush due to large pages. */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        tlb_flush(env, 1);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

    tlb_flush_jmp_cache(env, addr);
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for(i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
        }
    }
}

int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;
    void *p;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
            + tlb_entry->addend);
        ram_addr = qemu_ram_addr_from_host_nofail(p);
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    int mmu_idx;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
    }
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated. */
static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB. */
    mask &= env->tlb_flush_mask;
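    /* Widen the mask until both the old flush region and the new page fit
       within one naturally aligned power-of-two region. */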
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}

static bool is_ram_rom(MemoryRegionSection *s)
{
    return memory_region_is_ram(s->mr);
}

static bool is_romd(MemoryRegionSection *s)
{
    MemoryRegion *mr = s->mr;

    return mr->rom_device && mr->readable;
}

static bool is_ram_rom_romd(MemoryRegionSection *s)
{
    return is_ram_rom(s) || is_romd(s);
}

/* Add a new TLB entry. At most one entry for a given virtual address
   is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
   supplied size is only used by tlb_flush_page. */
void tlb_set_page(CPUState *env, target_ulong vaddr,
                  target_phys_addr_t paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    MemoryRegionSection section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    unsigned long addend;
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    target_phys_addr_t iotlb;

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }
    section = phys_page_find(paddr >> TARGET_PAGE_BITS);
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
           " prot=%x idx=%d\n",
           vaddr, paddr, prot, mmu_idx);
#endif

    address = vaddr;
    if (!is_ram_rom_romd(&section)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    if (is_ram_rom_romd(&section)) {
        addend = (unsigned long)(memory_region_get_ram_ptr(section.mr)
                                 + section.offset_within_region);
    } else {
        addend = 0;
    }
    if (is_ram_rom(&section)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section.mr)
                 + section.offset_within_region) & TARGET_PAGE_MASK;
        if (!section.readonly)
            iotlb |= io_mem_notdirty.ram_addr;
        else
            iotlb |= io_mem_rom.ram_addr;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = memory_region_get_ram_addr(section.mr) & ~TARGET_PAGE_MASK;
        iotlb += section.offset_within_region;
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = io_mem_watch.ram_addr + paddr;
                address |= TLB_MMIO;
                break;
            }
        }
    }

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
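    /* The addend is chosen so that adding it to a guest virtual address
       within the page yields the host address for RAM/ROM-backed
       mappings; MMIO accesses never use it. */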
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section.mr) && section.readonly)
            || is_romd(&section)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section.mr)
                   && !cpu_physical_memory_is_dirty(
                           section.mr->ram_addr
                           + section.offset_within_region)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */

struct walk_memory_regions_data
{
    walk_memory_regions_fn fn;
    void *priv;
    unsigned long start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   abi_ulong end, int new_prot)
{
    if (data->start != -1ul) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1ul);
    data->prot = new_prot;

    return 0;
}

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

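    /* Walk one level of the radix page table: level 0 nodes hold PageDesc
       entries directly, higher levels hold pointers to the level below. */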
2320 if (*lp == NULL) {
2321 return walk_memory_regions_end(data, base, 0);
2322 }
2323
2324 if (level == 0) {
2325 PageDesc *pd = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00002326 for (i = 0; i < L2_SIZE; ++i) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002327 int prot = pd[i].flags;
2328
2329 pa = base | (i << TARGET_PAGE_BITS);
2330 if (prot != data->prot) {
2331 rc = walk_memory_regions_end(data, pa, prot);
2332 if (rc != 0) {
2333 return rc;
2334 }
2335 }
2336 }
2337 } else {
2338 void **pp = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00002339 for (i = 0; i < L2_SIZE; ++i) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002340 pa = base | ((abi_ulong)i <<
2341 (TARGET_PAGE_BITS + L2_BITS * level));
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002342 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2343 if (rc != 0) {
2344 return rc;
2345 }
2346 }
2347 }
2348
2349 return 0;
2350}
2351
2352int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2353{
2354 struct walk_memory_regions_data data;
2355 unsigned long i;
2356
2357 data.fn = fn;
2358 data.priv = priv;
2359 data.start = -1ul;
2360 data.prot = 0;
2361
2362 for (i = 0; i < V_L1_SIZE; i++) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002363 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002364 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2365 if (rc != 0) {
2366 return rc;
2367 }
2368 }
2369
2370 return walk_memory_regions_end(&data, 0, 0);
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002371}
2372
Paul Brookb480d9b2010-03-12 23:23:29 +00002373static int dump_region(void *priv, abi_ulong start,
2374 abi_ulong end, unsigned long prot)
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002375{
2376 FILE *f = (FILE *)priv;
2377
Paul Brookb480d9b2010-03-12 23:23:29 +00002378 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2379 " "TARGET_ABI_FMT_lx" %c%c%c\n",
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002380 start, end, end - start,
2381 ((prot & PAGE_READ) ? 'r' : '-'),
2382 ((prot & PAGE_WRITE) ? 'w' : '-'),
2383 ((prot & PAGE_EXEC) ? 'x' : '-'));
2384
2385 return (0);
2386}
2387
2388/* dump memory mappings */
2389void page_dump(FILE *f)
2390{
2391 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2392 "start", "end", "size", "prot");
2393 walk_memory_regions(f, dump_region);
bellard33417e72003-08-10 21:47:01 +00002394}
2395
pbrook53a59602006-03-25 19:31:22 +00002396int page_get_flags(target_ulong address)
bellard33417e72003-08-10 21:47:01 +00002397{
bellard9fa3e852004-01-04 18:06:42 +00002398 PageDesc *p;
2399
2400 p = page_find(address >> TARGET_PAGE_BITS);
bellard33417e72003-08-10 21:47:01 +00002401 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00002402 return 0;
2403 return p->flags;
bellard33417e72003-08-10 21:47:01 +00002404}
2405
Richard Henderson376a7902010-03-10 15:57:04 -08002406/* Modify the flags of a page and invalidate the code if necessary.
2407 The flag PAGE_WRITE_ORG is positioned automatically depending
2408 on PAGE_WRITE. The mmap_lock should already be held. */
pbrook53a59602006-03-25 19:31:22 +00002409void page_set_flags(target_ulong start, target_ulong end, int flags)
bellard9fa3e852004-01-04 18:06:42 +00002410{
Richard Henderson376a7902010-03-10 15:57:04 -08002411 target_ulong addr, len;
bellard9fa3e852004-01-04 18:06:42 +00002412
Richard Henderson376a7902010-03-10 15:57:04 -08002413 /* This function should never be called with addresses outside the
2414 guest address space. If this assert fires, it probably indicates
2415 a missing call to h2g_valid. */
Paul Brookb480d9b2010-03-12 23:23:29 +00002416#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2417 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002418#endif
2419 assert(start < end);
2420
bellard9fa3e852004-01-04 18:06:42 +00002421 start = start & TARGET_PAGE_MASK;
2422 end = TARGET_PAGE_ALIGN(end);
Richard Henderson376a7902010-03-10 15:57:04 -08002423
2424 if (flags & PAGE_WRITE) {
bellard9fa3e852004-01-04 18:06:42 +00002425 flags |= PAGE_WRITE_ORG;
Richard Henderson376a7902010-03-10 15:57:04 -08002426 }
2427
2428 for (addr = start, len = end - start;
2429 len != 0;
2430 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2431 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2432
2433 /* If the write protection bit is set, then we invalidate
2434 the code inside. */
ths5fafdf22007-09-16 21:08:06 +00002435 if (!(p->flags & PAGE_WRITE) &&
bellard9fa3e852004-01-04 18:06:42 +00002436 (flags & PAGE_WRITE) &&
2437 p->first_tb) {
bellardd720b932004-04-25 17:57:43 +00002438 tb_invalidate_phys_page(addr, 0, NULL);
bellard9fa3e852004-01-04 18:06:42 +00002439 }
2440 p->flags = flags;
2441 }
bellard9fa3e852004-01-04 18:06:42 +00002442}
2443
ths3d97b402007-11-02 19:02:07 +00002444int page_check_range(target_ulong start, target_ulong len, int flags)
2445{
2446 PageDesc *p;
2447 target_ulong end;
2448 target_ulong addr;
2449
Richard Henderson376a7902010-03-10 15:57:04 -08002450 /* This function should never be called with addresses outside the
2451 guest address space. If this assert fires, it probably indicates
2452 a missing call to h2g_valid. */
Blue Swirl338e9e62010-03-13 09:48:08 +00002453#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2454 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002455#endif
2456
Richard Henderson3e0650a2010-03-29 10:54:42 -07002457 if (len == 0) {
2458 return 0;
2459 }
Richard Henderson376a7902010-03-10 15:57:04 -08002460 if (start + len - 1 < start) {
2461 /* We've wrapped around. */
balrog55f280c2008-10-28 10:24:11 +00002462 return -1;
Richard Henderson376a7902010-03-10 15:57:04 -08002463 }
balrog55f280c2008-10-28 10:24:11 +00002464
ths3d97b402007-11-02 19:02:07 +00002465 end = TARGET_PAGE_ALIGN(start+len); /* must do before we loose bits in the next step */
2466 start = start & TARGET_PAGE_MASK;
2467
Richard Henderson376a7902010-03-10 15:57:04 -08002468 for (addr = start, len = end - start;
2469 len != 0;
2470 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
ths3d97b402007-11-02 19:02:07 +00002471 p = page_find(addr >> TARGET_PAGE_BITS);
2472 if( !p )
2473 return -1;
2474 if( !(p->flags & PAGE_VALID) )
2475 return -1;
2476
bellarddae32702007-11-14 10:51:00 +00002477 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
ths3d97b402007-11-02 19:02:07 +00002478 return -1;
bellarddae32702007-11-14 10:51:00 +00002479 if (flags & PAGE_WRITE) {
2480 if (!(p->flags & PAGE_WRITE_ORG))
2481 return -1;
2482 /* unprotect the page if it was put read-only because it
2483 contains translated code */
2484 if (!(p->flags & PAGE_WRITE)) {
2485 if (!page_unprotect(addr, 0, NULL))
2486 return -1;
2487 }
2488 return 0;
2489 }
ths3d97b402007-11-02 19:02:07 +00002490 }
2491 return 0;
2492}
2493
bellard9fa3e852004-01-04 18:06:42 +00002494/* called from signal handler: invalidate the code and unprotect the
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002495 page. Return TRUE if the fault was successfully handled. */
pbrook53a59602006-03-25 19:31:22 +00002496int page_unprotect(target_ulong address, unsigned long pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00002497{
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002498 unsigned int prot;
2499 PageDesc *p;
pbrook53a59602006-03-25 19:31:22 +00002500 target_ulong host_start, host_end, addr;
bellard9fa3e852004-01-04 18:06:42 +00002501
pbrookc8a706f2008-06-02 16:16:42 +00002502 /* Technically this isn't safe inside a signal handler. However we
2503 know this only ever happens in a synchronous SEGV handler, so in
2504 practice it seems to be ok. */
2505 mmap_lock();
2506
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002507 p = page_find(address >> TARGET_PAGE_BITS);
2508 if (!p) {
pbrookc8a706f2008-06-02 16:16:42 +00002509 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002510 return 0;
pbrookc8a706f2008-06-02 16:16:42 +00002511 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002512
bellard9fa3e852004-01-04 18:06:42 +00002513 /* if the page was really writable, then we change its
2514 protection back to writable */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002515 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2516 host_start = address & qemu_host_page_mask;
2517 host_end = host_start + qemu_host_page_size;
2518
2519 prot = 0;
2520 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2521 p = page_find(addr >> TARGET_PAGE_BITS);
2522 p->flags |= PAGE_WRITE;
2523 prot |= p->flags;
2524
bellard9fa3e852004-01-04 18:06:42 +00002525 /* and since the content will be modified, we must invalidate
2526 the corresponding translated code. */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002527 tb_invalidate_phys_page(addr, pc, puc);
bellard9fa3e852004-01-04 18:06:42 +00002528#ifdef DEBUG_TB_CHECK
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002529 tb_invalidate_check(addr);
bellard9fa3e852004-01-04 18:06:42 +00002530#endif
bellard9fa3e852004-01-04 18:06:42 +00002531 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002532 mprotect((void *)g2h(host_start), qemu_host_page_size,
2533 prot & PAGE_BITS);
2534
2535 mmap_unlock();
2536 return 1;
bellard9fa3e852004-01-04 18:06:42 +00002537 }
pbrookc8a706f2008-06-02 16:16:42 +00002538 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002539 return 0;
2540}
2541
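/* Sketch of the expected caller (hedged; the real one lives in the
   per-host signal handling code, and handle_write_fault is a made-up
   name): on SIGSEGV, if the faulting host address maps to a guest
   page, try page_unprotect() and retry the access when it returns 1. */
#if 0
static int handle_write_fault(unsigned long host_addr,
                              unsigned long pc, void *puc)
{
    if (!h2g_valid(host_addr)) {
        return 0;               /* fault not in the guest address space */
    }
    /* returns 1 if the page held translated code and was unprotected */
    return page_unprotect(h2g(host_addr), pc, puc);
}
#endif
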
bellard6a00d602005-11-21 23:25:50 +00002542static inline void tlb_set_dirty(CPUState *env,
2543 unsigned long addr, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002544{
2545}
bellard9fa3e852004-01-04 18:06:42 +00002546#endif /* defined(CONFIG_USER_ONLY) */
2547
pbrooke2eef172008-06-08 01:09:01 +00002548#if !defined(CONFIG_USER_ONLY)
pbrook8da3ff12008-12-01 18:59:50 +00002549
Paul Brookc04b2b72010-03-01 03:31:14 +00002550#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2551typedef struct subpage_t {
Avi Kivity70c68e42012-01-02 12:32:48 +02002552 MemoryRegion iomem;
Paul Brookc04b2b72010-03-01 03:31:14 +00002553 target_phys_addr_t base;
Avi Kivity5312bd82012-02-12 18:32:55 +02002554 uint16_t sub_section[TARGET_PAGE_SIZE];
Paul Brookc04b2b72010-03-01 03:31:14 +00002555} subpage_t;
2556
Anthony Liguoric227f092009-10-01 16:12:16 -05002557static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002558 uint16_t section);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002559static subpage_t *subpage_init(target_phys_addr_t base);
Avi Kivity5312bd82012-02-12 18:32:55 +02002560static void destroy_page_desc(uint16_t section_index)
Avi Kivity54688b12012-02-09 17:34:32 +02002561{
Avi Kivity5312bd82012-02-12 18:32:55 +02002562 MemoryRegionSection *section = &phys_sections[section_index];
2563 MemoryRegion *mr = section->mr;
Avi Kivity54688b12012-02-09 17:34:32 +02002564
2565 if (mr->subpage) {
2566 subpage_t *subpage = container_of(mr, subpage_t, iomem);
2567 memory_region_destroy(&subpage->iomem);
2568 g_free(subpage);
2569 }
2570}
2571
Avi Kivity4346ae32012-02-10 17:00:01 +02002572static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
Avi Kivity54688b12012-02-09 17:34:32 +02002573{
2574 unsigned i;
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002575 PhysPageEntry *p;
Avi Kivity54688b12012-02-09 17:34:32 +02002576
Avi Kivityc19e8802012-02-13 20:25:31 +02002577 if (lp->ptr == PHYS_MAP_NODE_NIL) {
Avi Kivity54688b12012-02-09 17:34:32 +02002578 return;
2579 }
2580
Avi Kivityc19e8802012-02-13 20:25:31 +02002581 p = phys_map_nodes[lp->ptr];
Avi Kivity4346ae32012-02-10 17:00:01 +02002582 for (i = 0; i < L2_SIZE; ++i) {
2583 if (level > 0) {
Avi Kivity54688b12012-02-09 17:34:32 +02002584 destroy_l2_mapping(&p[i], level - 1);
Avi Kivity4346ae32012-02-10 17:00:01 +02002585 } else {
Avi Kivityc19e8802012-02-13 20:25:31 +02002586 destroy_page_desc(p[i].ptr);
Avi Kivity54688b12012-02-09 17:34:32 +02002587 }
Avi Kivity54688b12012-02-09 17:34:32 +02002588 }
Avi Kivityc19e8802012-02-13 20:25:31 +02002589 lp->ptr = PHYS_MAP_NODE_NIL;
Avi Kivity54688b12012-02-09 17:34:32 +02002590}
2591
2592static void destroy_all_mappings(void)
2593{
Avi Kivity3eef53d2012-02-10 14:57:31 +02002594 destroy_l2_mapping(&phys_map, P_L2_LEVELS - 1);
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002595 phys_map_nodes_reset();
Avi Kivity54688b12012-02-09 17:34:32 +02002596}
2597
Avi Kivity5312bd82012-02-12 18:32:55 +02002598static uint16_t phys_section_add(MemoryRegionSection *section)
2599{
2600 if (phys_sections_nb == phys_sections_nb_alloc) {
2601 phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
2602 phys_sections = g_renew(MemoryRegionSection, phys_sections,
2603 phys_sections_nb_alloc);
2604 }
2605 phys_sections[phys_sections_nb] = *section;
2606 return phys_sections_nb++;
2607}
2608
2609static void phys_sections_clear(void)
2610{
2611 phys_sections_nb = 0;
2612}
2613
Michael S. Tsirkin8f2498f2009-09-29 18:53:16 +02002614/* register physical memory.
2615 For RAM, the section size must be a multiple of the target page
2616 size. A page-aligned section is registered directly; an unaligned
2617 head or tail is routed through a subpage object, which forwards
2618 each access to the section that covers the accessed byte. */
Avi Kivity0f0cb162012-02-13 17:14:32 +02002622static void register_subpage(MemoryRegionSection *section)
2623{
2624 subpage_t *subpage;
2625 target_phys_addr_t base = section->offset_within_address_space
2626 & TARGET_PAGE_MASK;
2627 MemoryRegionSection existing = phys_page_find(base >> TARGET_PAGE_BITS);
2628 MemoryRegionSection subsection = {
2629 .offset_within_address_space = base,
2630 .size = TARGET_PAGE_SIZE,
2631 };
Avi Kivity0f0cb162012-02-13 17:14:32 +02002632 target_phys_addr_t start, end;
2633
2634 assert(existing.mr->subpage || existing.mr == &io_mem_unassigned);
2635
2636 if (!(existing.mr->subpage)) {
2637 subpage = subpage_init(base);
2638 subsection.mr = &subpage->iomem;
Avi Kivity29990972012-02-13 20:21:20 +02002639 phys_page_set(base >> TARGET_PAGE_BITS, 1,
2640 phys_section_add(&subsection));
Avi Kivity0f0cb162012-02-13 17:14:32 +02002641 } else {
2642 subpage = container_of(existing.mr, subpage_t, iomem);
2643 }
2644 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
2645 end = start + section->size;
2646 subpage_register(subpage, start, end, phys_section_add(section));
2647}
2648
2649
2650static void register_multipage(MemoryRegionSection *section)
bellard33417e72003-08-10 21:47:01 +00002651{
Avi Kivitydd811242012-01-02 12:17:03 +02002652 target_phys_addr_t start_addr = section->offset_within_address_space;
2653 ram_addr_t size = section->size;
Avi Kivity29990972012-02-13 20:21:20 +02002654 target_phys_addr_t addr;
Avi Kivity5312bd82012-02-12 18:32:55 +02002655 uint16_t section_index = phys_section_add(section);
Avi Kivitydd811242012-01-02 12:17:03 +02002656
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002657 assert(size);
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002658
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002659 addr = start_addr;
Avi Kivity29990972012-02-13 20:21:20 +02002660 phys_page_set(addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
2661 section_index);
bellard33417e72003-08-10 21:47:01 +00002662}
2663
Avi Kivity0f0cb162012-02-13 17:14:32 +02002664void cpu_register_physical_memory_log(MemoryRegionSection *section,
2665 bool readonly)
2666{
2667 MemoryRegionSection now = *section, remain = *section;
2668
2669 if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
2670 || (now.size < TARGET_PAGE_SIZE)) {
2671 now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
2672 - now.offset_within_address_space,
2673 now.size);
2674 register_subpage(&now);
2675 remain.size -= now.size;
2676 remain.offset_within_address_space += now.size;
2677 remain.offset_within_region += now.size;
2678 }
2679 now = remain;
2680 now.size &= TARGET_PAGE_MASK;
2681 if (now.size) {
2682 register_multipage(&now);
2683 remain.size -= now.size;
2684 remain.offset_within_address_space += now.size;
2685 remain.offset_within_region += now.size;
2686 }
2687 now = remain;
2688 if (now.size) {
2689 register_subpage(&now);
2690 }
2691}
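
/* Worked example (illustrative numbers): with 4 KiB target pages, a
   section covering [0x0800, 0x3400) is split as
       head  [0x0800, 0x1000) -> register_subpage()
       body  [0x1000, 0x3000) -> register_multipage()
       tail  [0x3000, 0x3400) -> register_subpage()
   so only the unaligned head and tail pay the subpage indirection. */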
2692
2693
Anthony Liguoric227f092009-10-01 16:12:16 -05002694void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002695{
2696 if (kvm_enabled())
2697 kvm_coalesce_mmio_region(addr, size);
2698}
2699
Anthony Liguoric227f092009-10-01 16:12:16 -05002700void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002701{
2702 if (kvm_enabled())
2703 kvm_uncoalesce_mmio_region(addr, size);
2704}
2705
Sheng Yang62a27442010-01-26 19:21:16 +08002706void qemu_flush_coalesced_mmio_buffer(void)
2707{
2708 if (kvm_enabled())
2709 kvm_flush_coalesced_mmio_buffer();
2710}
2711
Marcelo Tosattic9027602010-03-01 20:25:08 -03002712#if defined(__linux__) && !defined(TARGET_S390X)
2713
2714#include <sys/vfs.h>
2715
2716#define HUGETLBFS_MAGIC 0x958458f6
2717
2718static long gethugepagesize(const char *path)
2719{
2720 struct statfs fs;
2721 int ret;
2722
2723 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002724 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002725 } while (ret != 0 && errno == EINTR);
2726
2727 if (ret != 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002728 perror(path);
2729 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002730 }
2731
2732 if (fs.f_type != HUGETLBFS_MAGIC)
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002733 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002734
2735 return fs.f_bsize;
2736}
2737
Alex Williamson04b16652010-07-02 11:13:17 -06002738static void *file_ram_alloc(RAMBlock *block,
2739 ram_addr_t memory,
2740 const char *path)
Marcelo Tosattic9027602010-03-01 20:25:08 -03002741{
2742 char *filename;
2743 void *area;
2744 int fd;
2745#ifdef MAP_POPULATE
2746 int flags;
2747#endif
2748 unsigned long hpagesize;
2749
2750 hpagesize = gethugepagesize(path);
2751 if (!hpagesize) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002752 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002753 }
2754
2755 if (memory < hpagesize) {
2756 return NULL;
2757 }
2758
2759 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2760 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2761 return NULL;
2762 }
2763
2764 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002765 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002766 }
2767
2768 fd = mkstemp(filename);
2769 if (fd < 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002770 perror("unable to create backing store for hugepages");
2771 free(filename);
2772 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002773 }
2774 unlink(filename);
2775 free(filename);
2776
2777 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2778
2779 /*
2780 * ftruncate is not supported by hugetlbfs in older
2781 * hosts, so don't bother bailing out on errors.
2782 * If anything goes wrong with it under other filesystems,
2783 * mmap will fail.
2784 */
2785 if (ftruncate(fd, memory))
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002786 perror("ftruncate");
Marcelo Tosattic9027602010-03-01 20:25:08 -03002787
2788#ifdef MAP_POPULATE
2789 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2790 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2791 * to sidestep this quirk.
2792 */
2793 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2794 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2795#else
2796 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2797#endif
2798 if (area == MAP_FAILED) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002799 perror("file_ram_alloc: can't mmap RAM pages");
2800 close(fd);
2801 return (NULL);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002802 }
Alex Williamson04b16652010-07-02 11:13:17 -06002803 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002804 return area;
2805}
2806#endif
2807
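/* Minimal sketch, assuming a Linux host with hugetlbfs mounted at
   /dev/hugepages (the path is an assumption): this is roughly what
   qemu_ram_alloc_from_ptr() below does when -mem-path is given. */
#if 0
    RAMBlock *block = g_malloc0(sizeof(*block));
    block->host = file_ram_alloc(block, 512 * 1024 * 1024, "/dev/hugepages");
    if (block->host == NULL) {
        /* hugepage backing unavailable: fall back to anonymous memory */
        block->host = qemu_vmalloc(512 * 1024 * 1024);
    }
#endif
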
Alex Williamsond17b5282010-06-25 11:08:38 -06002808static ram_addr_t find_ram_offset(ram_addr_t size)
2809{
Alex Williamson04b16652010-07-02 11:13:17 -06002810 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06002811 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002812
2813 if (QLIST_EMPTY(&ram_list.blocks))
2814 return 0;
2815
2816 QLIST_FOREACH(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002817 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002818
2819 end = block->offset + block->length;
2820
2821 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2822 if (next_block->offset >= end) {
2823 next = MIN(next, next_block->offset);
2824 }
2825 }
2826 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06002827 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06002828 mingap = next - end;
2829 }
2830 }
Alex Williamson3e837b22011-10-31 08:54:09 -06002831
2832 if (offset == RAM_ADDR_MAX) {
2833 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2834 (uint64_t)size);
2835 abort();
2836 }
2837
Alex Williamson04b16652010-07-02 11:13:17 -06002838 return offset;
2839}
2840
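/* Worked example (illustrative): with blocks at [0, 4M) and [8M, 12M),
   a 2M request sees the hole [4M, 8M) and the open-ended tail past 12M;
   the smaller gap that still fits wins (best fit), so the new block
   lands at offset 4M. */
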
2841static ram_addr_t last_ram_offset(void)
2842{
Alex Williamsond17b5282010-06-25 11:08:38 -06002843 RAMBlock *block;
2844 ram_addr_t last = 0;
2845
2846 QLIST_FOREACH(block, &ram_list.blocks, next)
2847 last = MAX(last, block->offset + block->length);
2848
2849 return last;
2850}
2851
Avi Kivityc5705a72011-12-20 15:59:12 +02002852void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
Cam Macdonell84b89d72010-07-26 18:10:57 -06002853{
2854 RAMBlock *new_block, *block;
2855
Avi Kivityc5705a72011-12-20 15:59:12 +02002856 new_block = NULL;
2857 QLIST_FOREACH(block, &ram_list.blocks, next) {
2858 if (block->offset == addr) {
2859 new_block = block;
2860 break;
2861 }
2862 }
2863 assert(new_block);
2864 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002865
2866 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2867 char *id = dev->parent_bus->info->get_dev_path(dev);
2868 if (id) {
2869 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05002870 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002871 }
2872 }
2873 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2874
2875 QLIST_FOREACH(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02002876 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06002877 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2878 new_block->idstr);
2879 abort();
2880 }
2881 }
Avi Kivityc5705a72011-12-20 15:59:12 +02002882}
2883
2884ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2885 MemoryRegion *mr)
2886{
2887 RAMBlock *new_block;
2888
2889 size = TARGET_PAGE_ALIGN(size);
2890 new_block = g_malloc0(sizeof(*new_block));
Cam Macdonell84b89d72010-07-26 18:10:57 -06002891
Avi Kivity7c637362011-12-21 13:09:49 +02002892 new_block->mr = mr;
Jun Nakajima432d2682010-08-31 16:41:25 +01002893 new_block->offset = find_ram_offset(size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002894 if (host) {
2895 new_block->host = host;
Huang Yingcd19cfa2011-03-02 08:56:19 +01002896 new_block->flags |= RAM_PREALLOC_MASK;
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002897 } else {
2898 if (mem_path) {
2899#if defined (__linux__) && !defined(TARGET_S390X)
2900 new_block->host = file_ram_alloc(new_block, size, mem_path);
2901 if (!new_block->host) {
2902 new_block->host = qemu_vmalloc(size);
Andreas Färbere78815a2010-09-25 11:26:05 +00002903 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002904 }
2905#else
2906 fprintf(stderr, "-mem-path option unsupported\n");
2907 exit(1);
2908#endif
2909 } else {
2910#if defined(TARGET_S390X) && defined(CONFIG_KVM)
Christian Borntraegerff836782011-05-10 14:49:10 +02002911 /* S390 KVM requires the topmost vma of the RAM to be smaller than
2912 a system-defined value, which is at least 256GB. Larger systems
2913 have larger values. We put the guest between the end of the data
2914 segment (system break) and this value. We use 32GB as a base to
2915 have enough room for the system break to grow. */
2916 new_block->host = mmap((void*)0x800000000, size,
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002917 PROT_EXEC|PROT_READ|PROT_WRITE,
Christian Borntraegerff836782011-05-10 14:49:10 +02002918 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
Alexander Graffb8b2732011-05-20 17:33:28 +02002919 if (new_block->host == MAP_FAILED) {
2920 fprintf(stderr, "Allocating RAM failed\n");
2921 abort();
2922 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002923#else
Jan Kiszka868bb332011-06-21 22:59:09 +02002924 if (xen_enabled()) {
Avi Kivityfce537d2011-12-18 15:48:55 +02002925 xen_ram_alloc(new_block->offset, size, mr);
Jun Nakajima432d2682010-08-31 16:41:25 +01002926 } else {
2927 new_block->host = qemu_vmalloc(size);
2928 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002929#endif
Andreas Färbere78815a2010-09-25 11:26:05 +00002930 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002931 }
2932 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06002933 new_block->length = size;
2934
2935 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2936
Anthony Liguori7267c092011-08-20 22:09:37 -05002937 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
Cam Macdonell84b89d72010-07-26 18:10:57 -06002938 last_ram_offset() >> TARGET_PAGE_BITS);
2939 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2940 0xff, size >> TARGET_PAGE_BITS);
2941
2942 if (kvm_enabled())
2943 kvm_setup_guest_memory(new_block->host, size);
2944
2945 return new_block->offset;
2946}
2947
Avi Kivityc5705a72011-12-20 15:59:12 +02002948ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
pbrook94a6b542009-04-11 17:15:54 +00002949{
Avi Kivityc5705a72011-12-20 15:59:12 +02002950 return qemu_ram_alloc_from_ptr(size, NULL, mr);
pbrook94a6b542009-04-11 17:15:54 +00002951}
bellarde9a1ab12007-02-08 23:08:38 +00002952
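/* Typical call sequence (sketch; "pc.ram", ram_size and mr are
   placeholders for whatever the board provides): allocate the block,
   then give it a stable idstr so migration can match it up on the
   destination side. */
#if 0
    ram_addr_t offset = qemu_ram_alloc(ram_size, mr);
    qemu_ram_set_idstr(offset, "pc.ram", NULL);
#endif
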
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002953void qemu_ram_free_from_ptr(ram_addr_t addr)
2954{
2955 RAMBlock *block;
2956
2957 QLIST_FOREACH(block, &ram_list.blocks, next) {
2958 if (addr == block->offset) {
2959 QLIST_REMOVE(block, next);
Anthony Liguori7267c092011-08-20 22:09:37 -05002960 g_free(block);
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002961 return;
2962 }
2963 }
2964}
2965
Anthony Liguoric227f092009-10-01 16:12:16 -05002966void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00002967{
Alex Williamson04b16652010-07-02 11:13:17 -06002968 RAMBlock *block;
2969
2970 QLIST_FOREACH(block, &ram_list.blocks, next) {
2971 if (addr == block->offset) {
2972 QLIST_REMOVE(block, next);
Huang Yingcd19cfa2011-03-02 08:56:19 +01002973 if (block->flags & RAM_PREALLOC_MASK) {
2974 ;
2975 } else if (mem_path) {
Alex Williamson04b16652010-07-02 11:13:17 -06002976#if defined (__linux__) && !defined(TARGET_S390X)
2977 if (block->fd) {
2978 munmap(block->host, block->length);
2979 close(block->fd);
2980 } else {
2981 qemu_vfree(block->host);
2982 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01002983#else
2984 abort();
Alex Williamson04b16652010-07-02 11:13:17 -06002985#endif
2986 } else {
2987#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2988 munmap(block->host, block->length);
2989#else
Jan Kiszka868bb332011-06-21 22:59:09 +02002990 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002991 xen_invalidate_map_cache_entry(block->host);
Jun Nakajima432d2682010-08-31 16:41:25 +01002992 } else {
2993 qemu_vfree(block->host);
2994 }
Alex Williamson04b16652010-07-02 11:13:17 -06002995#endif
2996 }
Anthony Liguori7267c092011-08-20 22:09:37 -05002997 g_free(block);
Alex Williamson04b16652010-07-02 11:13:17 -06002998 return;
2999 }
3000 }
3001
bellarde9a1ab12007-02-08 23:08:38 +00003002}
3003
Huang Yingcd19cfa2011-03-02 08:56:19 +01003004#ifndef _WIN32
3005void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
3006{
3007 RAMBlock *block;
3008 ram_addr_t offset;
3009 int flags;
3010 void *area, *vaddr;
3011
3012 QLIST_FOREACH(block, &ram_list.blocks, next) {
3013 offset = addr - block->offset;
3014 if (offset < block->length) {
3015 vaddr = block->host + offset;
3016 if (block->flags & RAM_PREALLOC_MASK) {
3017 ;
3018 } else {
3019 flags = MAP_FIXED;
3020 munmap(vaddr, length);
3021 if (mem_path) {
3022#if defined(__linux__) && !defined(TARGET_S390X)
3023 if (block->fd) {
3024#ifdef MAP_POPULATE
3025 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
3026 MAP_PRIVATE;
3027#else
3028 flags |= MAP_PRIVATE;
3029#endif
3030 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3031 flags, block->fd, offset);
3032 } else {
3033 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3034 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3035 flags, -1, 0);
3036 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01003037#else
3038 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01003039#endif
3040 } else {
3041#if defined(TARGET_S390X) && defined(CONFIG_KVM)
3042 flags |= MAP_SHARED | MAP_ANONYMOUS;
3043 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
3044 flags, -1, 0);
3045#else
3046 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3047 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3048 flags, -1, 0);
3049#endif
3050 }
3051 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00003052 fprintf(stderr, "Could not remap addr: "
3053 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01003054 length, addr);
3055 exit(1);
3056 }
3057 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
3058 }
3059 return;
3060 }
3061 }
3062}
3063#endif /* !_WIN32 */
3064
pbrookdc828ca2009-04-09 22:21:07 +00003065/* Return a host pointer to ram allocated with qemu_ram_alloc.
pbrook5579c7f2009-04-11 14:47:08 +00003066 With the exception of the softmmu code in this file, this should
3067 only be used for local memory (e.g. video ram) that the device owns,
3068 and knows it isn't going to access beyond the end of the block.
3069
3070 It should not be used for general purpose DMA.
3071 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
3072 */
Anthony Liguoric227f092009-10-01 16:12:16 -05003073void *qemu_get_ram_ptr(ram_addr_t addr)
pbrookdc828ca2009-04-09 22:21:07 +00003074{
pbrook94a6b542009-04-11 17:15:54 +00003075 RAMBlock *block;
3076
Alex Williamsonf471a172010-06-11 11:11:42 -06003077 QLIST_FOREACH(block, &ram_list.blocks, next) {
3078 if (addr - block->offset < block->length) {
Vincent Palatin7d82af32011-03-10 15:47:46 -05003079 /* Move this entry to the start of the list. */
3080 if (block != QLIST_FIRST(&ram_list.blocks)) {
3081 QLIST_REMOVE(block, next);
3082 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
3083 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003084 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003085 /* We need to check if the requested address is in the RAM
3086 * because we don't want to map the entire memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003087 * In that case just map until the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01003088 */
3089 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003090 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01003091 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003092 block->host =
3093 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01003094 }
3095 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003096 return block->host + (addr - block->offset);
3097 }
pbrook94a6b542009-04-11 17:15:54 +00003098 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003099
3100 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3101 abort();
3102
3103 return NULL;
pbrookdc828ca2009-04-09 22:21:07 +00003104}
3105
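/* Sketch of the intended use (identifiers are illustrative): a device
   touching memory it owns end-to-end, e.g. clearing its video RAM. */
#if 0
    void *vram = qemu_get_ram_ptr(vram_offset);   /* offset from qemu_ram_alloc() */
    memset(vram, 0, vram_size);                   /* stays inside the block */
    qemu_put_ram_ptr(vram);
#endif
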
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02003106/* Return a host pointer to ram allocated with qemu_ram_alloc.
3107 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3108 */
3109void *qemu_safe_ram_ptr(ram_addr_t addr)
3110{
3111 RAMBlock *block;
3112
3113 QLIST_FOREACH(block, &ram_list.blocks, next) {
3114 if (addr - block->offset < block->length) {
Jan Kiszka868bb332011-06-21 22:59:09 +02003115 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003116 /* We need to check if the requested address is in the RAM
3117 * because we don't want to map the entire memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003118 * In that case just map until the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01003119 */
3120 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003121 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01003122 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003123 block->host =
3124 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01003125 }
3126 }
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02003127 return block->host + (addr - block->offset);
3128 }
3129 }
3130
3131 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3132 abort();
3133
3134 return NULL;
3135}
3136
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003137/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
3138 * but takes a size argument */
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003139void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003140{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003141 if (*size == 0) {
3142 return NULL;
3143 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003144 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003145 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02003146 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003147 RAMBlock *block;
3148
3149 QLIST_FOREACH(block, &ram_list.blocks, next) {
3150 if (addr - block->offset < block->length) {
3151 if (addr - block->offset + *size > block->length)
3152 *size = block->length - addr + block->offset;
3153 return block->host + (addr - block->offset);
3154 }
3155 }
3156
3157 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3158 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003159 }
3160}
3161
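/* Sketch (addr and len are assumed names): *size is clamped to the
   containing block, so callers must loop or bail out when fewer bytes
   than requested come back. */
#if 0
    ram_addr_t want = len;
    void *p = qemu_ram_ptr_length(addr, &want);
    if (want < len) {
        /* hit a block boundary: only 'want' bytes are contiguous here */
    }
#endif
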
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003162void qemu_put_ram_ptr(void *addr)
3163{
3164 trace_qemu_put_ram_ptr(addr);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003165}
3166
Marcelo Tosattie8902612010-10-11 15:31:19 -03003167int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00003168{
pbrook94a6b542009-04-11 17:15:54 +00003169 RAMBlock *block;
3170 uint8_t *host = ptr;
3171
Jan Kiszka868bb332011-06-21 22:59:09 +02003172 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003173 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003174 return 0;
3175 }
3176
Alex Williamsonf471a172010-06-11 11:11:42 -06003177 QLIST_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003178 /* This can happen when the block is not mapped. */
3179 if (block->host == NULL) {
3180 continue;
3181 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003182 if (host - block->host < block->length) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03003183 *ram_addr = block->offset + (host - block->host);
3184 return 0;
Alex Williamsonf471a172010-06-11 11:11:42 -06003185 }
pbrook94a6b542009-04-11 17:15:54 +00003186 }
Jun Nakajima432d2682010-08-31 16:41:25 +01003187
Marcelo Tosattie8902612010-10-11 15:31:19 -03003188 return -1;
3189}
Alex Williamsonf471a172010-06-11 11:11:42 -06003190
Marcelo Tosattie8902612010-10-11 15:31:19 -03003191/* Some of the softmmu routines need to translate from a host pointer
3192 (typically a TLB entry) back to a ram offset. */
3193ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3194{
3195 ram_addr_t ram_addr;
Alex Williamsonf471a172010-06-11 11:11:42 -06003196
Marcelo Tosattie8902612010-10-11 15:31:19 -03003197 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3198 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3199 abort();
3200 }
3201 return ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00003202}
3203
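/* The two translations are inverses for a mapped block (sketch): */
#if 0
    void *host = qemu_get_ram_ptr(addr);
    ram_addr_t back;
    assert(qemu_ram_addr_from_host(host, &back) == 0 && back == addr);
#endif
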
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003204static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
3205 unsigned size)
bellard33417e72003-08-10 21:47:01 +00003206{
pbrook67d3b952006-12-18 05:03:52 +00003207#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00003208 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
pbrook67d3b952006-12-18 05:03:52 +00003209#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003210#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003211 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00003212#endif
3213 return 0;
3214}
3215
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003216static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
3217 uint64_t val, unsigned size)
blueswir1e18231a2008-10-06 18:46:28 +00003218{
3219#ifdef DEBUG_UNASSIGNED
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003220 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
blueswir1e18231a2008-10-06 18:46:28 +00003221#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003222#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003223 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00003224#endif
3225}
3226
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003227static const MemoryRegionOps unassigned_mem_ops = {
3228 .read = unassigned_mem_read,
3229 .write = unassigned_mem_write,
3230 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00003231};
3232
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003233static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
3234 unsigned size)
3235{
3236 abort();
3237}
3238
3239static void error_mem_write(void *opaque, target_phys_addr_t addr,
3240 uint64_t value, unsigned size)
3241{
3242 abort();
3243}
3244
3245static const MemoryRegionOps error_mem_ops = {
3246 .read = error_mem_read,
3247 .write = error_mem_write,
3248 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00003249};
3250
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003251static const MemoryRegionOps rom_mem_ops = {
3252 .read = error_mem_read,
3253 .write = unassigned_mem_write,
3254 .endianness = DEVICE_NATIVE_ENDIAN,
3255};
3256
3257static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
3258 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00003259{
bellard3a7d9292005-08-21 09:26:42 +00003260 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003261 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003262 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3263#if !defined(CONFIG_USER_ONLY)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003264 tb_invalidate_phys_page_fast(ram_addr, size);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003265 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003266#endif
3267 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003268 switch (size) {
3269 case 1:
3270 stb_p(qemu_get_ram_ptr(ram_addr), val);
3271 break;
3272 case 2:
3273 stw_p(qemu_get_ram_ptr(ram_addr), val);
3274 break;
3275 case 4:
3276 stl_p(qemu_get_ram_ptr(ram_addr), val);
3277 break;
3278 default:
3279 abort();
3280 }
bellardf23db162005-08-21 19:12:28 +00003281 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003282 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00003283 /* we remove the notdirty callback only if the code has been
3284 flushed */
3285 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00003286 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00003287}
3288
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003289static const MemoryRegionOps notdirty_mem_ops = {
3290 .read = error_mem_read,
3291 .write = notdirty_mem_write,
3292 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00003293};
3294
pbrook0f459d12008-06-09 00:20:13 +00003295/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00003296static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00003297{
3298 CPUState *env = cpu_single_env;
aliguori06d55cc2008-11-18 20:24:06 +00003299 target_ulong pc, cs_base;
3300 TranslationBlock *tb;
pbrook0f459d12008-06-09 00:20:13 +00003301 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00003302 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00003303 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00003304
aliguori06d55cc2008-11-18 20:24:06 +00003305 if (env->watchpoint_hit) {
3306 /* We re-entered the check after replacing the TB. Now raise
3307 * the debug interrupt so that it will trigger after the
3308 * current instruction. */
3309 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3310 return;
3311 }
pbrook2e70f6e2008-06-29 01:03:05 +00003312 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003313 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00003314 if ((vaddr == (wp->vaddr & len_mask) ||
3315 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00003316 wp->flags |= BP_WATCHPOINT_HIT;
3317 if (!env->watchpoint_hit) {
3318 env->watchpoint_hit = wp;
3319 tb = tb_find_pc(env->mem_io_pc);
3320 if (!tb) {
3321 cpu_abort(env, "check_watchpoint: could not find TB for "
3322 "pc=%p", (void *)env->mem_io_pc);
3323 }
Stefan Weil618ba8e2011-04-18 06:39:53 +00003324 cpu_restore_state(tb, env, env->mem_io_pc);
aliguori6e140f22008-11-18 20:37:55 +00003325 tb_phys_invalidate(tb, -1);
3326 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3327 env->exception_index = EXCP_DEBUG;
3328 } else {
3329 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3330 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3331 }
3332 cpu_resume_from_signal(env, NULL);
aliguori06d55cc2008-11-18 20:24:06 +00003333 }
aliguori6e140f22008-11-18 20:37:55 +00003334 } else {
3335 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00003336 }
3337 }
3338}
3339
pbrook6658ffb2007-03-16 23:58:11 +00003340/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3341 so these check for a hit then pass through to the normal out-of-line
3342 phys routines. */
Avi Kivity1ec9b902012-01-02 12:47:48 +02003343static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
3344 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00003345{
Avi Kivity1ec9b902012-01-02 12:47:48 +02003346 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
3347 switch (size) {
3348 case 1: return ldub_phys(addr);
3349 case 2: return lduw_phys(addr);
3350 case 4: return ldl_phys(addr);
3351 default: abort();
3352 }
pbrook6658ffb2007-03-16 23:58:11 +00003353}
3354
Avi Kivity1ec9b902012-01-02 12:47:48 +02003355static void watch_mem_write(void *opaque, target_phys_addr_t addr,
3356 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00003357{
Avi Kivity1ec9b902012-01-02 12:47:48 +02003358 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
3359 switch (size) {
3360    case 1: stb_phys(addr, val); break;
3361    case 2: stw_phys(addr, val); break;
3362    case 4: stl_phys(addr, val); break;
3363 default: abort();
3364 }
pbrook6658ffb2007-03-16 23:58:11 +00003365}
3366
Avi Kivity1ec9b902012-01-02 12:47:48 +02003367static const MemoryRegionOps watch_mem_ops = {
3368 .read = watch_mem_read,
3369 .write = watch_mem_write,
3370 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00003371};
pbrook6658ffb2007-03-16 23:58:11 +00003372
Avi Kivity70c68e42012-01-02 12:32:48 +02003373static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
3374 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003375{
Avi Kivity70c68e42012-01-02 12:32:48 +02003376 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003377 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02003378 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00003379#if defined(DEBUG_SUBPAGE)
3380 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3381 mmio, len, addr, idx);
3382#endif
blueswir1db7b5422007-05-26 17:36:03 +00003383
Avi Kivity5312bd82012-02-12 18:32:55 +02003384 section = &phys_sections[mmio->sub_section[idx]];
3385 addr += mmio->base;
3386 addr -= section->offset_within_address_space;
3387 addr += section->offset_within_region;
3388 return io_mem_read(section->mr->ram_addr, addr, len);
blueswir1db7b5422007-05-26 17:36:03 +00003389}
3390
Avi Kivity70c68e42012-01-02 12:32:48 +02003391static void subpage_write(void *opaque, target_phys_addr_t addr,
3392 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003393{
Avi Kivity70c68e42012-01-02 12:32:48 +02003394 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003395 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02003396 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00003397#if defined(DEBUG_SUBPAGE)
Avi Kivity70c68e42012-01-02 12:32:48 +02003398 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
3399 " idx %d value %"PRIx64"\n",
Richard Hendersonf6405242010-04-22 16:47:31 -07003400 __func__, mmio, len, addr, idx, value);
blueswir1db7b5422007-05-26 17:36:03 +00003401#endif
Richard Hendersonf6405242010-04-22 16:47:31 -07003402
Avi Kivity5312bd82012-02-12 18:32:55 +02003403 section = &phys_sections[mmio->sub_section[idx]];
3404 addr += mmio->base;
3405 addr -= section->offset_within_address_space;
3406 addr += section->offset_within_region;
3407 io_mem_write(section->mr->ram_addr, addr, value, len);
blueswir1db7b5422007-05-26 17:36:03 +00003408}
3409
Avi Kivity70c68e42012-01-02 12:32:48 +02003410static const MemoryRegionOps subpage_ops = {
3411 .read = subpage_read,
3412 .write = subpage_write,
3413 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00003414};
3415
Avi Kivityde712f92012-01-02 12:41:07 +02003416static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
3417 unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01003418{
3419 ram_addr_t raddr = addr;
3420 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02003421 switch (size) {
3422 case 1: return ldub_p(ptr);
3423 case 2: return lduw_p(ptr);
3424 case 4: return ldl_p(ptr);
3425 default: abort();
3426 }
Andreas Färber56384e82011-11-30 16:26:21 +01003427}
3428
Avi Kivityde712f92012-01-02 12:41:07 +02003429static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
3430 uint64_t value, unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01003431{
3432 ram_addr_t raddr = addr;
3433 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02003434 switch (size) {
3435    case 1: stb_p(ptr, value); break;
3436    case 2: stw_p(ptr, value); break;
3437    case 4: stl_p(ptr, value); break;
3438 default: abort();
3439 }
Andreas Färber56384e82011-11-30 16:26:21 +01003440}
3441
Avi Kivityde712f92012-01-02 12:41:07 +02003442static const MemoryRegionOps subpage_ram_ops = {
3443 .read = subpage_ram_read,
3444 .write = subpage_ram_write,
3445 .endianness = DEVICE_NATIVE_ENDIAN,
Andreas Färber56384e82011-11-30 16:26:21 +01003446};
3447
Anthony Liguoric227f092009-10-01 16:12:16 -05003448static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02003449 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00003450{
3451 int idx, eidx;
3452
3453 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3454 return -1;
3455 idx = SUBPAGE_IDX(start);
3456 eidx = SUBPAGE_IDX(end);
3457#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00003458 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", __func__,
blueswir1db7b5422007-05-26 17:36:03 +00003459 mmio, start, end, idx, eidx, section);
3460#endif
Avi Kivity5312bd82012-02-12 18:32:55 +02003461 if (memory_region_is_ram(phys_sections[section].mr)) {
3462 MemoryRegionSection new_section = phys_sections[section];
3463 new_section.mr = &io_mem_subpage_ram;
3464 section = phys_section_add(&new_section);
Andreas Färber56384e82011-11-30 16:26:21 +01003465 }
blueswir1db7b5422007-05-26 17:36:03 +00003466 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02003467 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00003468 }
3469
3470 return 0;
3471}
3472
Avi Kivity0f0cb162012-02-13 17:14:32 +02003473static subpage_t *subpage_init(target_phys_addr_t base)
blueswir1db7b5422007-05-26 17:36:03 +00003474{
Anthony Liguoric227f092009-10-01 16:12:16 -05003475 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00003476
Anthony Liguori7267c092011-08-20 22:09:37 -05003477 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00003478
3479 mmio->base = base;
Avi Kivity70c68e42012-01-02 12:32:48 +02003480 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
3481 "subpage", TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02003482 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00003483#if defined(DEBUG_SUBPAGE)
aliguori1eec6142009-02-05 22:06:18 +00003484 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
3485 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00003486#endif
Avi Kivity0f0cb162012-02-13 17:14:32 +02003487 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
blueswir1db7b5422007-05-26 17:36:03 +00003488
3489 return mmio;
3490}
3491
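/* Worked example (illustrative): with 4 KiB pages, after
   subpage_register(mmio, 0x100, 0x1ff, s), a 1-byte access at
   mmio->base + 0x123 computes SUBPAGE_IDX(0x123) == 0x123 and is
   dispatched to section s via phys_sections[mmio->sub_section[0x123]]. */
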
aliguori88715652009-02-11 15:20:58 +00003492static int get_free_io_mem_idx(void)
3493{
3494 int i;
3495
3496 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3497 if (!io_mem_used[i]) {
3498 io_mem_used[i] = 1;
3499 return i;
3500 }
Riku Voipioc6703b42009-12-03 15:56:05 +02003501 fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
aliguori88715652009-02-11 15:20:58 +00003502 return -1;
3503}
3504
bellard33417e72003-08-10 21:47:01 +00003505/* Associate a MemoryRegion with an I/O dispatch slot. If io_index is
3506 positive, that slot is (re)used; otherwise a new slot is allocated.
3507 The returned index is what the softmmu accessors use to dispatch
3508 reads and writes to the region's callbacks. (-1) is returned if no
3509 slot is free. */
Avi Kivitya621f382012-01-02 13:12:08 +02003512static int cpu_register_io_memory_fixed(int io_index, MemoryRegion *mr)
bellard33417e72003-08-10 21:47:01 +00003513{
bellard33417e72003-08-10 21:47:01 +00003514 if (io_index <= 0) {
aliguori88715652009-02-11 15:20:58 +00003515 io_index = get_free_io_mem_idx();
3516 if (io_index == -1)
3517 return io_index;
bellard33417e72003-08-10 21:47:01 +00003518 } else {
3519 if (io_index >= IO_MEM_NB_ENTRIES)
3520 return -1;
3521 }
bellardb5ff1b32005-11-26 10:38:39 +00003522
Avi Kivitya621f382012-01-02 13:12:08 +02003523 io_mem_region[io_index] = mr;
Richard Hendersonf6405242010-04-22 16:47:31 -07003524
Avi Kivity11c7ef02012-01-02 17:21:07 +02003525 return io_index;
bellard33417e72003-08-10 21:47:01 +00003526}
bellard61382a52003-10-27 21:22:23 +00003527
Avi Kivitya621f382012-01-02 13:12:08 +02003528int cpu_register_io_memory(MemoryRegion *mr)
Avi Kivity1eed09c2009-06-14 11:38:51 +03003529{
Avi Kivitya621f382012-01-02 13:12:08 +02003530 return cpu_register_io_memory_fixed(0, mr);
Avi Kivity1eed09c2009-06-14 11:38:51 +03003531}
3532
Avi Kivity11c7ef02012-01-02 17:21:07 +02003533void cpu_unregister_io_memory(int io_index)
aliguori88715652009-02-11 15:20:58 +00003534{
Avi Kivitya621f382012-01-02 13:12:08 +02003535 io_mem_region[io_index] = NULL;
aliguori88715652009-02-11 15:20:58 +00003536 io_mem_used[io_index] = 0;
3537}
3538
Avi Kivity5312bd82012-02-12 18:32:55 +02003539static uint16_t dummy_section(MemoryRegion *mr)
3540{
3541 MemoryRegionSection section = {
3542 .mr = mr,
3543 .offset_within_address_space = 0,
3544 .offset_within_region = 0,
3545 .size = UINT64_MAX,
3546 };
3547
3548 return phys_section_add(&section);
3549}
3550
Avi Kivitye9179ce2009-06-14 11:38:52 +03003551static void io_mem_init(void)
3552{
3553 int i;
3554
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003555 /* Must be first: */
3556 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
3557 assert(io_mem_ram.ram_addr == 0);
3558 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
3559 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
3560 "unassigned", UINT64_MAX);
3561 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
3562 "notdirty", UINT64_MAX);
Avi Kivityde712f92012-01-02 12:41:07 +02003563 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
3564 "subpage-ram", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003565 for (i=0; i<5; i++)
3566 io_mem_used[i] = 1;
3567
Avi Kivity1ec9b902012-01-02 12:47:48 +02003568 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
3569 "watch", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003570}
3571
Avi Kivity50c1e142012-02-08 21:36:02 +02003572static void core_begin(MemoryListener *listener)
3573{
Avi Kivity54688b12012-02-09 17:34:32 +02003574 destroy_all_mappings();
Avi Kivity5312bd82012-02-12 18:32:55 +02003575 phys_sections_clear();
Avi Kivityc19e8802012-02-13 20:25:31 +02003576 phys_map.ptr = PHYS_MAP_NODE_NIL;
Avi Kivity5312bd82012-02-12 18:32:55 +02003577 phys_section_unassigned = dummy_section(&io_mem_unassigned);
Avi Kivity50c1e142012-02-08 21:36:02 +02003578}
3579
3580static void core_commit(MemoryListener *listener)
3581{
Avi Kivity117712c2012-02-12 21:23:17 +02003582 CPUState *env;
3583
3584 /* since each CPU stores ram addresses in its TLB cache, we must
3585 reset the modified entries */
3586 /* XXX: slow ! */
3587 for(env = first_cpu; env != NULL; env = env->next_cpu) {
3588 tlb_flush(env, 1);
3589 }
Avi Kivity50c1e142012-02-08 21:36:02 +02003590}
3591
Avi Kivity93632742012-02-08 16:54:16 +02003592static void core_region_add(MemoryListener *listener,
3593 MemoryRegionSection *section)
3594{
Avi Kivity4855d412012-02-08 21:16:05 +02003595 cpu_register_physical_memory_log(section, section->readonly);
Avi Kivity93632742012-02-08 16:54:16 +02003596}
3597
3598static void core_region_del(MemoryListener *listener,
3599 MemoryRegionSection *section)
3600{
Avi Kivity93632742012-02-08 16:54:16 +02003601}
3602
Avi Kivity50c1e142012-02-08 21:36:02 +02003603static void core_region_nop(MemoryListener *listener,
3604 MemoryRegionSection *section)
3605{
Avi Kivity54688b12012-02-09 17:34:32 +02003606 cpu_register_physical_memory_log(section, section->readonly);
Avi Kivity50c1e142012-02-08 21:36:02 +02003607}
3608
Avi Kivity93632742012-02-08 16:54:16 +02003609static void core_log_start(MemoryListener *listener,
3610 MemoryRegionSection *section)
3611{
3612}
3613
3614static void core_log_stop(MemoryListener *listener,
3615 MemoryRegionSection *section)
3616{
3617}
3618
3619static void core_log_sync(MemoryListener *listener,
3620 MemoryRegionSection *section)
3621{
3622}
3623
3624static void core_log_global_start(MemoryListener *listener)
3625{
3626 cpu_physical_memory_set_dirty_tracking(1);
3627}
3628
3629static void core_log_global_stop(MemoryListener *listener)
3630{
3631 cpu_physical_memory_set_dirty_tracking(0);
3632}
3633
3634static void core_eventfd_add(MemoryListener *listener,
3635 MemoryRegionSection *section,
3636 bool match_data, uint64_t data, int fd)
3637{
3638}
3639
3640static void core_eventfd_del(MemoryListener *listener,
3641 MemoryRegionSection *section,
3642 bool match_data, uint64_t data, int fd)
3643{
3644}
3645
Avi Kivity50c1e142012-02-08 21:36:02 +02003646static void io_begin(MemoryListener *listener)
3647{
3648}
3649
3650static void io_commit(MemoryListener *listener)
3651{
3652}
3653
Avi Kivity4855d412012-02-08 21:16:05 +02003654static void io_region_add(MemoryListener *listener,
3655 MemoryRegionSection *section)
3656{
3657 iorange_init(&section->mr->iorange, &memory_region_iorange_ops,
3658 section->offset_within_address_space, section->size);
3659 ioport_register(&section->mr->iorange);
3660}
3661
3662static void io_region_del(MemoryListener *listener,
3663 MemoryRegionSection *section)
3664{
3665 isa_unassign_ioport(section->offset_within_address_space, section->size);
3666}
3667
Avi Kivity50c1e142012-02-08 21:36:02 +02003668static void io_region_nop(MemoryListener *listener,
3669 MemoryRegionSection *section)
3670{
3671}
3672
Avi Kivity4855d412012-02-08 21:16:05 +02003673static void io_log_start(MemoryListener *listener,
3674 MemoryRegionSection *section)
3675{
3676}
3677
3678static void io_log_stop(MemoryListener *listener,
3679 MemoryRegionSection *section)
3680{
3681}
3682
3683static void io_log_sync(MemoryListener *listener,
3684 MemoryRegionSection *section)
3685{
3686}
3687
3688static void io_log_global_start(MemoryListener *listener)
3689{
3690}
3691
3692static void io_log_global_stop(MemoryListener *listener)
3693{
3694}
3695
3696static void io_eventfd_add(MemoryListener *listener,
3697 MemoryRegionSection *section,
3698 bool match_data, uint64_t data, int fd)
3699{
3700}
3701
3702static void io_eventfd_del(MemoryListener *listener,
3703 MemoryRegionSection *section,
3704 bool match_data, uint64_t data, int fd)
3705{
3706}
3707
Avi Kivity93632742012-02-08 16:54:16 +02003708static MemoryListener core_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02003709 .begin = core_begin,
3710 .commit = core_commit,
Avi Kivity93632742012-02-08 16:54:16 +02003711 .region_add = core_region_add,
3712 .region_del = core_region_del,
Avi Kivity50c1e142012-02-08 21:36:02 +02003713 .region_nop = core_region_nop,
Avi Kivity93632742012-02-08 16:54:16 +02003714 .log_start = core_log_start,
3715 .log_stop = core_log_stop,
3716 .log_sync = core_log_sync,
3717 .log_global_start = core_log_global_start,
3718 .log_global_stop = core_log_global_stop,
3719 .eventfd_add = core_eventfd_add,
3720 .eventfd_del = core_eventfd_del,
3721 .priority = 0,
3722};
3723
Avi Kivity4855d412012-02-08 21:16:05 +02003724static MemoryListener io_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02003725 .begin = io_begin,
3726 .commit = io_commit,
Avi Kivity4855d412012-02-08 21:16:05 +02003727 .region_add = io_region_add,
3728 .region_del = io_region_del,
Avi Kivity50c1e142012-02-08 21:36:02 +02003729 .region_nop = io_region_nop,
Avi Kivity4855d412012-02-08 21:16:05 +02003730 .log_start = io_log_start,
3731 .log_stop = io_log_stop,
3732 .log_sync = io_log_sync,
3733 .log_global_start = io_log_global_start,
3734 .log_global_stop = io_log_global_stop,
3735 .eventfd_add = io_eventfd_add,
3736 .eventfd_del = io_eventfd_del,
3737 .priority = 0,
3738};
3739
Avi Kivity62152b82011-07-26 14:26:14 +03003740static void memory_map_init(void)
3741{
Anthony Liguori7267c092011-08-20 22:09:37 -05003742 system_memory = g_malloc(sizeof(*system_memory));
Avi Kivity8417ceb2011-08-03 11:56:14 +03003743 memory_region_init(system_memory, "system", INT64_MAX);
Avi Kivity62152b82011-07-26 14:26:14 +03003744 set_system_memory_map(system_memory);
Avi Kivity309cb472011-08-08 16:09:03 +03003745
Anthony Liguori7267c092011-08-20 22:09:37 -05003746 system_io = g_malloc(sizeof(*system_io));
Avi Kivity309cb472011-08-08 16:09:03 +03003747 memory_region_init(system_io, "io", 65536);
3748 set_system_io_map(system_io);
Avi Kivity93632742012-02-08 16:54:16 +02003749
Avi Kivity4855d412012-02-08 21:16:05 +02003750 memory_listener_register(&core_memory_listener, system_memory);
3751 memory_listener_register(&io_memory_listener, system_io);
Avi Kivity62152b82011-07-26 14:26:14 +03003752}
3753
3754MemoryRegion *get_system_memory(void)
3755{
3756 return system_memory;
3757}
3758
Avi Kivity309cb472011-08-08 16:09:03 +03003759MemoryRegion *get_system_io(void)
3760{
3761 return system_io;
3762}
3763
pbrooke2eef172008-06-08 01:09:01 +00003764#endif /* !defined(CONFIG_USER_ONLY) */
3765
bellard13eb76e2004-01-24 15:23:36 +00003766/* physical memory access (slow version, mainly for debug) */
3767#if defined(CONFIG_USER_ONLY)
Paul Brooka68fe892010-03-01 00:08:59 +00003768int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3769 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003770{
3771 int l, flags;
3772 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00003773 void * p;
bellard13eb76e2004-01-24 15:23:36 +00003774
3775 while (len > 0) {
3776 page = addr & TARGET_PAGE_MASK;
3777 l = (page + TARGET_PAGE_SIZE) - addr;
3778 if (l > len)
3779 l = len;
3780 flags = page_get_flags(page);
3781 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00003782 return -1;
bellard13eb76e2004-01-24 15:23:36 +00003783 if (is_write) {
3784 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00003785 return -1;
bellard579a97f2007-11-11 14:26:47 +00003786 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003787 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00003788 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003789 memcpy(p, buf, l);
3790 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00003791 } else {
3792 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00003793 return -1;
bellard579a97f2007-11-11 14:26:47 +00003794 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003795 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00003796 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003797 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00003798 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00003799 }
3800 len -= l;
3801 buf += l;
3802 addr += l;
3803 }
Paul Brooka68fe892010-03-01 00:08:59 +00003804 return 0;
bellard13eb76e2004-01-24 15:23:36 +00003805}
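
/* Typical consumer (sketch): the gdb stub peeking at guest memory;
   env and guest_va are placeholders for this example. */
#if 0
    uint8_t buf[8];
    if (cpu_memory_rw_debug(env, guest_va, buf, sizeof(buf), 0) < 0) {
        /* range unmapped, or page protection prevents the access */
    }
#endif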

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    MemoryRegionSection section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        if (is_write) {
            if (!memory_region_is_ram(section.mr)) {
                target_phys_addr_t addr1;
                io_index = memory_region_get_ram_addr(section.mr)
                    & (IO_MEM_NB_ENTRIES - 1);
                addr1 = (addr & ~TARGET_PAGE_MASK)
                    + section.offset_within_region;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write(io_index, addr1, val, 4);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write(io_index, addr1, val, 2);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write(io_index, addr1, val, 1);
                    l = 1;
                }
            } else if (!section.readonly) {
                ram_addr_t addr1;
                addr1 = (memory_region_get_ram_addr(section.mr)
                         + section.offset_within_region)
                    | (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                qemu_put_ram_ptr(ptr);
            }
        } else {
            if (!is_ram_rom_romd(&section)) {
                target_phys_addr_t addr1;
                /* I/O case */
                io_index = memory_region_get_ram_addr(section.mr)
                    & (IO_MEM_NB_ENTRIES - 1);
                addr1 = (addr & ~TARGET_PAGE_MASK)
                    + section.offset_within_region;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read(io_index, addr1, 4);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read(io_index, addr1, 2);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read(io_index, addr1, 1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(section.mr->ram_addr
                                       + section.offset_within_region);
                memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
                qemu_put_ram_ptr(ptr);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
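
/* A minimal usage sketch (illustrative only, excluded from the build):
 * a device model DMA-ing a buffer into guest physical memory and reading
 * it back.  GUEST_ADDR is a made-up constant; a real device would take it
 * from a DMA descriptor.  cpu_physical_memory_read()/write() in
 * cpu-common.h are thin wrappers around cpu_physical_memory_rw().
 */
#if 0
static void dma_rw_example(void)
{
    uint8_t out[64], in[64];
    const target_phys_addr_t GUEST_ADDR = 0x100000;    /* hypothetical */

    memset(out, 0xab, sizeof(out));
    /* is_write = 1: copy from 'out' into guest memory (or device MMIO) */
    cpu_physical_memory_rw(GUEST_ADDR, out, sizeof(out), 1);
    /* is_write = 0: copy guest memory back into 'in' */
    cpu_physical_memory_rw(GUEST_ADDR, in, sizeof(in), 0);
}
#endif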

/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    MemoryRegionSection section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        if (!is_ram_rom_romd(&section)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (memory_region_get_ram_addr(section.mr)
                     + section.offset_within_region)
                + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            qemu_put_ram_ptr(ptr);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
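
/* A minimal usage sketch (illustrative only, excluded from the build):
 * how a firmware loader might place a blob into a ROM region.  A plain
 * cpu_physical_memory_write() is silently dropped for read-only pages
 * (see the !section.readonly test in cpu_physical_memory_rw() above);
 * the _write_rom variant bypasses that check.  ROM_BASE is hypothetical.
 */
#if 0
static void load_firmware_example(const uint8_t *blob, int size)
{
    const target_phys_addr_t ROM_BASE = 0xfffc0000;    /* hypothetical */

    cpu_physical_memory_write_rom(ROM_BASE, blob, size);
}
#endif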

typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
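
/* A sketch of the intended map-client protocol (illustrative only,
 * excluded from the build): a DMA engine whose cpu_physical_memory_map()
 * call failed registers itself here and retries from the callback.
 * MyDMAState and dma_try_run() are hypothetical.  Note that
 * cpu_notify_map_clients() invokes each callback once and then
 * unregisters the client, so a retry that fails again must re-register.
 */
#if 0
typedef struct MyDMAState MyDMAState;
static void dma_try_run(MyDMAState *s);    /* retries the mapping */

static void dma_map_retry_cb(void *opaque)
{
    dma_try_run(opaque);
}

static void dma_map_failed(MyDMAState *s)
{
    cpu_register_map_client(s, dma_map_retry_cb);
}
#endif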

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t todo = 0;
    int l;
    target_phys_addr_t page;
    MemoryRegionSection section;
    ram_addr_t raddr = RAM_ADDR_MAX;
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        if (!(memory_region_is_ram(section.mr) && !section.readonly)) {
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_read(addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            raddr = memory_region_get_ram_addr(section.mr)
                + section.offset_within_region
                + (addr & ~TARGET_PAGE_MASK);
        }

        len -= l;
        addr += l;
        todo += l;
    }
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
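
/* A minimal usage sketch (illustrative only, excluded from the build):
 * zero-copy access to guest RAM through map/unmap.  The returned length
 * may be shorter than requested (or the call may fall back to the single
 * bounce buffer), so callers loop; a NULL result means resources are
 * exhausted and the caller should fall back to the map-client mechanism
 * shown earlier.
 */
#if 0
static void map_unmap_example(target_phys_addr_t addr, target_phys_addr_t size)
{
    while (size > 0) {
        target_phys_addr_t plen = size;
        void *p = cpu_physical_memory_map(addr, &plen, 1 /* is_write */);

        if (!p) {
            break;    /* register a map client and retry later */
        }
        memset(p, 0, plen);    /* touch the mapped guest pages */
        cpu_physical_memory_unmap(p, plen, 1, plen);
        addr += plen;
        size -= plen;
    }
}
#endif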

/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    MemoryRegionSection section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!is_ram_rom_romd(&section)) {
        /* I/O case */
        io_index = memory_region_get_ram_addr(section.mr)
            & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
        val = io_mem_read(io_index, addr, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section.mr)
                                & TARGET_PAGE_MASK)
                               + section.offset_within_region) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
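
/* A one-line usage sketch (illustrative only, excluded from the build):
 * a device register specified as little-endian regardless of the target
 * CPU must be read with the explicit-endian accessor; plain ldl_phys()
 * applies target byte order.  REG_ADDR is hypothetical.
 */
#if 0
static uint32_t read_le_device_reg(void)
{
    const target_phys_addr_t REG_ADDR = 0xfe000000;    /* hypothetical */

    return ldl_le_phys(REG_ADDR);
}
#endif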

/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!is_ram_rom_romd(&section)) {
        /* I/O case */
        io_index = memory_region_get_ram_addr(section.mr)
            & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;

        /* XXX This is broken when device endian != cpu endian.
           Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = io_mem_read(io_index, addr, 4) << 32;
        val |= io_mem_read(io_index, addr + 4, 4);
#else
        val = io_mem_read(io_index, addr, 4);
        val |= io_mem_read(io_index, addr + 4, 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section.mr)
                                & TARGET_PAGE_MASK)
                               + section.offset_within_region)
            + (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
                                          enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!is_ram_rom_romd(&section)) {
        /* I/O case */
        io_index = memory_region_get_ram_addr(section.mr)
            & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
        val = io_mem_read(io_index, addr, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section.mr)
                                & TARGET_PAGE_MASK)
                               + section.offset_within_region)
            + (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned.  The ram page is not marked as dirty
   and the code inside is not invalidated.  It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    MemoryRegionSection section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section.mr) || section.readonly) {
        if (memory_region_is_ram(section.mr)) {
            io_index = io_mem_rom.ram_addr;
        } else {
            io_index = memory_region_get_ram_addr(section.mr);
        }
        addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
        io_mem_write(io_index, addr, val, 4);
    } else {
        unsigned long addr1 = (memory_region_get_ram_addr(section.mr)
                               & TARGET_PAGE_MASK)
            + section.offset_within_region
            + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
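
/* A sketch of the pattern the comment above alludes to (illustrative only,
 * excluded from the build): a target MMU walker setting a status bit in a
 * guest page-table entry.  Using the _notdirty store keeps the dirty bitmap
 * meaningful for tracking guest PTE modifications.  PTE_ACCESSED is a
 * hypothetical bit; see the target page-table walkers for real users.
 */
#if 0
static void set_pte_accessed(target_phys_addr_t pte_addr)
{
    const uint32_t PTE_ACCESSED = 1 << 5;    /* hypothetical bit */
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & PTE_ACCESSED)) {
        stl_phys_notdirty(pte_addr, pte | PTE_ACCESSED);
    }
}
#endif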

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    MemoryRegionSection section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section.mr) || section.readonly) {
        if (memory_region_is_ram(section.mr)) {
            io_index = io_mem_rom.ram_addr;
        } else {
            io_index = memory_region_get_ram_addr(section.mr)
                & (IO_MEM_NB_ENTRIES - 1);
        }
        addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write(io_index, addr, val >> 32, 4);
        io_mem_write(io_index, addr + 4, (uint32_t)val, 4);
#else
        io_mem_write(io_index, addr, (uint32_t)val, 4);
        io_mem_write(io_index, addr + 4, val >> 32, 4);
#endif
    } else {
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section.mr)
                                & TARGET_PAGE_MASK)
                               + section.offset_within_region)
            + (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    MemoryRegionSection section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section.mr) || section.readonly) {
        if (memory_region_is_ram(section.mr)) {
            io_index = io_mem_rom.ram_addr;
        } else {
            io_index = memory_region_get_ram_addr(section.mr)
                & (IO_MEM_NB_ENTRIES - 1);
        }
        addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(io_index, addr, val, 4);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section.mr) & TARGET_PAGE_MASK)
            + section.offset_within_region
            + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    MemoryRegionSection section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section.mr) || section.readonly) {
        if (memory_region_is_ram(section.mr)) {
            io_index = io_mem_rom.ram_addr;
        } else {
            io_index = memory_region_get_ram_addr(section.mr)
                & (IO_MEM_NB_ENTRIES - 1);
        }
        addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(io_index, addr, val, 2);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section.mr) & TARGET_PAGE_MASK)
            + section.offset_within_region + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif
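
/* A minimal usage sketch (illustrative only, excluded from the build):
 * how a debugger stub reads guest virtual memory.  This is the path the
 * gdbstub takes; it resolves pages with cpu_get_phys_page_debug() and so
 * never faults the guest.
 */
#if 0
static int debug_peek(CPUState *env, target_ulong vaddr, uint8_t *buf, int len)
{
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0 /* read */);
}
#endif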

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}

#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
   is the offset relative to phys_ram_base */
tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    void *p;

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1);
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
        ldub_code(addr);
    }
    pd = env1->tlb_table[mmu_idx][page_index].addr_code & ~TARGET_PAGE_MASK;
    if (pd != io_mem_ram.ram_addr && pd != io_mem_rom.ram_addr
        && !io_mem_region[pd]->rom_device) {
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC)
        cpu_unassigned_access(env1, addr, 0, 1, 0, 4);
#else
        cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
#endif
    }
    p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}

/*
 * A helper function for the _utterly broken_ virtio device model to find out
 * if it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif
4716#endif