/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

#define P_L2_LEVELS \
    (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)

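/* Worked example (editorial note, not part of the original source):
   for an x86_64 guest in user mode, L1_MAP_ADDR_SPACE_BITS is
   TARGET_VIRT_ADDR_SPACE_BITS = 47 and TARGET_PAGE_BITS = 12, so a
   page index needs 47 - 12 = 35 bits.  V_L1_BITS_REM = 35 % 10 = 5,
   which is not < 4, so V_L1_BITS = 5: a 32-entry (V_L1_SIZE) top
   level followed by V_L1_SHIFT / L2_BITS = 3 levels of 1024 entries
   each, covering 5 + 3 * 10 = 35 bits in total. */
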
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageEntry PhysPageEntry;

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;

struct PhysPageEntry {
    union {
        uint16_t leaf; /* index into phys_sections */
        uint16_t node; /* index into phys_map_nodes */
    } u;
};

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL ((uint16_t)~0)

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to MemoryRegionSections.  */
static PhysPageEntry phys_map = { .u.node = PHYS_MAP_NODE_NIL };

static void io_mem_init(void);
static void memory_map_init(void);

/* io memory support */
MemoryRegion *io_mem_region[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static MemoryRegion io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

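/* Editorial sketch (not part of the original source): a hypothetical,
   compiled-out helper showing how the map above is consulted.  Callers
   index by page number, not by raw address, and go through page_find()
   (alloc == 0) when a missing entry should simply mean "no descriptor". */
#if 0
static int example_page_has_code(target_ulong vaddr)
{
    /* shift the address down to a page index, then walk the map
       without allocating any missing levels */
    PageDesc *p = page_find(vaddr >> TARGET_PAGE_BITS);

    /* NULL means no level of the map was ever allocated for this page */
    return p != NULL && p->first_tb != NULL;
}
#endif
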
#if !defined(CONFIG_USER_ONLY)

static PhysPageEntry *phys_map_node_alloc(uint16_t *ptr)
{
    unsigned i;
    uint16_t ret;

    /* Assign early to avoid the pointer being invalidated by g_renew() */
    *ptr = ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    if (ret == phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].u.node = PHYS_MAP_NODE_NIL;
    }
    return phys_map_nodes[ret];
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}

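/* Editorial note (not part of the original source): nodes are referred
   to by 16-bit indices into phys_map_nodes rather than by pointers, so
   when g_renew() doubles the array (16, 32, 64, ... entries) and
   possibly moves it, the links already stored in the tree stay valid. */
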
static uint16_t *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageEntry *lp, *p;
    int i, j;

    lp = &phys_map;

    /* Level 1..N. */
    for (i = P_L2_LEVELS - 1; i >= 0; i--) {
        if (lp->u.node == PHYS_MAP_NODE_NIL) {
            if (!alloc) {
                return NULL;
            }
            p = phys_map_node_alloc(&lp->u.node);
            if (i == 0) {
                for (j = 0; j < L2_SIZE; j++) {
                    p[j].u.leaf = phys_section_unassigned;
                }
            }
        } else {
            p = phys_map_nodes[lp->u.node];
        }
        lp = &p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    return &lp->u.leaf;
}

static MemoryRegionSection phys_page_find(target_phys_addr_t index)
{
    uint16_t *p = phys_page_find_alloc(index, 0);
    uint16_t s_index = phys_section_unassigned;
    MemoryRegionSection section;
    target_phys_addr_t delta;

    if (p) {
        s_index = *p;
    }
    section = phys_sections[s_index];
    index <<= TARGET_PAGE_BITS;
    assert(section.offset_within_address_space <= index
           && index <= section.offset_within_address_space + section.size-1);
    delta = index - section.offset_within_address_space;
    section.offset_within_address_space += delta;
    section.offset_within_region += delta;
    section.size -= delta;
    return section;
}

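/* Worked example (editorial note, not part of the original source):
   assume TARGET_PAGE_BITS = 12 and a section covering addresses
   [0x10000, 0x14000).  For index 0x12 the shift gives 0x12000, so
   delta = 0x2000: the returned copy is clipped to start at the
   looked-up page, its offset_within_region grows by 0x2000 and its
   size shrinks from 0x4000 to 0x2000. */
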
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Keep the buffer no bigger than 16MB to branch between blocks */
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now. */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

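/* Editorial sketch (not part of the original source): the start-up
   order the two initializers above expect, shown as a hypothetical,
   compiled-out caller. */
#if 0
static void example_startup(void)
{
    cpu_exec_init_all();   /* memory and I/O maps (softmmu builds only) */
    tcg_exec_init(0);      /* 0 = default translation buffer size */
    assert(tcg_enabled()); /* code_gen_buffer is now allocated */
}
#endif
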
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

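/* Editorial sketch (not part of the original source): the single-shot
   pattern tb_free() is written for, shown as a hypothetical,
   compiled-out caller.  tb_gen_code() below uses the same
   flush-and-retry dance when tb_alloc() fails. */
#if 0
static void example_temporary_tb(CPUState *env, target_ulong pc)
{
    TranslationBlock *tb = tb_alloc(pc);

    if (tb == NULL) {
        /* TB array or code buffer exhausted: flush everything, retry */
        tb_flush(env);
        tb = tb_alloc(pc);
    }
    /* ... translate into tb and execute it once ... */
    tb_free(tb);   /* only reclaims space if tb is still the last TB */
}
#endif
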
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

871{
872 TranslationBlock *tb1, **ptb;
873 unsigned int n1;
874
875 ptb = &tb->jmp_next[n];
876 tb1 = *ptb;
877 if (tb1) {
878 /* find tb(n) in circular list */
879 for(;;) {
880 tb1 = *ptb;
881 n1 = (long)tb1 & 3;
882 tb1 = (TranslationBlock *)((long)tb1 & ~3);
883 if (n1 == n && tb1 == tb)
884 break;
885 if (n1 == 2) {
886 ptb = &tb1->jmp_first;
887 } else {
888 ptb = &tb1->jmp_next[n1];
889 }
890 }
891 /* now we can suppress tb(n) from the list */
892 *ptb = tb->jmp_next[n];
893
894 tb->jmp_next[n] = NULL;
895 }
896}
897
898/* reset the jump entry 'n' of a TB so that it is not chained to
899 another TB */
900static inline void tb_reset_jump(TranslationBlock *tb, int n)
901{
902 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
903}
904
Paul Brook41c1b1c2010-03-12 16:54:58 +0000905void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
bellardfd6ce8f2003-05-14 19:00:11 +0000906{
bellard6a00d602005-11-21 23:25:50 +0000907 CPUState *env;
bellardfd6ce8f2003-05-14 19:00:11 +0000908 PageDesc *p;
bellard8a40a182005-11-20 10:35:40 +0000909 unsigned int h, n1;
Paul Brook41c1b1c2010-03-12 16:54:58 +0000910 tb_page_addr_t phys_pc;
bellard8a40a182005-11-20 10:35:40 +0000911 TranslationBlock *tb1, *tb2;
ths3b46e622007-09-17 08:09:54 +0000912
bellard9fa3e852004-01-04 18:06:42 +0000913 /* remove the TB from the hash list */
914 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
915 h = tb_phys_hash_func(phys_pc);
ths5fafdf22007-09-16 21:08:06 +0000916 tb_remove(&tb_phys_hash[h], tb,
bellard9fa3e852004-01-04 18:06:42 +0000917 offsetof(TranslationBlock, phys_hash_next));
bellardfd6ce8f2003-05-14 19:00:11 +0000918
bellard9fa3e852004-01-04 18:06:42 +0000919 /* remove the TB from the page list */
920 if (tb->page_addr[0] != page_addr) {
921 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
922 tb_page_remove(&p->first_tb, tb);
923 invalidate_page_bitmap(p);
924 }
925 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
926 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
927 tb_page_remove(&p->first_tb, tb);
928 invalidate_page_bitmap(p);
929 }
930
bellard8a40a182005-11-20 10:35:40 +0000931 tb_invalidated_flag = 1;
932
933 /* remove the TB from the hash list */
934 h = tb_jmp_cache_hash_func(tb->pc);
bellard6a00d602005-11-21 23:25:50 +0000935 for(env = first_cpu; env != NULL; env = env->next_cpu) {
936 if (env->tb_jmp_cache[h] == tb)
937 env->tb_jmp_cache[h] = NULL;
938 }
bellard8a40a182005-11-20 10:35:40 +0000939
940 /* suppress this TB from the two jump lists */
941 tb_jmp_remove(tb, 0);
942 tb_jmp_remove(tb, 1);
943
944 /* suppress any remaining jumps to this TB */
945 tb1 = tb->jmp_first;
946 for(;;) {
947 n1 = (long)tb1 & 3;
948 if (n1 == 2)
949 break;
950 tb1 = (TranslationBlock *)((long)tb1 & ~3);
951 tb2 = tb1->jmp_next[n1];
952 tb_reset_jump(tb1, n1);
953 tb1->jmp_next[n1] = NULL;
954 tb1 = tb2;
955 }
956 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
957
bellarde3db7222005-01-26 22:00:47 +0000958 tb_phys_invalidate_count++;
bellard9fa3e852004-01-04 18:06:42 +0000959}
960
961static inline void set_bits(uint8_t *tab, int start, int len)
962{
963 int end, mask, end1;
964
965 end = start + len;
966 tab += start >> 3;
967 mask = 0xff << (start & 7);
968 if ((start & ~7) == (end & ~7)) {
969 if (start < end) {
970 mask &= ~(0xff << (end & 7));
971 *tab |= mask;
972 }
973 } else {
974 *tab++ |= mask;
975 start = (start + 8) & ~7;
976 end1 = end & ~7;
977 while (start < end1) {
978 *tab++ = 0xff;
979 start += 8;
980 }
981 if (start < end) {
982 mask = ~(0xff << (end & 7));
983 *tab |= mask;
984 }
985 }
986}
987
988static void build_page_bitmap(PageDesc *p)
989{
990 int n, tb_start, tb_end;
991 TranslationBlock *tb;
ths3b46e622007-09-17 08:09:54 +0000992
Anthony Liguori7267c092011-08-20 22:09:37 -0500993 p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);
bellard9fa3e852004-01-04 18:06:42 +0000994
995 tb = p->first_tb;
996 while (tb != NULL) {
997 n = (long)tb & 3;
998 tb = (TranslationBlock *)((long)tb & ~3);
999 /* NOTE: this is subtle as a TB may span two physical pages */
1000 if (n == 0) {
1001 /* NOTE: tb_end may be after the end of the page, but
1002 it is not a problem */
1003 tb_start = tb->pc & ~TARGET_PAGE_MASK;
1004 tb_end = tb_start + tb->size;
1005 if (tb_end > TARGET_PAGE_SIZE)
1006 tb_end = TARGET_PAGE_SIZE;
1007 } else {
1008 tb_start = 0;
1009 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1010 }
1011 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
1012 tb = tb->page_next[n];
1013 }
1014}
1015
pbrook2e70f6e2008-06-29 01:03:05 +00001016TranslationBlock *tb_gen_code(CPUState *env,
1017 target_ulong pc, target_ulong cs_base,
1018 int flags, int cflags)
bellardd720b932004-04-25 17:57:43 +00001019{
1020 TranslationBlock *tb;
1021 uint8_t *tc_ptr;
Paul Brook41c1b1c2010-03-12 16:54:58 +00001022 tb_page_addr_t phys_pc, phys_page2;
1023 target_ulong virt_page2;
bellardd720b932004-04-25 17:57:43 +00001024 int code_gen_size;
1025
Paul Brook41c1b1c2010-03-12 16:54:58 +00001026 phys_pc = get_page_addr_code(env, pc);
bellardc27004e2005-01-03 23:35:10 +00001027 tb = tb_alloc(pc);
bellardd720b932004-04-25 17:57:43 +00001028 if (!tb) {
1029 /* flush must be done */
1030 tb_flush(env);
1031 /* cannot fail at this point */
bellardc27004e2005-01-03 23:35:10 +00001032 tb = tb_alloc(pc);
pbrook2e70f6e2008-06-29 01:03:05 +00001033 /* Don't forget to invalidate previous TB info. */
1034 tb_invalidated_flag = 1;
bellardd720b932004-04-25 17:57:43 +00001035 }
1036 tc_ptr = code_gen_ptr;
1037 tb->tc_ptr = tc_ptr;
1038 tb->cs_base = cs_base;
1039 tb->flags = flags;
1040 tb->cflags = cflags;
blueswir1d07bde82007-12-11 19:35:45 +00001041 cpu_gen_code(env, tb, &code_gen_size);
bellardd720b932004-04-25 17:57:43 +00001042 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
ths3b46e622007-09-17 08:09:54 +00001043
bellardd720b932004-04-25 17:57:43 +00001044 /* check next page if needed */
bellardc27004e2005-01-03 23:35:10 +00001045 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
bellardd720b932004-04-25 17:57:43 +00001046 phys_page2 = -1;
bellardc27004e2005-01-03 23:35:10 +00001047 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
Paul Brook41c1b1c2010-03-12 16:54:58 +00001048 phys_page2 = get_page_addr_code(env, virt_page2);
bellardd720b932004-04-25 17:57:43 +00001049 }
Paul Brook41c1b1c2010-03-12 16:54:58 +00001050 tb_link_page(tb, phys_pc, phys_page2);
pbrook2e70f6e2008-06-29 01:03:05 +00001051 return tb;
bellardd720b932004-04-25 17:57:43 +00001052}
ths3b46e622007-09-17 08:09:54 +00001053
bellard9fa3e852004-01-04 18:06:42 +00001054/* invalidate all TBs which intersect with the target physical page
1055 starting in range [start;end[. NOTE: start and end must refer to
bellardd720b932004-04-25 17:57:43 +00001056 the same physical page. 'is_cpu_write_access' should be true if called
1057 from a real cpu write access: the virtual CPU will exit the current
1058 TB if code is modified inside this TB. */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001059void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
bellardd720b932004-04-25 17:57:43 +00001060 int is_cpu_write_access)
bellard9fa3e852004-01-04 18:06:42 +00001061{
aliguori6b917542008-11-18 19:46:41 +00001062 TranslationBlock *tb, *tb_next, *saved_tb;
bellardd720b932004-04-25 17:57:43 +00001063 CPUState *env = cpu_single_env;
Paul Brook41c1b1c2010-03-12 16:54:58 +00001064 tb_page_addr_t tb_start, tb_end;
aliguori6b917542008-11-18 19:46:41 +00001065 PageDesc *p;
1066 int n;
1067#ifdef TARGET_HAS_PRECISE_SMC
1068 int current_tb_not_found = is_cpu_write_access;
1069 TranslationBlock *current_tb = NULL;
1070 int current_tb_modified = 0;
1071 target_ulong current_pc = 0;
1072 target_ulong current_cs_base = 0;
1073 int current_flags = 0;
1074#endif /* TARGET_HAS_PRECISE_SMC */
bellard9fa3e852004-01-04 18:06:42 +00001075
1076 p = page_find(start >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001077 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00001078 return;
ths5fafdf22007-09-16 21:08:06 +00001079 if (!p->code_bitmap &&
bellardd720b932004-04-25 17:57:43 +00001080 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1081 is_cpu_write_access) {
bellard9fa3e852004-01-04 18:06:42 +00001082 /* build code bitmap */
1083 build_page_bitmap(p);
1084 }
1085
1086 /* we remove all the TBs in the range [start, end[ */
1087 /* XXX: see if in some cases it could be faster to invalidate all the code */
1088 tb = p->first_tb;
1089 while (tb != NULL) {
1090 n = (long)tb & 3;
1091 tb = (TranslationBlock *)((long)tb & ~3);
1092 tb_next = tb->page_next[n];
1093 /* NOTE: this is subtle as a TB may span two physical pages */
1094 if (n == 0) {
1095 /* NOTE: tb_end may be after the end of the page, but
1096 it is not a problem */
1097 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1098 tb_end = tb_start + tb->size;
1099 } else {
1100 tb_start = tb->page_addr[1];
1101 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1102 }
1103 if (!(tb_end <= start || tb_start >= end)) {
bellardd720b932004-04-25 17:57:43 +00001104#ifdef TARGET_HAS_PRECISE_SMC
1105 if (current_tb_not_found) {
1106 current_tb_not_found = 0;
1107 current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001108 if (env->mem_io_pc) {
bellardd720b932004-04-25 17:57:43 +00001109 /* now we have a real cpu fault */
pbrook2e70f6e2008-06-29 01:03:05 +00001110 current_tb = tb_find_pc(env->mem_io_pc);
bellardd720b932004-04-25 17:57:43 +00001111 }
1112 }
1113 if (current_tb == tb &&
pbrook2e70f6e2008-06-29 01:03:05 +00001114 (current_tb->cflags & CF_COUNT_MASK) != 1) {
bellardd720b932004-04-25 17:57:43 +00001115 /* If we are modifying the current TB, we must stop
1116 its execution. We could be more precise by checking
1117 that the modification is after the current PC, but it
1118 would require a specialized function to partially
1119 restore the CPU state */
ths3b46e622007-09-17 08:09:54 +00001120
bellardd720b932004-04-25 17:57:43 +00001121 current_tb_modified = 1;
Stefan Weil618ba8e2011-04-18 06:39:53 +00001122 cpu_restore_state(current_tb, env, env->mem_io_pc);
aliguori6b917542008-11-18 19:46:41 +00001123 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1124 &current_flags);
bellardd720b932004-04-25 17:57:43 +00001125 }
1126#endif /* TARGET_HAS_PRECISE_SMC */
bellard6f5a9f72005-11-26 20:12:28 +00001127 /* we need to do that to handle the case where a signal
1128 occurs while doing tb_phys_invalidate() */
1129 saved_tb = NULL;
1130 if (env) {
1131 saved_tb = env->current_tb;
1132 env->current_tb = NULL;
1133 }
bellard9fa3e852004-01-04 18:06:42 +00001134 tb_phys_invalidate(tb, -1);
bellard6f5a9f72005-11-26 20:12:28 +00001135 if (env) {
1136 env->current_tb = saved_tb;
1137 if (env->interrupt_request && env->current_tb)
1138 cpu_interrupt(env, env->interrupt_request);
1139 }
bellard9fa3e852004-01-04 18:06:42 +00001140 }
1141 tb = tb_next;
1142 }
1143#if !defined(CONFIG_USER_ONLY)
1144 /* if no code remaining, no need to continue to use slow writes */
1145 if (!p->first_tb) {
1146 invalidate_page_bitmap(p);
bellardd720b932004-04-25 17:57:43 +00001147 if (is_cpu_write_access) {
pbrook2e70f6e2008-06-29 01:03:05 +00001148 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
bellardd720b932004-04-25 17:57:43 +00001149 }
1150 }
1151#endif
1152#ifdef TARGET_HAS_PRECISE_SMC
1153 if (current_tb_modified) {
1154 /* we generate a block containing just the instruction
1155 modifying the memory. It will ensure that it cannot modify
1156 itself */
bellardea1c1802004-06-14 18:56:36 +00001157 env->current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001158 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
bellardd720b932004-04-25 17:57:43 +00001159 cpu_resume_from_signal(env, NULL);
bellard9fa3e852004-01-04 18:06:42 +00001160 }
1161#endif
1162}
1163
1164/* len must be <= 8 and start must be a multiple of len */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001165static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
bellard9fa3e852004-01-04 18:06:42 +00001166{
1167 PageDesc *p;
1168 int offset, b;
bellard59817cc2004-02-16 22:01:13 +00001169#if 0
bellarda4193c82004-06-03 14:01:43 +00001170 if (1) {
aliguori93fcfe32009-01-15 22:34:14 +00001171 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1172 cpu_single_env->mem_io_vaddr, len,
1173 cpu_single_env->eip,
1174 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
bellard59817cc2004-02-16 22:01:13 +00001175 }
1176#endif
bellard9fa3e852004-01-04 18:06:42 +00001177 p = page_find(start >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001178 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00001179 return;
1180 if (p->code_bitmap) {
1181 offset = start & ~TARGET_PAGE_MASK;
1182 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1183 if (b & ((1 << len) - 1))
1184 goto do_invalidate;
1185 } else {
1186 do_invalidate:
bellardd720b932004-04-25 17:57:43 +00001187 tb_invalidate_phys_page_range(start, start + len, 1);
bellard9fa3e852004-01-04 18:06:42 +00001188 }
1189}
1190
bellard9fa3e852004-01-04 18:06:42 +00001191#if !defined(CONFIG_SOFTMMU)
Paul Brook41c1b1c2010-03-12 16:54:58 +00001192static void tb_invalidate_phys_page(tb_page_addr_t addr,
bellardd720b932004-04-25 17:57:43 +00001193 unsigned long pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00001194{
aliguori6b917542008-11-18 19:46:41 +00001195 TranslationBlock *tb;
bellard9fa3e852004-01-04 18:06:42 +00001196 PageDesc *p;
aliguori6b917542008-11-18 19:46:41 +00001197 int n;
bellardd720b932004-04-25 17:57:43 +00001198#ifdef TARGET_HAS_PRECISE_SMC
aliguori6b917542008-11-18 19:46:41 +00001199 TranslationBlock *current_tb = NULL;
bellardd720b932004-04-25 17:57:43 +00001200 CPUState *env = cpu_single_env;
aliguori6b917542008-11-18 19:46:41 +00001201 int current_tb_modified = 0;
1202 target_ulong current_pc = 0;
1203 target_ulong current_cs_base = 0;
1204 int current_flags = 0;
bellardd720b932004-04-25 17:57:43 +00001205#endif
bellard9fa3e852004-01-04 18:06:42 +00001206
1207 addr &= TARGET_PAGE_MASK;
1208 p = page_find(addr >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001209 if (!p)
bellardfd6ce8f2003-05-14 19:00:11 +00001210 return;
1211 tb = p->first_tb;
bellardd720b932004-04-25 17:57:43 +00001212#ifdef TARGET_HAS_PRECISE_SMC
1213 if (tb && pc != 0) {
1214 current_tb = tb_find_pc(pc);
1215 }
1216#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001217 while (tb != NULL) {
bellard9fa3e852004-01-04 18:06:42 +00001218 n = (long)tb & 3;
1219 tb = (TranslationBlock *)((long)tb & ~3);
bellardd720b932004-04-25 17:57:43 +00001220#ifdef TARGET_HAS_PRECISE_SMC
1221 if (current_tb == tb &&
pbrook2e70f6e2008-06-29 01:03:05 +00001222 (current_tb->cflags & CF_COUNT_MASK) != 1) {
bellardd720b932004-04-25 17:57:43 +00001223 /* If we are modifying the current TB, we must stop
1224 its execution. We could be more precise by checking
1225 that the modification is after the current PC, but it
1226 would require a specialized function to partially
1227 restore the CPU state */
ths3b46e622007-09-17 08:09:54 +00001228
bellardd720b932004-04-25 17:57:43 +00001229 current_tb_modified = 1;
Stefan Weil618ba8e2011-04-18 06:39:53 +00001230 cpu_restore_state(current_tb, env, pc);
aliguori6b917542008-11-18 19:46:41 +00001231 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1232 &current_flags);
bellardd720b932004-04-25 17:57:43 +00001233 }
1234#endif /* TARGET_HAS_PRECISE_SMC */
bellard9fa3e852004-01-04 18:06:42 +00001235 tb_phys_invalidate(tb, addr);
1236 tb = tb->page_next[n];
bellardfd6ce8f2003-05-14 19:00:11 +00001237 }
1238 p->first_tb = NULL;
bellardd720b932004-04-25 17:57:43 +00001239#ifdef TARGET_HAS_PRECISE_SMC
1240 if (current_tb_modified) {
1241 /* we generate a block containing just the instruction
1242 modifying the memory. It will ensure that it cannot modify
1243 itself */
bellardea1c1802004-06-14 18:56:36 +00001244 env->current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001245 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
bellardd720b932004-04-25 17:57:43 +00001246 cpu_resume_from_signal(env, puc);
1247 }
1248#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001249}
bellard9fa3e852004-01-04 18:06:42 +00001250#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001251
1252/* add the tb in the target page and protect it if necessary */
ths5fafdf22007-09-16 21:08:06 +00001253static inline void tb_alloc_page(TranslationBlock *tb,
Paul Brook41c1b1c2010-03-12 16:54:58 +00001254 unsigned int n, tb_page_addr_t page_addr)
bellardfd6ce8f2003-05-14 19:00:11 +00001255{
1256 PageDesc *p;
Juan Quintela4429ab42011-06-02 01:53:44 +00001257#ifndef CONFIG_USER_ONLY
1258 bool page_already_protected;
1259#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001260
bellard9fa3e852004-01-04 18:06:42 +00001261 tb->page_addr[n] = page_addr;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001262 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
bellard9fa3e852004-01-04 18:06:42 +00001263 tb->page_next[n] = p->first_tb;
Juan Quintela4429ab42011-06-02 01:53:44 +00001264#ifndef CONFIG_USER_ONLY
1265 page_already_protected = p->first_tb != NULL;
1266#endif
bellard9fa3e852004-01-04 18:06:42 +00001267 p->first_tb = (TranslationBlock *)((long)tb | n);
1268 invalidate_page_bitmap(p);
1269
bellard107db442004-06-22 18:48:46 +00001270#if defined(TARGET_HAS_SMC) || 1
bellardd720b932004-04-25 17:57:43 +00001271
bellard9fa3e852004-01-04 18:06:42 +00001272#if defined(CONFIG_USER_ONLY)
bellardfd6ce8f2003-05-14 19:00:11 +00001273 if (p->flags & PAGE_WRITE) {
pbrook53a59602006-03-25 19:31:22 +00001274 target_ulong addr;
1275 PageDesc *p2;
bellard9fa3e852004-01-04 18:06:42 +00001276 int prot;
1277
bellardfd6ce8f2003-05-14 19:00:11 +00001278 /* force the host page as non writable (writes will have a
1279 page fault + mprotect overhead) */
pbrook53a59602006-03-25 19:31:22 +00001280 page_addr &= qemu_host_page_mask;
bellardfd6ce8f2003-05-14 19:00:11 +00001281 prot = 0;
pbrook53a59602006-03-25 19:31:22 +00001282 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1283 addr += TARGET_PAGE_SIZE) {
1284
1285 p2 = page_find (addr >> TARGET_PAGE_BITS);
1286 if (!p2)
1287 continue;
1288 prot |= p2->flags;
1289 p2->flags &= ~PAGE_WRITE;
pbrook53a59602006-03-25 19:31:22 +00001290 }
ths5fafdf22007-09-16 21:08:06 +00001291 mprotect(g2h(page_addr), qemu_host_page_size,
bellardfd6ce8f2003-05-14 19:00:11 +00001292 (prot & PAGE_BITS) & ~PAGE_WRITE);
1293#ifdef DEBUG_TB_INVALIDATE
blueswir1ab3d1722007-11-04 07:31:40 +00001294 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
pbrook53a59602006-03-25 19:31:22 +00001295 page_addr);
bellardfd6ce8f2003-05-14 19:00:11 +00001296#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001297 }
bellard9fa3e852004-01-04 18:06:42 +00001298#else
1299 /* if some code is already present, then the pages are already
1300 protected. So we handle the case where only the first TB is
1301 allocated in a physical page */
Juan Quintela4429ab42011-06-02 01:53:44 +00001302 if (!page_already_protected) {
bellard6a00d602005-11-21 23:25:50 +00001303 tlb_protect_code(page_addr);
bellard9fa3e852004-01-04 18:06:42 +00001304 }
1305#endif
bellardd720b932004-04-25 17:57:43 +00001306
1307#endif /* TARGET_HAS_SMC */
bellardfd6ce8f2003-05-14 19:00:11 +00001308}
1309
bellard9fa3e852004-01-04 18:06:42 +00001310/* add a new TB and link it to the physical page tables. phys_page2 is
1311 (-1) to indicate that only one page contains the TB. */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001312void tb_link_page(TranslationBlock *tb,
1313 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
bellardd4e81642003-05-25 16:46:15 +00001314{
bellard9fa3e852004-01-04 18:06:42 +00001315 unsigned int h;
1316 TranslationBlock **ptb;
1317
pbrookc8a706f2008-06-02 16:16:42 +00001318 /* Grab the mmap lock to stop another thread invalidating this TB
1319 before we are done. */
1320 mmap_lock();
bellard9fa3e852004-01-04 18:06:42 +00001321 /* add in the physical hash table */
1322 h = tb_phys_hash_func(phys_pc);
1323 ptb = &tb_phys_hash[h];
1324 tb->phys_hash_next = *ptb;
1325 *ptb = tb;
bellardfd6ce8f2003-05-14 19:00:11 +00001326
1327 /* add in the page list */
bellard9fa3e852004-01-04 18:06:42 +00001328 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1329 if (phys_page2 != -1)
1330 tb_alloc_page(tb, 1, phys_page2);
1331 else
1332 tb->page_addr[1] = -1;
bellard9fa3e852004-01-04 18:06:42 +00001333
bellardd4e81642003-05-25 16:46:15 +00001334 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1335 tb->jmp_next[0] = NULL;
1336 tb->jmp_next[1] = NULL;
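    /* Note: jmp_first uses the same low-bit tagging; the value 2 marks the
       head of the circular list of TBs that jump into this one, which is
       why tb_reset_jump_recursive2() stops its walk when n1 == 2. */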
1337
1338 /* init original jump addresses */
1339 if (tb->tb_next_offset[0] != 0xffff)
1340 tb_reset_jump(tb, 0);
1341 if (tb->tb_next_offset[1] != 0xffff)
1342 tb_reset_jump(tb, 1);
bellard8a40a182005-11-20 10:35:40 +00001343
1344#ifdef DEBUG_TB_CHECK
1345 tb_page_check();
1346#endif
pbrookc8a706f2008-06-02 16:16:42 +00001347 mmap_unlock();
bellardfd6ce8f2003-05-14 19:00:11 +00001348}
1349
bellarda513fe12003-05-27 23:29:48 +00001350/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1351 tb[1].tc_ptr. Return NULL if not found */
1352TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1353{
1354 int m_min, m_max, m;
1355 unsigned long v;
1356 TranslationBlock *tb;
1357
1358 if (nb_tbs <= 0)
1359 return NULL;
1360 if (tc_ptr < (unsigned long)code_gen_buffer ||
1361 tc_ptr >= (unsigned long)code_gen_ptr)
1362 return NULL;
1363 /* binary search (cf Knuth) */
1364 m_min = 0;
1365 m_max = nb_tbs - 1;
1366 while (m_min <= m_max) {
1367 m = (m_min + m_max) >> 1;
1368 tb = &tbs[m];
1369 v = (unsigned long)tb->tc_ptr;
1370 if (v == tc_ptr)
1371 return tb;
1372 else if (tc_ptr < v) {
1373 m_max = m - 1;
1374 } else {
1375 m_min = m + 1;
1376 }
ths5fafdf22007-09-16 21:08:06 +00001377 }
bellarda513fe12003-05-27 23:29:48 +00001378 return &tbs[m_max];
1379}
bellard75012672003-06-21 13:11:07 +00001380
bellardea041c02003-06-25 16:16:50 +00001381static void tb_reset_jump_recursive(TranslationBlock *tb);
1382
1383static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1384{
1385 TranslationBlock *tb1, *tb_next, **ptb;
1386 unsigned int n1;
1387
1388 tb1 = tb->jmp_next[n];
1389 if (tb1 != NULL) {
1390 /* find head of list */
1391 for(;;) {
1392 n1 = (long)tb1 & 3;
1393 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1394 if (n1 == 2)
1395 break;
1396 tb1 = tb1->jmp_next[n1];
1397 }
 1398 /* we are now sure that tb jumps to tb1 */
1399 tb_next = tb1;
1400
1401 /* remove tb from the jmp_first list */
1402 ptb = &tb_next->jmp_first;
1403 for(;;) {
1404 tb1 = *ptb;
1405 n1 = (long)tb1 & 3;
1406 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1407 if (n1 == n && tb1 == tb)
1408 break;
1409 ptb = &tb1->jmp_next[n1];
1410 }
1411 *ptb = tb->jmp_next[n];
1412 tb->jmp_next[n] = NULL;
ths3b46e622007-09-17 08:09:54 +00001413
bellardea041c02003-06-25 16:16:50 +00001414 /* suppress the jump to next tb in generated code */
1415 tb_reset_jump(tb, n);
1416
bellard01243112004-01-04 15:48:17 +00001417 /* suppress jumps in the tb we could have jumped to */
bellardea041c02003-06-25 16:16:50 +00001418 tb_reset_jump_recursive(tb_next);
1419 }
1420}
1421
1422static void tb_reset_jump_recursive(TranslationBlock *tb)
1423{
1424 tb_reset_jump_recursive2(tb, 0);
1425 tb_reset_jump_recursive2(tb, 1);
1426}
1427
bellard1fddef42005-04-17 19:16:13 +00001428#if defined(TARGET_HAS_ICE)
Paul Brook94df27f2010-02-28 23:47:45 +00001429#if defined(CONFIG_USER_ONLY)
1430static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1431{
1432 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1433}
1434#else
bellardd720b932004-04-25 17:57:43 +00001435static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1436{
Anthony Liguoric227f092009-10-01 16:12:16 -05001437 target_phys_addr_t addr;
Anthony Liguoric227f092009-10-01 16:12:16 -05001438 ram_addr_t ram_addr;
Avi Kivity06ef3522012-02-13 16:11:22 +02001439 MemoryRegionSection section;
bellardd720b932004-04-25 17:57:43 +00001440
pbrookc2f07f82006-04-08 17:14:56 +00001441 addr = cpu_get_phys_page_debug(env, pc);
Avi Kivity06ef3522012-02-13 16:11:22 +02001442 section = phys_page_find(addr >> TARGET_PAGE_BITS);
1443 if (!(memory_region_is_ram(section.mr)
1444 || (section.mr->rom_device && section.mr->readable))) {
1445 return;
1446 }
1447 ram_addr = (memory_region_get_ram_addr(section.mr)
1448 + section.offset_within_region) & TARGET_PAGE_MASK;
1449 ram_addr |= (pc & ~TARGET_PAGE_MASK);
pbrook706cd4b2006-04-08 17:36:21 +00001450 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
bellardd720b932004-04-25 17:57:43 +00001451}
bellardc27004e2005-01-03 23:35:10 +00001452#endif
Paul Brook94df27f2010-02-28 23:47:45 +00001453#endif /* TARGET_HAS_ICE */
bellardd720b932004-04-25 17:57:43 +00001454
Paul Brookc527ee82010-03-01 03:31:14 +00001455#if defined(CONFIG_USER_ONLY)
1456void cpu_watchpoint_remove_all(CPUState *env, int mask)
1458{
1459}
1460
1461int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1462 int flags, CPUWatchpoint **watchpoint)
1463{
1464 return -ENOSYS;
1465}
1466#else
pbrook6658ffb2007-03-16 23:58:11 +00001467/* Add a watchpoint. */
aliguoria1d1bb32008-11-18 20:07:32 +00001468int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1469 int flags, CPUWatchpoint **watchpoint)
pbrook6658ffb2007-03-16 23:58:11 +00001470{
aliguorib4051332008-11-18 20:14:20 +00001471 target_ulong len_mask = ~(len - 1);
aliguoric0ce9982008-11-25 22:13:57 +00001472 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001473
aliguorib4051332008-11-18 20:14:20 +00001474 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1475 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1476 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1477 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1478 return -EINVAL;
1479 }
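    /* Example of the check above: len = 4 gives len_mask = ~3, so a
       watchpoint at addr = 0x1002 is rejected (0x1002 & 3 != 0) while one
       at addr = 0x1004 is accepted; a length such as 3 is rejected by the
       power-of-2 test alone. */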
Anthony Liguori7267c092011-08-20 22:09:37 -05001480 wp = g_malloc(sizeof(*wp));
pbrook6658ffb2007-03-16 23:58:11 +00001481
aliguoria1d1bb32008-11-18 20:07:32 +00001482 wp->vaddr = addr;
aliguorib4051332008-11-18 20:14:20 +00001483 wp->len_mask = len_mask;
aliguoria1d1bb32008-11-18 20:07:32 +00001484 wp->flags = flags;
1485
aliguori2dc9f412008-11-18 20:56:59 +00001486 /* keep all GDB-injected watchpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001487 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001488 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001489 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001490 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001491
pbrook6658ffb2007-03-16 23:58:11 +00001492 tlb_flush_page(env, addr);
aliguoria1d1bb32008-11-18 20:07:32 +00001493
1494 if (watchpoint)
1495 *watchpoint = wp;
1496 return 0;
pbrook6658ffb2007-03-16 23:58:11 +00001497}
1498
aliguoria1d1bb32008-11-18 20:07:32 +00001499/* Remove a specific watchpoint. */
1500int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1501 int flags)
pbrook6658ffb2007-03-16 23:58:11 +00001502{
aliguorib4051332008-11-18 20:14:20 +00001503 target_ulong len_mask = ~(len - 1);
aliguoria1d1bb32008-11-18 20:07:32 +00001504 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001505
Blue Swirl72cf2d42009-09-12 07:36:22 +00001506 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001507 if (addr == wp->vaddr && len_mask == wp->len_mask
aliguori6e140f22008-11-18 20:37:55 +00001508 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
aliguoria1d1bb32008-11-18 20:07:32 +00001509 cpu_watchpoint_remove_by_ref(env, wp);
pbrook6658ffb2007-03-16 23:58:11 +00001510 return 0;
1511 }
1512 }
aliguoria1d1bb32008-11-18 20:07:32 +00001513 return -ENOENT;
pbrook6658ffb2007-03-16 23:58:11 +00001514}
1515
aliguoria1d1bb32008-11-18 20:07:32 +00001516/* Remove a specific watchpoint by reference. */
1517void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1518{
Blue Swirl72cf2d42009-09-12 07:36:22 +00001519 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
edgar_igl7d03f822008-05-17 18:58:29 +00001520
aliguoria1d1bb32008-11-18 20:07:32 +00001521 tlb_flush_page(env, watchpoint->vaddr);
1522
Anthony Liguori7267c092011-08-20 22:09:37 -05001523 g_free(watchpoint);
edgar_igl7d03f822008-05-17 18:58:29 +00001524}
1525
aliguoria1d1bb32008-11-18 20:07:32 +00001526/* Remove all matching watchpoints. */
1527void cpu_watchpoint_remove_all(CPUState *env, int mask)
1528{
aliguoric0ce9982008-11-25 22:13:57 +00001529 CPUWatchpoint *wp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001530
Blue Swirl72cf2d42009-09-12 07:36:22 +00001531 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001532 if (wp->flags & mask)
1533 cpu_watchpoint_remove_by_ref(env, wp);
aliguoric0ce9982008-11-25 22:13:57 +00001534 }
aliguoria1d1bb32008-11-18 20:07:32 +00001535}
Paul Brookc527ee82010-03-01 03:31:14 +00001536#endif
aliguoria1d1bb32008-11-18 20:07:32 +00001537
1538/* Add a breakpoint. */
1539int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1540 CPUBreakpoint **breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001541{
bellard1fddef42005-04-17 19:16:13 +00001542#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001543 CPUBreakpoint *bp;
ths3b46e622007-09-17 08:09:54 +00001544
Anthony Liguori7267c092011-08-20 22:09:37 -05001545 bp = g_malloc(sizeof(*bp));
aliguoria1d1bb32008-11-18 20:07:32 +00001546
1547 bp->pc = pc;
1548 bp->flags = flags;
1549
aliguori2dc9f412008-11-18 20:56:59 +00001550 /* keep all GDB-injected breakpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001551 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001552 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001553 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001554 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001555
1556 breakpoint_invalidate(env, pc);
1557
1558 if (breakpoint)
1559 *breakpoint = bp;
1560 return 0;
1561#else
1562 return -ENOSYS;
1563#endif
1564}
1565
1566/* Remove a specific breakpoint. */
1567int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1568{
1569#if defined(TARGET_HAS_ICE)
1570 CPUBreakpoint *bp;
1571
Blue Swirl72cf2d42009-09-12 07:36:22 +00001572 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00001573 if (bp->pc == pc && bp->flags == flags) {
1574 cpu_breakpoint_remove_by_ref(env, bp);
bellard4c3a88a2003-07-26 12:06:08 +00001575 return 0;
aliguoria1d1bb32008-11-18 20:07:32 +00001576 }
bellard4c3a88a2003-07-26 12:06:08 +00001577 }
aliguoria1d1bb32008-11-18 20:07:32 +00001578 return -ENOENT;
bellard4c3a88a2003-07-26 12:06:08 +00001579#else
aliguoria1d1bb32008-11-18 20:07:32 +00001580 return -ENOSYS;
bellard4c3a88a2003-07-26 12:06:08 +00001581#endif
1582}
1583
aliguoria1d1bb32008-11-18 20:07:32 +00001584/* Remove a specific breakpoint by reference. */
1585void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001586{
bellard1fddef42005-04-17 19:16:13 +00001587#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001588 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
bellardd720b932004-04-25 17:57:43 +00001589
aliguoria1d1bb32008-11-18 20:07:32 +00001590 breakpoint_invalidate(env, breakpoint->pc);
1591
Anthony Liguori7267c092011-08-20 22:09:37 -05001592 g_free(breakpoint);
aliguoria1d1bb32008-11-18 20:07:32 +00001593#endif
1594}
1595
1596/* Remove all matching breakpoints. */
1597void cpu_breakpoint_remove_all(CPUState *env, int mask)
1598{
1599#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001600 CPUBreakpoint *bp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001601
Blue Swirl72cf2d42009-09-12 07:36:22 +00001602 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001603 if (bp->flags & mask)
1604 cpu_breakpoint_remove_by_ref(env, bp);
aliguoric0ce9982008-11-25 22:13:57 +00001605 }
bellard4c3a88a2003-07-26 12:06:08 +00001606#endif
1607}
1608
bellardc33a3462003-07-29 20:50:33 +00001609/* enable or disable single step mode. EXCP_DEBUG is returned by the
1610 CPU loop after each instruction */
1611void cpu_single_step(CPUState *env, int enabled)
1612{
bellard1fddef42005-04-17 19:16:13 +00001613#if defined(TARGET_HAS_ICE)
bellardc33a3462003-07-29 20:50:33 +00001614 if (env->singlestep_enabled != enabled) {
1615 env->singlestep_enabled = enabled;
aliguorie22a25c2009-03-12 20:12:48 +00001616 if (kvm_enabled())
1617 kvm_update_guest_debug(env, 0);
1618 else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01001619 /* must flush all the translated code to avoid inconsistencies */
aliguorie22a25c2009-03-12 20:12:48 +00001620 /* XXX: only flush what is necessary */
1621 tb_flush(env);
1622 }
bellardc33a3462003-07-29 20:50:33 +00001623 }
1624#endif
1625}
1626
bellard34865132003-10-05 14:28:56 +00001627/* enable or disable low-level logging */
1628void cpu_set_log(int log_flags)
1629{
1630 loglevel = log_flags;
1631 if (loglevel && !logfile) {
pbrook11fcfab2007-07-01 18:21:11 +00001632 logfile = fopen(logfilename, log_append ? "a" : "w");
bellard34865132003-10-05 14:28:56 +00001633 if (!logfile) {
1634 perror(logfilename);
1635 _exit(1);
1636 }
bellard9fa3e852004-01-04 18:06:42 +00001637#if !defined(CONFIG_SOFTMMU)
1638 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1639 {
blueswir1b55266b2008-09-20 08:07:15 +00001640 static char logfile_buf[4096];
bellard9fa3e852004-01-04 18:06:42 +00001641 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1642 }
Stefan Weildaf767b2011-12-03 22:32:37 +01001643#elif defined(_WIN32)
1644 /* Win32 doesn't support line-buffering, so use unbuffered output. */
1645 setvbuf(logfile, NULL, _IONBF, 0);
1646#else
bellard34865132003-10-05 14:28:56 +00001647 setvbuf(logfile, NULL, _IOLBF, 0);
bellard9fa3e852004-01-04 18:06:42 +00001648#endif
pbrooke735b912007-06-30 13:53:24 +00001649 log_append = 1;
1650 }
1651 if (!loglevel && logfile) {
1652 fclose(logfile);
1653 logfile = NULL;
bellard34865132003-10-05 14:28:56 +00001654 }
1655}
1656
1657void cpu_set_log_filename(const char *filename)
1658{
1659 logfilename = strdup(filename);
pbrooke735b912007-06-30 13:53:24 +00001660 if (logfile) {
1661 fclose(logfile);
1662 logfile = NULL;
1663 }
1664 cpu_set_log(loglevel);
bellard34865132003-10-05 14:28:56 +00001665}
bellardc33a3462003-07-29 20:50:33 +00001666
aurel323098dba2009-03-07 21:28:24 +00001667static void cpu_unlink_tb(CPUState *env)
bellardea041c02003-06-25 16:16:50 +00001668{
pbrookd5975362008-06-07 20:50:51 +00001669 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1670 problem and hope the cpu will stop of its own accord. For userspace
1671 emulation this often isn't actually as bad as it sounds. Often
1672 signals are used primarily to interrupt blocking syscalls. */
aurel323098dba2009-03-07 21:28:24 +00001673 TranslationBlock *tb;
Anthony Liguoric227f092009-10-01 16:12:16 -05001674 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
aurel323098dba2009-03-07 21:28:24 +00001675
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001676 spin_lock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001677 tb = env->current_tb;
1678 /* if the cpu is currently executing code, we must unlink it and
1679 all the potentially executing TB */
Riku Voipiof76cfe52009-12-04 15:16:30 +02001680 if (tb) {
aurel323098dba2009-03-07 21:28:24 +00001681 env->current_tb = NULL;
1682 tb_reset_jump_recursive(tb);
aurel323098dba2009-03-07 21:28:24 +00001683 }
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001684 spin_unlock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001685}
1686
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001687#ifndef CONFIG_USER_ONLY
aurel323098dba2009-03-07 21:28:24 +00001688/* mask must never be zero, except for A20 change call */
Jan Kiszkaec6959d2011-04-13 01:32:56 +02001689static void tcg_handle_interrupt(CPUState *env, int mask)
aurel323098dba2009-03-07 21:28:24 +00001690{
1691 int old_mask;
1692
1693 old_mask = env->interrupt_request;
1694 env->interrupt_request |= mask;
1695
aliguori8edac962009-04-24 18:03:45 +00001696 /*
1697 * If called from iothread context, wake the target cpu in
 1698 * case it's halted.
1699 */
Jan Kiszkab7680cb2011-03-12 17:43:51 +01001700 if (!qemu_cpu_is_self(env)) {
aliguori8edac962009-04-24 18:03:45 +00001701 qemu_cpu_kick(env);
1702 return;
1703 }
aliguori8edac962009-04-24 18:03:45 +00001704
pbrook2e70f6e2008-06-29 01:03:05 +00001705 if (use_icount) {
pbrook266910c2008-07-09 15:31:50 +00001706 env->icount_decr.u16.high = 0xffff;
pbrook2e70f6e2008-06-29 01:03:05 +00001707 if (!can_do_io(env)
aurel32be214e62009-03-06 21:48:00 +00001708 && (mask & ~old_mask) != 0) {
pbrook2e70f6e2008-06-29 01:03:05 +00001709 cpu_abort(env, "Raised interrupt while not in I/O function");
1710 }
pbrook2e70f6e2008-06-29 01:03:05 +00001711 } else {
aurel323098dba2009-03-07 21:28:24 +00001712 cpu_unlink_tb(env);
bellardea041c02003-06-25 16:16:50 +00001713 }
1714}
1715
Jan Kiszkaec6959d2011-04-13 01:32:56 +02001716CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1717
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001718#else /* CONFIG_USER_ONLY */
1719
1720void cpu_interrupt(CPUState *env, int mask)
1721{
1722 env->interrupt_request |= mask;
1723 cpu_unlink_tb(env);
1724}
1725#endif /* CONFIG_USER_ONLY */
1726
bellardb54ad042004-05-20 13:42:52 +00001727void cpu_reset_interrupt(CPUState *env, int mask)
1728{
1729 env->interrupt_request &= ~mask;
1730}
1731
aurel323098dba2009-03-07 21:28:24 +00001732void cpu_exit(CPUState *env)
1733{
1734 env->exit_request = 1;
1735 cpu_unlink_tb(env);
1736}
1737
blueswir1c7cd6a32008-10-02 18:27:46 +00001738const CPULogItem cpu_log_items[] = {
ths5fafdf22007-09-16 21:08:06 +00001739 { CPU_LOG_TB_OUT_ASM, "out_asm",
bellardf193c792004-03-21 17:06:25 +00001740 "show generated host assembly code for each compiled TB" },
1741 { CPU_LOG_TB_IN_ASM, "in_asm",
1742 "show target assembly code for each compiled TB" },
ths5fafdf22007-09-16 21:08:06 +00001743 { CPU_LOG_TB_OP, "op",
bellard57fec1f2008-02-01 10:50:11 +00001744 "show micro ops for each compiled TB" },
bellardf193c792004-03-21 17:06:25 +00001745 { CPU_LOG_TB_OP_OPT, "op_opt",
blueswir1e01a1152008-03-14 17:37:11 +00001746 "show micro ops "
1747#ifdef TARGET_I386
1748 "before eflags optimization and "
bellardf193c792004-03-21 17:06:25 +00001749#endif
blueswir1e01a1152008-03-14 17:37:11 +00001750 "after liveness analysis" },
bellardf193c792004-03-21 17:06:25 +00001751 { CPU_LOG_INT, "int",
1752 "show interrupts/exceptions in short format" },
1753 { CPU_LOG_EXEC, "exec",
1754 "show trace before each executed TB (lots of logs)" },
bellard9fddaa02004-05-21 12:59:32 +00001755 { CPU_LOG_TB_CPU, "cpu",
thse91c8a72007-06-03 13:35:16 +00001756 "show CPU state before block translation" },
bellardf193c792004-03-21 17:06:25 +00001757#ifdef TARGET_I386
1758 { CPU_LOG_PCALL, "pcall",
1759 "show protected mode far calls/returns/exceptions" },
aliguorieca1bdf2009-01-26 19:54:31 +00001760 { CPU_LOG_RESET, "cpu_reset",
1761 "show CPU state before CPU resets" },
bellardf193c792004-03-21 17:06:25 +00001762#endif
bellard8e3a9fd2004-10-09 17:32:58 +00001763#ifdef DEBUG_IOPORT
bellardfd872592004-05-12 19:11:15 +00001764 { CPU_LOG_IOPORT, "ioport",
1765 "show all i/o ports accesses" },
bellard8e3a9fd2004-10-09 17:32:58 +00001766#endif
bellardf193c792004-03-21 17:06:25 +00001767 { 0, NULL, NULL },
1768};
1769
1770static int cmp1(const char *s1, int n, const char *s2)
1771{
1772 if (strlen(s2) != n)
1773 return 0;
1774 return memcmp(s1, s2, n) == 0;
1775}
ths3b46e622007-09-17 08:09:54 +00001776
bellardf193c792004-03-21 17:06:25 +00001777/* takes a comma separated list of log masks. Return 0 if error. */
1778int cpu_str_to_log_mask(const char *str)
1779{
blueswir1c7cd6a32008-10-02 18:27:46 +00001780 const CPULogItem *item;
bellardf193c792004-03-21 17:06:25 +00001781 int mask;
1782 const char *p, *p1;
1783
1784 p = str;
1785 mask = 0;
1786 for(;;) {
1787 p1 = strchr(p, ',');
1788 if (!p1)
1789 p1 = p + strlen(p);
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001790 if(cmp1(p,p1-p,"all")) {
1791 for(item = cpu_log_items; item->mask != 0; item++) {
1792 mask |= item->mask;
1793 }
1794 } else {
1795 for(item = cpu_log_items; item->mask != 0; item++) {
1796 if (cmp1(p, p1 - p, item->name))
1797 goto found;
1798 }
1799 return 0;
bellardf193c792004-03-21 17:06:25 +00001800 }
bellardf193c792004-03-21 17:06:25 +00001801 found:
1802 mask |= item->mask;
1803 if (*p1 != ',')
1804 break;
1805 p = p1 + 1;
1806 }
1807 return mask;
1808}
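/* Usage sketch: cpu_str_to_log_mask("in_asm,op") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_OP, cpu_str_to_log_mask("all") returns
   the union of every mask in cpu_log_items, and any unknown name makes
   the whole call return 0. */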
bellardea041c02003-06-25 16:16:50 +00001809
bellard75012672003-06-21 13:11:07 +00001810void cpu_abort(CPUState *env, const char *fmt, ...)
1811{
1812 va_list ap;
pbrook493ae1f2007-11-23 16:53:59 +00001813 va_list ap2;
bellard75012672003-06-21 13:11:07 +00001814
1815 va_start(ap, fmt);
pbrook493ae1f2007-11-23 16:53:59 +00001816 va_copy(ap2, ap);
bellard75012672003-06-21 13:11:07 +00001817 fprintf(stderr, "qemu: fatal: ");
1818 vfprintf(stderr, fmt, ap);
1819 fprintf(stderr, "\n");
1820#ifdef TARGET_I386
bellard7fe48482004-10-09 18:08:01 +00001821 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1822#else
1823 cpu_dump_state(env, stderr, fprintf, 0);
bellard75012672003-06-21 13:11:07 +00001824#endif
aliguori93fcfe32009-01-15 22:34:14 +00001825 if (qemu_log_enabled()) {
1826 qemu_log("qemu: fatal: ");
1827 qemu_log_vprintf(fmt, ap2);
1828 qemu_log("\n");
j_mayerf9373292007-09-29 12:18:20 +00001829#ifdef TARGET_I386
aliguori93fcfe32009-01-15 22:34:14 +00001830 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
j_mayerf9373292007-09-29 12:18:20 +00001831#else
aliguori93fcfe32009-01-15 22:34:14 +00001832 log_cpu_state(env, 0);
j_mayerf9373292007-09-29 12:18:20 +00001833#endif
aliguori31b1a7b2009-01-15 22:35:09 +00001834 qemu_log_flush();
aliguori93fcfe32009-01-15 22:34:14 +00001835 qemu_log_close();
balrog924edca2007-06-10 14:07:13 +00001836 }
pbrook493ae1f2007-11-23 16:53:59 +00001837 va_end(ap2);
j_mayerf9373292007-09-29 12:18:20 +00001838 va_end(ap);
Riku Voipiofd052bf2010-01-25 14:30:49 +02001839#if defined(CONFIG_USER_ONLY)
1840 {
1841 struct sigaction act;
1842 sigfillset(&act.sa_mask);
1843 act.sa_handler = SIG_DFL;
1844 sigaction(SIGABRT, &act, NULL);
1845 }
1846#endif
bellard75012672003-06-21 13:11:07 +00001847 abort();
1848}
1849
thsc5be9f02007-02-28 20:20:53 +00001850CPUState *cpu_copy(CPUState *env)
1851{
ths01ba9812007-12-09 02:22:57 +00001852 CPUState *new_env = cpu_init(env->cpu_model_str);
thsc5be9f02007-02-28 20:20:53 +00001853 CPUState *next_cpu = new_env->next_cpu;
1854 int cpu_index = new_env->cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001855#if defined(TARGET_HAS_ICE)
1856 CPUBreakpoint *bp;
1857 CPUWatchpoint *wp;
1858#endif
1859
thsc5be9f02007-02-28 20:20:53 +00001860 memcpy(new_env, env, sizeof(CPUState));
aliguori5a38f082009-01-15 20:16:51 +00001861
1862 /* Preserve chaining and index. */
thsc5be9f02007-02-28 20:20:53 +00001863 new_env->next_cpu = next_cpu;
1864 new_env->cpu_index = cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001865
1866 /* Clone all break/watchpoints.
1867 Note: Once we support ptrace with hw-debug register access, make sure
1868 BP_CPU break/watchpoints are handled correctly on clone. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00001869 QTAILQ_INIT(&env->breakpoints);
1870 QTAILQ_INIT(&env->watchpoints);
aliguori5a38f082009-01-15 20:16:51 +00001871#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001872 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001873 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1874 }
Blue Swirl72cf2d42009-09-12 07:36:22 +00001875 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001876 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1877 wp->flags, NULL);
1878 }
1879#endif
1880
thsc5be9f02007-02-28 20:20:53 +00001881 return new_env;
1882}
1883
bellard01243112004-01-04 15:48:17 +00001884#if !defined(CONFIG_USER_ONLY)
1885
edgar_igl5c751e92008-05-06 08:44:21 +00001886static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1887{
1888 unsigned int i;
1889
1890 /* Discard jump cache entries for any tb which might potentially
1891 overlap the flushed page. */
1892 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1893 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001894 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001895
1896 i = tb_jmp_cache_hash_page(addr);
1897 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001898 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001899}
1900
Igor Kovalenko08738982009-07-12 02:15:40 +04001901static CPUTLBEntry s_cputlb_empty_entry = {
1902 .addr_read = -1,
1903 .addr_write = -1,
1904 .addr_code = -1,
1905 .addend = -1,
1906};
1907
Peter Maydell771124e2012-01-17 13:23:13 +00001908/* NOTE:
1909 * If flush_global is true (the usual case), flush all tlb entries.
1910 * If flush_global is false, flush (at least) all tlb entries not
1911 * marked global.
1912 *
1913 * Since QEMU doesn't currently implement a global/not-global flag
1914 * for tlb entries, at the moment tlb_flush() will also flush all
1915 * tlb entries in the flush_global == false case. This is OK because
1916 * CPU architectures generally permit an implementation to drop
1917 * entries from the TLB at any time, so flushing more entries than
1918 * required is only an efficiency issue, not a correctness issue.
1919 */
bellardee8b7022004-02-03 23:35:10 +00001920void tlb_flush(CPUState *env, int flush_global)
bellard33417e72003-08-10 21:47:01 +00001921{
bellard33417e72003-08-10 21:47:01 +00001922 int i;
bellard01243112004-01-04 15:48:17 +00001923
bellard9fa3e852004-01-04 18:06:42 +00001924#if defined(DEBUG_TLB)
1925 printf("tlb_flush:\n");
1926#endif
bellard01243112004-01-04 15:48:17 +00001927 /* must reset current TB so that interrupts cannot modify the
1928 links while we are modifying them */
1929 env->current_tb = NULL;
1930
bellard33417e72003-08-10 21:47:01 +00001931 for(i = 0; i < CPU_TLB_SIZE; i++) {
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001932 int mmu_idx;
1933 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
Igor Kovalenko08738982009-07-12 02:15:40 +04001934 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001935 }
bellard33417e72003-08-10 21:47:01 +00001936 }
bellard9fa3e852004-01-04 18:06:42 +00001937
bellard8a40a182005-11-20 10:35:40 +00001938 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
bellard9fa3e852004-01-04 18:06:42 +00001939
Paul Brookd4c430a2010-03-17 02:14:28 +00001940 env->tlb_flush_addr = -1;
1941 env->tlb_flush_mask = 0;
bellarde3db7222005-01-26 22:00:47 +00001942 tlb_flush_count++;
bellard33417e72003-08-10 21:47:01 +00001943}
1944
bellard274da6b2004-05-20 21:56:27 +00001945static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
bellard61382a52003-10-27 21:22:23 +00001946{
ths5fafdf22007-09-16 21:08:06 +00001947 if (addr == (tlb_entry->addr_read &
bellard84b7b8e2005-11-28 21:19:04 +00001948 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00001949 addr == (tlb_entry->addr_write &
bellard84b7b8e2005-11-28 21:19:04 +00001950 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00001951 addr == (tlb_entry->addr_code &
bellard84b7b8e2005-11-28 21:19:04 +00001952 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
Igor Kovalenko08738982009-07-12 02:15:40 +04001953 *tlb_entry = s_cputlb_empty_entry;
bellard84b7b8e2005-11-28 21:19:04 +00001954 }
bellard61382a52003-10-27 21:22:23 +00001955}
1956
bellard2e126692004-04-25 21:28:44 +00001957void tlb_flush_page(CPUState *env, target_ulong addr)
bellard33417e72003-08-10 21:47:01 +00001958{
bellard8a40a182005-11-20 10:35:40 +00001959 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001960 int mmu_idx;
bellard01243112004-01-04 15:48:17 +00001961
bellard9fa3e852004-01-04 18:06:42 +00001962#if defined(DEBUG_TLB)
bellard108c49b2005-07-24 12:55:09 +00001963 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
bellard9fa3e852004-01-04 18:06:42 +00001964#endif
Paul Brookd4c430a2010-03-17 02:14:28 +00001965 /* Check if we need to flush due to large pages. */
1966 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
1967#if defined(DEBUG_TLB)
1968 printf("tlb_flush_page: forced full flush ("
1969 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
1970 env->tlb_flush_addr, env->tlb_flush_mask);
1971#endif
1972 tlb_flush(env, 1);
1973 return;
1974 }
bellard01243112004-01-04 15:48:17 +00001975 /* must reset current TB so that interrupts cannot modify the
1976 links while we are modifying them */
1977 env->current_tb = NULL;
bellard33417e72003-08-10 21:47:01 +00001978
bellard61382a52003-10-27 21:22:23 +00001979 addr &= TARGET_PAGE_MASK;
bellard33417e72003-08-10 21:47:01 +00001980 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001981 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1982 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
bellard01243112004-01-04 15:48:17 +00001983
edgar_igl5c751e92008-05-06 08:44:21 +00001984 tlb_flush_jmp_cache(env, addr);
bellard9fa3e852004-01-04 18:06:42 +00001985}
1986
bellard9fa3e852004-01-04 18:06:42 +00001987/* update the TLBs so that writes to code in the virtual page 'addr'
1988 can be detected */
Anthony Liguoric227f092009-10-01 16:12:16 -05001989static void tlb_protect_code(ram_addr_t ram_addr)
bellard61382a52003-10-27 21:22:23 +00001990{
ths5fafdf22007-09-16 21:08:06 +00001991 cpu_physical_memory_reset_dirty(ram_addr,
bellard6a00d602005-11-21 23:25:50 +00001992 ram_addr + TARGET_PAGE_SIZE,
1993 CODE_DIRTY_FLAG);
bellard9fa3e852004-01-04 18:06:42 +00001994}
1995
bellard9fa3e852004-01-04 18:06:42 +00001996/* update the TLB so that writes in physical page 'phys_addr' are no longer
bellard3a7d9292005-08-21 09:26:42 +00001997 tested for self-modifying code */
Anthony Liguoric227f092009-10-01 16:12:16 -05001998static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
bellard3a7d9292005-08-21 09:26:42 +00001999 target_ulong vaddr)
bellard9fa3e852004-01-04 18:06:42 +00002000{
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002001 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
bellard1ccde1c2004-02-06 19:46:14 +00002002}
2003
ths5fafdf22007-09-16 21:08:06 +00002004static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
bellard1ccde1c2004-02-06 19:46:14 +00002005 unsigned long start, unsigned long length)
2006{
2007 unsigned long addr;
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002008 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
bellard84b7b8e2005-11-28 21:19:04 +00002009 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
bellard1ccde1c2004-02-06 19:46:14 +00002010 if ((addr - start) < length) {
pbrook0f459d12008-06-09 00:20:13 +00002011 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
bellard1ccde1c2004-02-06 19:46:14 +00002012 }
2013 }
2014}
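/* Note: setting TLB_NOTDIRTY above does not unmap the page; it only
   forces the next guest write through the slow I/O path (io_mem_notdirty),
   which sets the dirty flags again and then restores the fast path. */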
2015
pbrook5579c7f2009-04-11 14:47:08 +00002016/* Note: start and end must be within the same ram block. */
Anthony Liguoric227f092009-10-01 16:12:16 -05002017void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
bellard0a962c02005-02-10 22:00:27 +00002018 int dirty_flags)
bellard1ccde1c2004-02-06 19:46:14 +00002019{
2020 CPUState *env;
bellard4f2ac232004-04-26 19:44:02 +00002021 unsigned long length, start1;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002022 int i;
bellard1ccde1c2004-02-06 19:46:14 +00002023
2024 start &= TARGET_PAGE_MASK;
2025 end = TARGET_PAGE_ALIGN(end);
2026
2027 length = end - start;
2028 if (length == 0)
2029 return;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002030 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00002031
bellard1ccde1c2004-02-06 19:46:14 +00002032 /* we modify the TLB cache so that the dirty bit will be set again
2033 when accessing the range */
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02002034 start1 = (unsigned long)qemu_safe_ram_ptr(start);
Stefan Weila57d23e2011-04-30 22:49:26 +02002035 /* Check that we don't span multiple blocks - this breaks the
pbrook5579c7f2009-04-11 14:47:08 +00002036 address comparisons below. */
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02002037 if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
pbrook5579c7f2009-04-11 14:47:08 +00002038 != (end - 1) - start) {
2039 abort();
2040 }
2041
bellard6a00d602005-11-21 23:25:50 +00002042 for(env = first_cpu; env != NULL; env = env->next_cpu) {
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002043 int mmu_idx;
2044 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2045 for(i = 0; i < CPU_TLB_SIZE; i++)
2046 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2047 start1, length);
2048 }
bellard6a00d602005-11-21 23:25:50 +00002049 }
bellard1ccde1c2004-02-06 19:46:14 +00002050}
2051
aliguori74576192008-10-06 14:02:03 +00002052int cpu_physical_memory_set_dirty_tracking(int enable)
2053{
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002054 int ret = 0;
aliguori74576192008-10-06 14:02:03 +00002055 in_migration = enable;
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002056 return ret;
aliguori74576192008-10-06 14:02:03 +00002057}
2058
bellard3a7d9292005-08-21 09:26:42 +00002059static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2060{
Anthony Liguoric227f092009-10-01 16:12:16 -05002061 ram_addr_t ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00002062 void *p;
bellard3a7d9292005-08-21 09:26:42 +00002063
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002064 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
pbrook5579c7f2009-04-11 14:47:08 +00002065 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2066 + tlb_entry->addend);
Marcelo Tosattie8902612010-10-11 15:31:19 -03002067 ram_addr = qemu_ram_addr_from_host_nofail(p);
bellard3a7d9292005-08-21 09:26:42 +00002068 if (!cpu_physical_memory_is_dirty(ram_addr)) {
pbrook0f459d12008-06-09 00:20:13 +00002069 tlb_entry->addr_write |= TLB_NOTDIRTY;
bellard3a7d9292005-08-21 09:26:42 +00002070 }
2071 }
2072}
2073
2074/* update the TLB according to the current state of the dirty bits */
2075void cpu_tlb_update_dirty(CPUState *env)
2076{
2077 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002078 int mmu_idx;
2079 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2080 for(i = 0; i < CPU_TLB_SIZE; i++)
2081 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2082 }
bellard3a7d9292005-08-21 09:26:42 +00002083}
2084
pbrook0f459d12008-06-09 00:20:13 +00002085static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002086{
pbrook0f459d12008-06-09 00:20:13 +00002087 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2088 tlb_entry->addr_write = vaddr;
bellard1ccde1c2004-02-06 19:46:14 +00002089}
2090
pbrook0f459d12008-06-09 00:20:13 +00002091/* update the TLB corresponding to virtual page vaddr
2092 so that it is no longer dirty */
2093static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002094{
bellard1ccde1c2004-02-06 19:46:14 +00002095 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002096 int mmu_idx;
bellard1ccde1c2004-02-06 19:46:14 +00002097
pbrook0f459d12008-06-09 00:20:13 +00002098 vaddr &= TARGET_PAGE_MASK;
bellard1ccde1c2004-02-06 19:46:14 +00002099 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002100 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2101 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
bellard9fa3e852004-01-04 18:06:42 +00002102}
2103
Paul Brookd4c430a2010-03-17 02:14:28 +00002104/* Our TLB does not support large pages, so remember the area covered by
2105 large pages and trigger a full TLB flush if these are invalidated. */
2106static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2107 target_ulong size)
2108{
2109 target_ulong mask = ~(size - 1);
2110
2111 if (env->tlb_flush_addr == (target_ulong)-1) {
2112 env->tlb_flush_addr = vaddr & mask;
2113 env->tlb_flush_mask = mask;
2114 return;
2115 }
2116 /* Extend the existing region to include the new page.
2117 This is a compromise between unnecessary flushes and the cost
2118 of maintaining a full variable size TLB. */
2119 mask &= env->tlb_flush_mask;
2120 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2121 mask <<= 1;
2122 }
2123 env->tlb_flush_addr &= mask;
2124 env->tlb_flush_mask = mask;
2125}
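/* Worked example with illustrative numbers: after a 2 MB page at
   0x00200000 (mask 0xffe00000), adding another 2 MB page at 0x00800000
   widens the mask step by step to 0xff000000, so tlb_flush_addr becomes
   0 and the tracked region grows to 0x00000000-0x00ffffff. */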
2126
Avi Kivity06ef3522012-02-13 16:11:22 +02002127static bool is_ram_rom(MemoryRegionSection *s)
Avi Kivity1d393fa2012-01-01 21:15:42 +02002128{
Avi Kivity06ef3522012-02-13 16:11:22 +02002129 return memory_region_is_ram(s->mr);
Avi Kivity1d393fa2012-01-01 21:15:42 +02002130}
2131
Avi Kivity06ef3522012-02-13 16:11:22 +02002132static bool is_romd(MemoryRegionSection *s)
Avi Kivity75c578d2012-01-02 15:40:52 +02002133{
Avi Kivity06ef3522012-02-13 16:11:22 +02002134 MemoryRegion *mr = s->mr;
Avi Kivity75c578d2012-01-02 15:40:52 +02002135
Avi Kivity75c578d2012-01-02 15:40:52 +02002136 return mr->rom_device && mr->readable;
2137}
2138
Avi Kivity06ef3522012-02-13 16:11:22 +02002139static bool is_ram_rom_romd(MemoryRegionSection *s)
Avi Kivity1d393fa2012-01-01 21:15:42 +02002140{
Avi Kivity06ef3522012-02-13 16:11:22 +02002141 return is_ram_rom(s) || is_romd(s);
Avi Kivity1d393fa2012-01-01 21:15:42 +02002142}
2143
Paul Brookd4c430a2010-03-17 02:14:28 +00002144/* Add a new TLB entry. At most one entry for a given virtual address
 2145 is permitted. Only a single TARGET_PAGE_SIZE region is mapped; the
2146 supplied size is only used by tlb_flush_page. */
2147void tlb_set_page(CPUState *env, target_ulong vaddr,
2148 target_phys_addr_t paddr, int prot,
2149 int mmu_idx, target_ulong size)
bellard9fa3e852004-01-04 18:06:42 +00002150{
Avi Kivity06ef3522012-02-13 16:11:22 +02002151 MemoryRegionSection section;
bellard9fa3e852004-01-04 18:06:42 +00002152 unsigned int index;
bellard4f2ac232004-04-26 19:44:02 +00002153 target_ulong address;
pbrook0f459d12008-06-09 00:20:13 +00002154 target_ulong code_address;
Paul Brook355b1942010-04-05 00:28:53 +01002155 unsigned long addend;
bellard84b7b8e2005-11-28 21:19:04 +00002156 CPUTLBEntry *te;
aliguoria1d1bb32008-11-18 20:07:32 +00002157 CPUWatchpoint *wp;
Anthony Liguoric227f092009-10-01 16:12:16 -05002158 target_phys_addr_t iotlb;
bellard9fa3e852004-01-04 18:06:42 +00002159
Paul Brookd4c430a2010-03-17 02:14:28 +00002160 assert(size >= TARGET_PAGE_SIZE);
2161 if (size != TARGET_PAGE_SIZE) {
2162 tlb_add_large_page(env, vaddr, size);
2163 }
Avi Kivity06ef3522012-02-13 16:11:22 +02002164 section = phys_page_find(paddr >> TARGET_PAGE_BITS);
bellard9fa3e852004-01-04 18:06:42 +00002165#if defined(DEBUG_TLB)
Stefan Weil7fd3f492010-09-30 22:39:51 +02002166 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
 2167 " prot=%x idx=%d size=" TARGET_FMT_lx "\n",
 2168 vaddr, paddr, prot, mmu_idx, size);
bellard9fa3e852004-01-04 18:06:42 +00002169#endif
2170
pbrook0f459d12008-06-09 00:20:13 +00002171 address = vaddr;
Avi Kivity06ef3522012-02-13 16:11:22 +02002172 if (!is_ram_rom_romd(&section)) {
pbrook0f459d12008-06-09 00:20:13 +00002173 /* IO memory case (romd handled later) */
2174 address |= TLB_MMIO;
2175 }
Avi Kivity06ef3522012-02-13 16:11:22 +02002176 if (is_ram_rom_romd(&section)) {
2177 addend = (unsigned long)(memory_region_get_ram_ptr(section.mr)
2178 + section.offset_within_region);
2179 } else {
2180 addend = 0;
2181 }
2182 if (is_ram_rom(&section)) {
pbrook0f459d12008-06-09 00:20:13 +00002183 /* Normal RAM. */
Avi Kivity06ef3522012-02-13 16:11:22 +02002184 iotlb = (memory_region_get_ram_addr(section.mr)
2185 + section.offset_within_region) & TARGET_PAGE_MASK;
2186 if (!section.readonly)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002187 iotlb |= io_mem_notdirty.ram_addr;
pbrook0f459d12008-06-09 00:20:13 +00002188 else
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002189 iotlb |= io_mem_rom.ram_addr;
pbrook0f459d12008-06-09 00:20:13 +00002190 } else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002191 /* IO handlers are currently passed a physical address.
pbrook0f459d12008-06-09 00:20:13 +00002192 It would be nice to pass an offset from the base address
2193 of that region. This would avoid having to special case RAM,
2194 and avoid full address decoding in every device.
 2195 We can't use the high bits of the ram address for this because
 2196 ROM device regions use them as a real ram address. */
Avi Kivity06ef3522012-02-13 16:11:22 +02002197 iotlb = memory_region_get_ram_addr(section.mr) & ~TARGET_PAGE_MASK;
2198 iotlb += section.offset_within_region;
pbrook0f459d12008-06-09 00:20:13 +00002199 }
pbrook6658ffb2007-03-16 23:58:11 +00002200
pbrook0f459d12008-06-09 00:20:13 +00002201 code_address = address;
2202 /* Make accesses to pages with watchpoints go via the
2203 watchpoint trap routines. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00002204 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00002205 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
Jun Koibf298f82010-05-06 14:36:59 +09002206 /* Avoid trapping reads of pages with a write breakpoint. */
2207 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
Avi Kivity1ec9b902012-01-02 12:47:48 +02002208 iotlb = io_mem_watch.ram_addr + paddr;
Jun Koibf298f82010-05-06 14:36:59 +09002209 address |= TLB_MMIO;
2210 break;
2211 }
pbrook6658ffb2007-03-16 23:58:11 +00002212 }
pbrook0f459d12008-06-09 00:20:13 +00002213 }
balrogd79acba2007-06-26 20:01:13 +00002214
pbrook0f459d12008-06-09 00:20:13 +00002215 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2216 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2217 te = &env->tlb_table[mmu_idx][index];
2218 te->addend = addend - vaddr;
2219 if (prot & PAGE_READ) {
2220 te->addr_read = address;
2221 } else {
2222 te->addr_read = -1;
2223 }
edgar_igl5c751e92008-05-06 08:44:21 +00002224
pbrook0f459d12008-06-09 00:20:13 +00002225 if (prot & PAGE_EXEC) {
2226 te->addr_code = code_address;
2227 } else {
2228 te->addr_code = -1;
2229 }
2230 if (prot & PAGE_WRITE) {
Avi Kivity06ef3522012-02-13 16:11:22 +02002231 if ((memory_region_is_ram(section.mr) && section.readonly)
2232 || is_romd(&section)) {
pbrook0f459d12008-06-09 00:20:13 +00002233 /* Write access calls the I/O callback. */
2234 te->addr_write = address | TLB_MMIO;
Avi Kivity06ef3522012-02-13 16:11:22 +02002235 } else if (memory_region_is_ram(section.mr)
2236 && !cpu_physical_memory_is_dirty(
2237 section.mr->ram_addr
2238 + section.offset_within_region)) {
pbrook0f459d12008-06-09 00:20:13 +00002239 te->addr_write = address | TLB_NOTDIRTY;
bellard84b7b8e2005-11-28 21:19:04 +00002240 } else {
pbrook0f459d12008-06-09 00:20:13 +00002241 te->addr_write = address;
bellard9fa3e852004-01-04 18:06:42 +00002242 }
pbrook0f459d12008-06-09 00:20:13 +00002243 } else {
2244 te->addr_write = -1;
bellard9fa3e852004-01-04 18:06:42 +00002245 }
bellard9fa3e852004-01-04 18:06:42 +00002246}
2247
bellard01243112004-01-04 15:48:17 +00002248#else
2249
bellardee8b7022004-02-03 23:35:10 +00002250void tlb_flush(CPUState *env, int flush_global)
bellard01243112004-01-04 15:48:17 +00002251{
2252}
2253
bellard2e126692004-04-25 21:28:44 +00002254void tlb_flush_page(CPUState *env, target_ulong addr)
bellard01243112004-01-04 15:48:17 +00002255{
2256}
2257
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002258/*
2259 * Walks guest process memory "regions" one by one
2260 * and calls callback function 'fn' for each region.
2261 */
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002262
2263struct walk_memory_regions_data
bellard9fa3e852004-01-04 18:06:42 +00002264{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002265 walk_memory_regions_fn fn;
2266 void *priv;
2267 unsigned long start;
2268 int prot;
2269};
bellard9fa3e852004-01-04 18:06:42 +00002270
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002271static int walk_memory_regions_end(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00002272 abi_ulong end, int new_prot)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002273{
2274 if (data->start != -1ul) {
2275 int rc = data->fn(data->priv, data->start, end, data->prot);
2276 if (rc != 0) {
2277 return rc;
bellard9fa3e852004-01-04 18:06:42 +00002278 }
bellard33417e72003-08-10 21:47:01 +00002279 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002280
2281 data->start = (new_prot ? end : -1ul);
2282 data->prot = new_prot;
2283
2284 return 0;
2285}
2286
2287static int walk_memory_regions_1(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00002288 abi_ulong base, int level, void **lp)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002289{
Paul Brookb480d9b2010-03-12 23:23:29 +00002290 abi_ulong pa;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002291 int i, rc;
2292
2293 if (*lp == NULL) {
2294 return walk_memory_regions_end(data, base, 0);
2295 }
2296
2297 if (level == 0) {
2298 PageDesc *pd = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00002299 for (i = 0; i < L2_SIZE; ++i) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002300 int prot = pd[i].flags;
2301
2302 pa = base | (i << TARGET_PAGE_BITS);
2303 if (prot != data->prot) {
2304 rc = walk_memory_regions_end(data, pa, prot);
2305 if (rc != 0) {
2306 return rc;
2307 }
2308 }
2309 }
2310 } else {
2311 void **pp = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00002312 for (i = 0; i < L2_SIZE; ++i) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002313 pa = base | ((abi_ulong)i <<
2314 (TARGET_PAGE_BITS + L2_BITS * level));
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002315 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2316 if (rc != 0) {
2317 return rc;
2318 }
2319 }
2320 }
2321
2322 return 0;
2323}
2324
2325int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2326{
2327 struct walk_memory_regions_data data;
2328 unsigned long i;
2329
2330 data.fn = fn;
2331 data.priv = priv;
2332 data.start = -1ul;
2333 data.prot = 0;
2334
2335 for (i = 0; i < V_L1_SIZE; i++) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002336 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002337 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2338 if (rc != 0) {
2339 return rc;
2340 }
2341 }
2342
2343 return walk_memory_regions_end(&data, 0, 0);
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002344}
2345
Paul Brookb480d9b2010-03-12 23:23:29 +00002346static int dump_region(void *priv, abi_ulong start,
2347 abi_ulong end, unsigned long prot)
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002348{
2349 FILE *f = (FILE *)priv;
2350
Paul Brookb480d9b2010-03-12 23:23:29 +00002351 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2352 " "TARGET_ABI_FMT_lx" %c%c%c\n",
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002353 start, end, end - start,
2354 ((prot & PAGE_READ) ? 'r' : '-'),
2355 ((prot & PAGE_WRITE) ? 'w' : '-'),
2356 ((prot & PAGE_EXEC) ? 'x' : '-'));
2357
2358 return (0);
2359}
2360
2361/* dump memory mappings */
2362void page_dump(FILE *f)
2363{
2364 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2365 "start", "end", "size", "prot");
2366 walk_memory_regions(f, dump_region);
bellard33417e72003-08-10 21:47:01 +00002367}
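/* With a 32-bit guest the output looks roughly like this (hypothetical
   values):
   start end size prot
   00010000-00012000 00002000 r-x
   40000000-40001000 00001000 rw-
*/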
2368
pbrook53a59602006-03-25 19:31:22 +00002369int page_get_flags(target_ulong address)
bellard33417e72003-08-10 21:47:01 +00002370{
bellard9fa3e852004-01-04 18:06:42 +00002371 PageDesc *p;
2372
2373 p = page_find(address >> TARGET_PAGE_BITS);
bellard33417e72003-08-10 21:47:01 +00002374 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00002375 return 0;
2376 return p->flags;
bellard33417e72003-08-10 21:47:01 +00002377}
2378
Richard Henderson376a7902010-03-10 15:57:04 -08002379/* Modify the flags of a page and invalidate the code if necessary.
 2380 The flag PAGE_WRITE_ORG is set automatically depending
2381 on PAGE_WRITE. The mmap_lock should already be held. */
pbrook53a59602006-03-25 19:31:22 +00002382void page_set_flags(target_ulong start, target_ulong end, int flags)
bellard9fa3e852004-01-04 18:06:42 +00002383{
Richard Henderson376a7902010-03-10 15:57:04 -08002384 target_ulong addr, len;
bellard9fa3e852004-01-04 18:06:42 +00002385
Richard Henderson376a7902010-03-10 15:57:04 -08002386 /* This function should never be called with addresses outside the
2387 guest address space. If this assert fires, it probably indicates
2388 a missing call to h2g_valid. */
Paul Brookb480d9b2010-03-12 23:23:29 +00002389#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2390 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002391#endif
2392 assert(start < end);
2393
bellard9fa3e852004-01-04 18:06:42 +00002394 start = start & TARGET_PAGE_MASK;
2395 end = TARGET_PAGE_ALIGN(end);
Richard Henderson376a7902010-03-10 15:57:04 -08002396
2397 if (flags & PAGE_WRITE) {
bellard9fa3e852004-01-04 18:06:42 +00002398 flags |= PAGE_WRITE_ORG;
Richard Henderson376a7902010-03-10 15:57:04 -08002399 }
2400
2401 for (addr = start, len = end - start;
2402 len != 0;
2403 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2404 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2405
2406 /* If the write protection bit is set, then we invalidate
2407 the code inside. */
ths5fafdf22007-09-16 21:08:06 +00002408 if (!(p->flags & PAGE_WRITE) &&
bellard9fa3e852004-01-04 18:06:42 +00002409 (flags & PAGE_WRITE) &&
2410 p->first_tb) {
bellardd720b932004-04-25 17:57:43 +00002411 tb_invalidate_phys_page(addr, 0, NULL);
bellard9fa3e852004-01-04 18:06:42 +00002412 }
2413 p->flags = flags;
2414 }
bellard9fa3e852004-01-04 18:06:42 +00002415}
2416
ths3d97b402007-11-02 19:02:07 +00002417int page_check_range(target_ulong start, target_ulong len, int flags)
2418{
2419 PageDesc *p;
2420 target_ulong end;
2421 target_ulong addr;
2422
Richard Henderson376a7902010-03-10 15:57:04 -08002423 /* This function should never be called with addresses outside the
2424 guest address space. If this assert fires, it probably indicates
2425 a missing call to h2g_valid. */
Blue Swirl338e9e62010-03-13 09:48:08 +00002426#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2427 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002428#endif
2429
Richard Henderson3e0650a2010-03-29 10:54:42 -07002430 if (len == 0) {
2431 return 0;
2432 }
Richard Henderson376a7902010-03-10 15:57:04 -08002433 if (start + len - 1 < start) {
2434 /* We've wrapped around. */
balrog55f280c2008-10-28 10:24:11 +00002435 return -1;
Richard Henderson376a7902010-03-10 15:57:04 -08002436 }
balrog55f280c2008-10-28 10:24:11 +00002437
ths3d97b402007-11-02 19:02:07 +00002438 end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
2439 start = start & TARGET_PAGE_MASK;
2440
Richard Henderson376a7902010-03-10 15:57:04 -08002441 for (addr = start, len = end - start;
2442 len != 0;
2443 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
ths3d97b402007-11-02 19:02:07 +00002444 p = page_find(addr >> TARGET_PAGE_BITS);
 2445 if (!p)
 2446 return -1;
 2447 if (!(p->flags & PAGE_VALID))
 2448 return -1;
2449
bellarddae32702007-11-14 10:51:00 +00002450 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
ths3d97b402007-11-02 19:02:07 +00002451 return -1;
bellarddae32702007-11-14 10:51:00 +00002452 if (flags & PAGE_WRITE) {
2453 if (!(p->flags & PAGE_WRITE_ORG))
2454 return -1;
 2455 /* unprotect the page if it was made read-only because it
2456 contains translated code */
2457 if (!(p->flags & PAGE_WRITE)) {
2458 if (!page_unprotect(addr, 0, NULL))
2459 return -1;
2460 }
2461 return 0;
2462 }
ths3d97b402007-11-02 19:02:07 +00002463 }
2464 return 0;
2465}
2466
bellard9fa3e852004-01-04 18:06:42 +00002467/* called from signal handler: invalidate the code and unprotect the
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002468 page. Return TRUE if the fault was successfully handled. */
pbrook53a59602006-03-25 19:31:22 +00002469int page_unprotect(target_ulong address, unsigned long pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00002470{
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002471 unsigned int prot;
2472 PageDesc *p;
pbrook53a59602006-03-25 19:31:22 +00002473 target_ulong host_start, host_end, addr;
bellard9fa3e852004-01-04 18:06:42 +00002474
pbrookc8a706f2008-06-02 16:16:42 +00002475 /* Technically this isn't safe inside a signal handler. However we
2476 know this only ever happens in a synchronous SEGV handler, so in
2477 practice it seems to be ok. */
2478 mmap_lock();
2479
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002480 p = page_find(address >> TARGET_PAGE_BITS);
2481 if (!p) {
pbrookc8a706f2008-06-02 16:16:42 +00002482 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002483 return 0;
pbrookc8a706f2008-06-02 16:16:42 +00002484 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002485
bellard9fa3e852004-01-04 18:06:42 +00002486 /* if the page was really writable, then we change its
2487 protection back to writable */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002488 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2489 host_start = address & qemu_host_page_mask;
2490 host_end = host_start + qemu_host_page_size;
2491
2492 prot = 0;
2493 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2494 p = page_find(addr >> TARGET_PAGE_BITS);
2495 p->flags |= PAGE_WRITE;
2496 prot |= p->flags;
2497
bellard9fa3e852004-01-04 18:06:42 +00002498 /* and since the content will be modified, we must invalidate
2499 the corresponding translated code. */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002500 tb_invalidate_phys_page(addr, pc, puc);
bellard9fa3e852004-01-04 18:06:42 +00002501#ifdef DEBUG_TB_CHECK
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002502 tb_invalidate_check(addr);
bellard9fa3e852004-01-04 18:06:42 +00002503#endif
bellard9fa3e852004-01-04 18:06:42 +00002504 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002505 mprotect((void *)g2h(host_start), qemu_host_page_size,
2506 prot & PAGE_BITS);
2507
2508 mmap_unlock();
2509 return 1;
bellard9fa3e852004-01-04 18:06:42 +00002510 }
pbrookc8a706f2008-06-02 16:16:42 +00002511 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002512 return 0;
2513}
2514
bellard6a00d602005-11-21 23:25:50 +00002515static inline void tlb_set_dirty(CPUState *env,
2516 unsigned long addr, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002517{
2518}
bellard9fa3e852004-01-04 18:06:42 +00002519#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    target_phys_addr_t base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(target_phys_addr_t base, uint16_t *section,
                               uint16_t orig_section);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
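/* Worked example (illustrative, TARGET_PAGE_SIZE == 0x1000): registering a
   region with start_addr == 0x1200 and orig_size == 0x400, the loop below
   visits the page at addr == 0x1000 and CHECK_SUBPAGE yields
   start_addr2 == 0x200, end_addr2 == 0x5ff and need_subpage == 1, so only
   that slice of the page is routed to the new section. */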

static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->u.node == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = phys_map_nodes[lp->u.node];
    for (i = 0; i < L2_SIZE; ++i) {
        if (level > 0) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].u.leaf);
        }
    }
    lp->u.node = PHYS_MAP_NODE_NIL;
}

static void destroy_all_mappings(void)
{
    destroy_l2_mapping(&phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}

static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}

/* register physical memory.
   For RAM, 'section->size' must be a multiple of the target page size.
   A section that does not start and end exactly on page boundaries is
   routed through a subpage_t container, so that only the covered slice
   of each boundary page is redirected to the new section; the rest of
   the page keeps its previous mapping. */
void cpu_register_physical_memory_log(MemoryRegionSection *section,
                                      bool readonly)
{
    target_phys_addr_t start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    target_phys_addr_t addr, end_addr;
    ram_addr_t orig_size = size;
    subpage_t *subpage;
    uint16_t section_index = phys_section_add(section);

    assert(size);

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;

    addr = start_addr;
    do {
        uint16_t *p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
        uint16_t orig_memory = *p;
        target_phys_addr_t start_addr2, end_addr2;
        int need_subpage = 0;
        MemoryRegion *mr = phys_sections[orig_memory].mr;

        CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                      need_subpage);
        if (need_subpage) {
            if (!(mr->subpage)) {
                subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                       p, orig_memory);
            } else {
                subpage = container_of(mr, subpage_t, iomem);
            }
            subpage_register(subpage, start_addr2, end_addr2,
                             section_index);
        } else {
            *p = section_index;
        }
        addr += TARGET_PAGE_SIZE;
    } while (addr != end_addr);
}
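/* Illustrative only: this function is driven by the core MemoryListener
   further down in this file, not called by devices directly.  A section
   such as

       { .mr = some_ram_mr, .offset_within_address_space = 0x1200,
         .size = 0x400, .offset_within_region = 0 }

   starts and ends mid-page, so the loop above takes the subpage path for
   the page at 0x1000 (see the worked CHECK_SUBPAGE example earlier);
   'some_ram_mr' is hypothetical. */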

void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    unlink(filename);
    free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    block->fd = fd;
    return area;
}
#endif
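/* Host-side usage sketch (illustrative, not QEMU code): file_ram_alloc()
   is reached via the -mem-path option and assumes a mounted hugetlbfs,
   e.g.

       mount -t hugetlbfs none /dev/hugepages
       qemu-system-x86_64 -m 1024 -mem-path /dev/hugepages
*/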

static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    if (QLIST_EMPTY(&ram_list.blocks))
        return 0;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QLIST_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

static ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QLIST_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}
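/* Worked example (illustrative): with existing blocks [0x0, 0x8000) and
   [0x10000, 0x20000), find_ram_offset(0x4000) returns 0x8000: the
   0x8000-byte gap between the blocks is the tightest gap that still fits
   the request, beating the unbounded gap after the last block. */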

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
        char *id = dev->parent_bus->info->get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
}

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
            /* S390 KVM requires the topmost vma of the RAM to be smaller
               than a system-defined value, which is at least 256GB; larger
               systems have larger values.  We put the guest between the end
               of the data segment (system break) and this value.  We use
               32GB as a base to leave enough room for the system break to
               grow. */
            new_block->host = mmap((void*)0x800000000, size,
                                   PROT_EXEC|PROT_READ|PROT_WRITE,
                                   MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
            if (new_block->host == MAP_FAILED) {
                fprintf(stderr, "Allocating RAM failed\n");
                abort();
            }
#else
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
#endif
            qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
        }
    }
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}
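/* Usage sketch (illustrative, compiled out): how allocation and host access
   fit together.  In practice memory_region_init_ram() calls
   qemu_ram_alloc() itself; 'mr' here is a hypothetical caller-owned
   MemoryRegion. */
#if 0
static void example_alloc_and_zero(MemoryRegion *mr, ram_addr_t size)
{
    ram_addr_t offset = qemu_ram_alloc(size, mr); /* reserve a RAM block */
    void *host = qemu_get_ram_ptr(offset);        /* host view of the block */

    memset(host, 0, size);                        /* touch the pages */
    qemu_put_ram_ptr(host);                       /* balance the mapping */
}
#endif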

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            g_free(block);
            return;
        }
    }
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            g_free(block);
            return;
        }
    }

}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            /* Move this entry to the start of the list.  */
            if (block != QLIST_FIRST(&ram_list.blocks)) {
                QLIST_REMOVE(block, next);
                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
            }
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
 */
void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QLIST_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}
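/* Usage sketch (illustrative, compiled out): callers pass in how much they
   want and must honour the possibly shortened length on return, as
   cpu_physical_memory_map() does.  The names here are hypothetical. */
#if 0
static void example_partial_map(ram_addr_t addr, ram_addr_t want)
{
    ram_addr_t got = want;
    void *p = qemu_ram_ptr_length(addr, &got); /* 'got' may shrink */

    if (p) {
        memset(p, 0xff, got); /* only touch the mapped prefix */
    }
}
#endif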

void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);
}

int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped.  */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}
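/* Informal round-trip property (illustrative): for any valid offset 'a'
   inside a mapped, non-Xen block,

       qemu_ram_addr_from_host_nofail(qemu_get_ram_ptr(a)) == a

   which is what the softmmu slow path relies on when turning a TLB host
   pointer back into a ram offset. */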

static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
#endif
    return 0;
}

static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
#endif
}

static const MemoryRegionOps unassigned_mem_ops = {
    .read = unassigned_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
                               unsigned size)
{
    abort();
}

static void error_mem_write(void *opaque, target_phys_addr_t addr,
                            uint64_t value, unsigned size)
{
    abort();
}

static const MemoryRegionOps error_mem_ops = {
    .read = error_mem_read,
    .write = error_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const MemoryRegionOps rom_mem_ops = {
    .read = error_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static const MemoryRegionOps notdirty_mem_ops = {
    .read = error_mem_read,
    .write = notdirty_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, target_phys_addr_t addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    /* the breaks matter: without them every store falls through to abort() */
    case 1: stb_phys(addr, val); break;
    case 2: stw_phys(addr, val); break;
    case 4: stl_phys(addr, val); break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
                             unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    return io_mem_read(section->mr->ram_addr, addr, len);
}

static void subpage_write(void *opaque, target_phys_addr_t addr,
                          uint64_t value, unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx
           " idx %d value %"PRIx64"\n",
           __func__, mmio, len, addr, idx, value);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    io_mem_write(section->mr->ram_addr, addr, value, len);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
                                 unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return ldub_p(ptr);
    case 2: return lduw_p(ptr);
    case 4: return ldl_p(ptr);
    default: abort();
    }
}

static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
                              uint64_t value, unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return stb_p(ptr, value);
    case 2: return stw_p(ptr, value);
    case 4: return stl_p(ptr, value);
    default: abort();
    }
}

static const MemoryRegionOps subpage_ram_ops = {
    .read = subpage_ram_read,
    .write = subpage_ram_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
3410
Anthony Liguoric227f092009-10-01 16:12:16 -05003411static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02003412 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00003413{
3414 int idx, eidx;
3415
3416 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3417 return -1;
3418 idx = SUBPAGE_IDX(start);
3419 eidx = SUBPAGE_IDX(end);
3420#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00003421 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
blueswir1db7b5422007-05-26 17:36:03 +00003422 mmio, start, end, idx, eidx, memory);
3423#endif
Avi Kivity5312bd82012-02-12 18:32:55 +02003424 if (memory_region_is_ram(phys_sections[section].mr)) {
3425 MemoryRegionSection new_section = phys_sections[section];
3426 new_section.mr = &io_mem_subpage_ram;
3427 section = phys_section_add(&new_section);
Andreas Färber56384e82011-11-30 16:26:21 +01003428 }
blueswir1db7b5422007-05-26 17:36:03 +00003429 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02003430 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00003431 }
3432
3433 return 0;
3434}
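/* Worked example (illustrative): after subpage_register(mmio, 0x200, 0x5ff,
   s), sub_section[0x200..0x5ff] all point at section 's' (or at its
   subpage-ram clone when 's' is RAM), while the rest of the page keeps its
   previous routing. */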

static subpage_t *subpage_init(target_phys_addr_t base, uint16_t *section_ind,
                               uint16_t orig_section)
{
    subpage_t *mmio;
    MemoryRegionSection section = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    };

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->base = base;
    memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
    section.mr = &mmio->iomem;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x section %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, orig_section);
#endif
    *section_ind = phys_section_add(&section);
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_section);

    return mmio;
}

static int get_free_io_mem_idx(void)
{
    int i;

    for (i = 0; i < IO_MEM_NB_ENTRIES; i++)
        if (!io_mem_used[i]) {
            io_mem_used[i] = 1;
            return i;
        }
    fprintf(stderr, "Ran out of io_mem_idx entries, max %d!\n", IO_MEM_NB_ENTRIES);
    return -1;
}

/* Register a MemoryRegion for use as an io zone.  If io_index is non-zero,
   the corresponding io zone is modified; if it is zero, a new io zone is
   allocated.  The returned index is what the memory core uses to dispatch
   accesses via io_mem_read()/io_mem_write(); (-1) is returned on error. */
static int cpu_register_io_memory_fixed(int io_index, MemoryRegion *mr)
{
    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    io_mem_region[io_index] = mr;

    return io_index;
}

int cpu_register_io_memory(MemoryRegion *mr)
{
    return cpu_register_io_memory_fixed(0, mr);
}
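/* Usage sketch (illustrative, compiled out): devices no longer call
   cpu_register_io_memory() themselves; the memory API reaches it via the
   core listener.  A device wires up MMIO roughly like this, where
   'mydev-mmio', the opaque pointer and the address are hypothetical, and
   the unassigned_mem handlers stand in for real device callbacks. */
#if 0
static void example_map_mmio(MemoryRegion *iomem, void *opaque)
{
    static const MemoryRegionOps mydev_ops = {
        .read = unassigned_mem_read,   /* stand-ins for real callbacks */
        .write = unassigned_mem_write,
        .endianness = DEVICE_NATIVE_ENDIAN,
    };

    memory_region_init_io(iomem, &mydev_ops, opaque, "mydev-mmio", 0x1000);
    memory_region_add_subregion(get_system_memory(), 0xfeed0000, iomem);
}
#endif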
3502
Avi Kivity11c7ef02012-01-02 17:21:07 +02003503void cpu_unregister_io_memory(int io_index)
aliguori88715652009-02-11 15:20:58 +00003504{
Avi Kivitya621f382012-01-02 13:12:08 +02003505 io_mem_region[io_index] = NULL;
aliguori88715652009-02-11 15:20:58 +00003506 io_mem_used[io_index] = 0;
3507}

static uint16_t dummy_section(MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = UINT64_MAX,
    };

    return phys_section_add(&section);
}

static void io_mem_init(void)
{
    int i;

    /* Must be first: */
    memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
    assert(io_mem_ram.ram_addr == 0);
    memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
                          "subpage-ram", UINT64_MAX);
    for (i = 0; i < 5; i++)
        io_mem_used[i] = 1;

    memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}

static void core_begin(MemoryListener *listener)
{
    destroy_all_mappings();
    phys_sections_clear();
    phys_map.u.node = PHYS_MAP_NODE_NIL;
    phys_section_unassigned = dummy_section(&io_mem_unassigned);
}

static void core_commit(MemoryListener *listener)
{
    CPUState *env;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}

static void core_region_add(MemoryListener *listener,
                            MemoryRegionSection *section)
{
    cpu_register_physical_memory_log(section, section->readonly);
}

static void core_region_del(MemoryListener *listener,
                            MemoryRegionSection *section)
{
}

static void core_region_nop(MemoryListener *listener,
                            MemoryRegionSection *section)
{
    cpu_register_physical_memory_log(section, section->readonly);
}

static void core_log_start(MemoryListener *listener,
                           MemoryRegionSection *section)
{
}

static void core_log_stop(MemoryListener *listener,
                          MemoryRegionSection *section)
{
}

static void core_log_sync(MemoryListener *listener,
                          MemoryRegionSection *section)
{
}

static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(1);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(0);
}

static void core_eventfd_add(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool match_data, uint64_t data, int fd)
{
}

static void core_eventfd_del(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool match_data, uint64_t data, int fd)
{
}

static void io_begin(MemoryListener *listener)
{
}

static void io_commit(MemoryListener *listener)
{
}

static void io_region_add(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    iorange_init(&section->mr->iorange, &memory_region_iorange_ops,
                 section->offset_within_address_space, section->size);
    ioport_register(&section->mr->iorange);
}

static void io_region_del(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    isa_unassign_ioport(section->offset_within_address_space, section->size);
}

static void io_region_nop(MemoryListener *listener,
                          MemoryRegionSection *section)
{
}

static void io_log_start(MemoryListener *listener,
                         MemoryRegionSection *section)
{
}

static void io_log_stop(MemoryListener *listener,
                        MemoryRegionSection *section)
{
}

static void io_log_sync(MemoryListener *listener,
                        MemoryRegionSection *section)
{
}

static void io_log_global_start(MemoryListener *listener)
{
}

static void io_log_global_stop(MemoryListener *listener)
{
}

static void io_eventfd_add(MemoryListener *listener,
                           MemoryRegionSection *section,
                           bool match_data, uint64_t data, int fd)
{
}

static void io_eventfd_del(MemoryListener *listener,
                           MemoryRegionSection *section,
                           bool match_data, uint64_t data, int fd)
{
}

static MemoryListener core_memory_listener = {
    .begin = core_begin,
    .commit = core_commit,
    .region_add = core_region_add,
    .region_del = core_region_del,
    .region_nop = core_region_nop,
    .log_start = core_log_start,
    .log_stop = core_log_stop,
    .log_sync = core_log_sync,
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
    .eventfd_add = core_eventfd_add,
    .eventfd_del = core_eventfd_del,
    .priority = 0,
};

static MemoryListener io_memory_listener = {
    .begin = io_begin,
    .commit = io_commit,
    .region_add = io_region_add,
    .region_del = io_region_del,
    .region_nop = io_region_nop,
    .log_start = io_log_start,
    .log_stop = io_log_stop,
    .log_sync = io_log_sync,
    .log_global_start = io_log_global_start,
    .log_global_stop = io_log_global_stop,
    .eventfd_add = io_eventfd_add,
    .eventfd_del = io_eventfd_del,
    .priority = 0,
};

static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, "system", INT64_MAX);
    set_system_memory_map(system_memory);

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init(system_io, "io", 65536);
    set_system_io_map(system_io);

    memory_listener_register(&core_memory_listener, system_memory);
    memory_listener_register(&io_memory_listener, system_io);
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}
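/* Usage sketch (illustrative, compiled out): boards attach RAM and device
   regions under the root returned by get_system_memory().  The name, the
   size and the three-argument memory_region_init_ram() signature are
   assumptions of this sketch. */
#if 0
static void example_add_ram(MemoryRegion *ram)
{
    memory_region_init_ram(ram, "example.ram", 0x100000);
    memory_region_add_subregion(get_system_memory(), 0, ram);
}
#endif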

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
bellard8df1cd02005-01-28 22:37:22 +00003776
bellard13eb76e2004-01-24 15:23:36 +00003777#else
Anthony Liguoric227f092009-10-01 16:12:16 -05003778void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
bellard13eb76e2004-01-24 15:23:36 +00003779 int len, int is_write)
3780{
3781 int l, io_index;
3782 uint8_t *ptr;
3783 uint32_t val;
Anthony Liguoric227f092009-10-01 16:12:16 -05003784 target_phys_addr_t page;
Avi Kivity06ef3522012-02-13 16:11:22 +02003785 MemoryRegionSection section;
ths3b46e622007-09-17 08:09:54 +00003786
bellard13eb76e2004-01-24 15:23:36 +00003787 while (len > 0) {
3788 page = addr & TARGET_PAGE_MASK;
3789 l = (page + TARGET_PAGE_SIZE) - addr;
3790 if (l > len)
3791 l = len;
Avi Kivity06ef3522012-02-13 16:11:22 +02003792 section = phys_page_find(page >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003793
bellard13eb76e2004-01-24 15:23:36 +00003794 if (is_write) {
Avi Kivity06ef3522012-02-13 16:11:22 +02003795 if (!memory_region_is_ram(section.mr)) {
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003796 target_phys_addr_t addr1;
Avi Kivity06ef3522012-02-13 16:11:22 +02003797 io_index = memory_region_get_ram_addr(section.mr)
3798 & (IO_MEM_NB_ENTRIES - 1);
3799 addr1 = (addr & ~TARGET_PAGE_MASK)
3800 + section.offset_within_region;
bellard6a00d602005-11-21 23:25:50 +00003801 /* XXX: could force cpu_single_env to NULL to avoid
3802 potential bugs */
aurel326c2934d2009-02-18 21:37:17 +00003803 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003804 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003805 val = ldl_p(buf);
Avi Kivityacbbec52011-11-21 12:27:03 +02003806 io_mem_write(io_index, addr1, val, 4);
bellard13eb76e2004-01-24 15:23:36 +00003807 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003808 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003809 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003810 val = lduw_p(buf);
Avi Kivityacbbec52011-11-21 12:27:03 +02003811 io_mem_write(io_index, addr1, val, 2);
bellard13eb76e2004-01-24 15:23:36 +00003812 l = 2;
3813 } else {
bellard1c213d12005-09-03 10:49:04 +00003814 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003815 val = ldub_p(buf);
Avi Kivityacbbec52011-11-21 12:27:03 +02003816 io_mem_write(io_index, addr1, val, 1);
bellard13eb76e2004-01-24 15:23:36 +00003817 l = 1;
3818 }
Avi Kivity06ef3522012-02-13 16:11:22 +02003819 } else if (!section.readonly) {
Anthony PERARD8ca56922011-07-15 04:32:53 +00003820 ram_addr_t addr1;
Avi Kivity06ef3522012-02-13 16:11:22 +02003821 addr1 = (memory_region_get_ram_addr(section.mr)
3822 + section.offset_within_region)
3823 | (addr & ~TARGET_PAGE_MASK);
bellard13eb76e2004-01-24 15:23:36 +00003824 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003825 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00003826 memcpy(ptr, buf, l);
bellard3a7d9292005-08-21 09:26:42 +00003827 if (!cpu_physical_memory_is_dirty(addr1)) {
3828 /* invalidate code */
3829 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3830 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003831 cpu_physical_memory_set_dirty_flags(
3832 addr1, (0xff & ~CODE_DIRTY_FLAG));
bellard3a7d9292005-08-21 09:26:42 +00003833 }
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003834 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00003835 }
3836 } else {
Avi Kivity06ef3522012-02-13 16:11:22 +02003837 if (!is_ram_rom_romd(&section)) {
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003838 target_phys_addr_t addr1;
bellard13eb76e2004-01-24 15:23:36 +00003839 /* I/O case */
Avi Kivity06ef3522012-02-13 16:11:22 +02003840 io_index = memory_region_get_ram_addr(section.mr)
3841 & (IO_MEM_NB_ENTRIES - 1);
3842 addr1 = (addr & ~TARGET_PAGE_MASK)
3843 + section.offset_within_region;
aurel326c2934d2009-02-18 21:37:17 +00003844 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003845 /* 32 bit read access */
Avi Kivityacbbec52011-11-21 12:27:03 +02003846 val = io_mem_read(io_index, addr1, 4);
bellardc27004e2005-01-03 23:35:10 +00003847 stl_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003848 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003849 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003850 /* 16 bit read access */
Avi Kivityacbbec52011-11-21 12:27:03 +02003851 val = io_mem_read(io_index, addr1, 2);
bellardc27004e2005-01-03 23:35:10 +00003852 stw_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003853 l = 2;
3854 } else {
bellard1c213d12005-09-03 10:49:04 +00003855 /* 8 bit read access */
Avi Kivityacbbec52011-11-21 12:27:03 +02003856 val = io_mem_read(io_index, addr1, 1);
bellardc27004e2005-01-03 23:35:10 +00003857 stb_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003858 l = 1;
3859 }
3860 } else {
3861 /* RAM case */
Avi Kivity06ef3522012-02-13 16:11:22 +02003862 ptr = qemu_get_ram_ptr(section.mr->ram_addr
3863 + section.offset_within_region);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003864 memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
3865 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00003866 }
3867 }
3868 len -= l;
3869 buf += l;
3870 addr += l;
3871 }
3872}
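
/* Editor's sketch, not part of the original file: callers normally reach
 * cpu_physical_memory_rw() through the read/write wrappers declared in
 * cpu-common.h. 0x1000 is a made-up, assumed RAM-backed guest address. */
static void example_phys_round_trip(void)
{
    uint8_t out[16] = "hello physmem";
    uint8_t in[16];

    /* RAM fast path: memcpy into qemu_get_ram_ptr() plus dirty tracking */
    cpu_physical_memory_write(0x1000, out, sizeof(out));
    /* the same bytes come back; an MMIO page would go through io_mem_read() */
    cpu_physical_memory_read(0x1000, in, sizeof(in));
}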
bellard8df1cd02005-01-28 22:37:22 +00003873
bellardd0ecd2a2006-04-23 17:14:48 +00003874/* used for ROM loading: can write to RAM and ROM */
Anthony Liguoric227f092009-10-01 16:12:16 -05003875void cpu_physical_memory_write_rom(target_phys_addr_t addr,
bellardd0ecd2a2006-04-23 17:14:48 +00003876 const uint8_t *buf, int len)
3877{
3878 int l;
3879 uint8_t *ptr;
Anthony Liguoric227f092009-10-01 16:12:16 -05003880 target_phys_addr_t page;
Avi Kivity06ef3522012-02-13 16:11:22 +02003881 MemoryRegionSection section;
ths3b46e622007-09-17 08:09:54 +00003882
bellardd0ecd2a2006-04-23 17:14:48 +00003883 while (len > 0) {
3884 page = addr & TARGET_PAGE_MASK;
3885 l = (page + TARGET_PAGE_SIZE) - addr;
3886 if (l > len)
3887 l = len;
Avi Kivity06ef3522012-02-13 16:11:22 +02003888 section = phys_page_find(page >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003889
Avi Kivity06ef3522012-02-13 16:11:22 +02003890 if (!is_ram_rom_romd(&section)) {
bellardd0ecd2a2006-04-23 17:14:48 +00003891 /* do nothing */
3892 } else {
3893 unsigned long addr1;
Avi Kivity06ef3522012-02-13 16:11:22 +02003894 addr1 = (memory_region_get_ram_addr(section.mr)
3895 + section.offset_within_region)
3896 + (addr & ~TARGET_PAGE_MASK);
bellardd0ecd2a2006-04-23 17:14:48 +00003897 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003898 ptr = qemu_get_ram_ptr(addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00003899 memcpy(ptr, buf, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003900 qemu_put_ram_ptr(ptr);
bellardd0ecd2a2006-04-23 17:14:48 +00003901 }
3902 len -= l;
3903 buf += l;
3904 addr += l;
3905 }
3906}
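
/* Editor's sketch, not part of the original file: firmware loading is the
 * typical user of cpu_physical_memory_write_rom(); a plain
 * cpu_physical_memory_write() silently skips readonly RAM (the
 * !section.readonly branch in cpu_physical_memory_rw() above).
 * rom_base is a hypothetical mapping address. */
static void example_load_firmware(const uint8_t *blob, int size)
{
    target_phys_addr_t rom_base = 0xfff00000;

    cpu_physical_memory_write_rom(rom_base, blob, size);
}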
3907
aliguori6d16c2f2009-01-22 16:59:11 +00003908typedef struct {
3909 void *buffer;
Anthony Liguoric227f092009-10-01 16:12:16 -05003910 target_phys_addr_t addr;
3911 target_phys_addr_t len;
aliguori6d16c2f2009-01-22 16:59:11 +00003912} BounceBuffer;
3913
3914static BounceBuffer bounce;
3915
aliguoriba223c22009-01-22 16:59:16 +00003916typedef struct MapClient {
3917 void *opaque;
3918 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00003919 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00003920} MapClient;
3921
Blue Swirl72cf2d42009-09-12 07:36:22 +00003922static QLIST_HEAD(map_client_list, MapClient) map_client_list
3923 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003924
3925void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3926{
Anthony Liguori7267c092011-08-20 22:09:37 -05003927 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00003928
3929 client->opaque = opaque;
3930 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003931 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00003932 return client;
3933}
3934
3935void cpu_unregister_map_client(void *_client)
3936{
3937 MapClient *client = (MapClient *)_client;
3938
Blue Swirl72cf2d42009-09-12 07:36:22 +00003939 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05003940 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00003941}
3942
3943static void cpu_notify_map_clients(void)
3944{
3945 MapClient *client;
3946
Blue Swirl72cf2d42009-09-12 07:36:22 +00003947 while (!QLIST_EMPTY(&map_client_list)) {
3948 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003949 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09003950 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00003951 }
3952}
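
/* Editor's sketch, not part of the original file: how a device model is
 * expected to use the map-client hooks. If cpu_physical_memory_map()
 * returns NULL because the bounce buffer is busy, the device parks a
 * callback here and resumes once cpu_notify_map_clients() fires.
 * MyDMAState, my_dma_retry and my_dma_defer are hypothetical names. */
typedef struct MyDMAState {
    void *map_client; /* handle from cpu_register_map_client() */
} MyDMAState;

static void my_dma_retry(void *opaque)
{
    MyDMAState *s = opaque;

    /* cpu_notify_map_clients() has already unregistered this client */
    s->map_client = NULL;
    /* ... retry cpu_physical_memory_map() and resume the transfer ... */
}

static void my_dma_defer(MyDMAState *s)
{
    s->map_client = cpu_register_map_client(s, my_dma_retry);
}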
3953
aliguori6d16c2f2009-01-22 16:59:11 +00003954/* Map a physical memory region into a host virtual address.
3955 * May map a subset of the requested range, given by and returned in *plen.
3956 * May return NULL if resources needed to perform the mapping are exhausted.
3957 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00003958 * Use cpu_register_map_client() to know when retrying the map operation is
3959 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00003960 */
Anthony Liguoric227f092009-10-01 16:12:16 -05003961void *cpu_physical_memory_map(target_phys_addr_t addr,
3962 target_phys_addr_t *plen,
aliguori6d16c2f2009-01-22 16:59:11 +00003963 int is_write)
3964{
Anthony Liguoric227f092009-10-01 16:12:16 -05003965 target_phys_addr_t len = *plen;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003966 target_phys_addr_t todo = 0;
aliguori6d16c2f2009-01-22 16:59:11 +00003967 int l;
Anthony Liguoric227f092009-10-01 16:12:16 -05003968 target_phys_addr_t page;
Avi Kivity06ef3522012-02-13 16:11:22 +02003969 MemoryRegionSection section;
Anthony PERARDf15fbc42011-07-20 08:17:42 +00003970 ram_addr_t raddr = RAM_ADDR_MAX;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003971 ram_addr_t rlen;
3972 void *ret;
aliguori6d16c2f2009-01-22 16:59:11 +00003973
3974 while (len > 0) {
3975 page = addr & TARGET_PAGE_MASK;
3976 l = (page + TARGET_PAGE_SIZE) - addr;
3977 if (l > len)
3978 l = len;
Avi Kivity06ef3522012-02-13 16:11:22 +02003979 section = phys_page_find(page >> TARGET_PAGE_BITS);
aliguori6d16c2f2009-01-22 16:59:11 +00003980
Avi Kivity06ef3522012-02-13 16:11:22 +02003981 if (!(memory_region_is_ram(section.mr) && !section.readonly)) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003982 if (todo || bounce.buffer) {
aliguori6d16c2f2009-01-22 16:59:11 +00003983 break;
3984 }
3985 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3986 bounce.addr = addr;
3987 bounce.len = l;
3988 if (!is_write) {
Stefan Weil54f7b4a2011-04-10 18:23:39 +02003989 cpu_physical_memory_read(addr, bounce.buffer, l);
aliguori6d16c2f2009-01-22 16:59:11 +00003990 }
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003991
3992 *plen = l;
3993 return bounce.buffer;
aliguori6d16c2f2009-01-22 16:59:11 +00003994 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003995 if (!todo) {
Avi Kivity06ef3522012-02-13 16:11:22 +02003996 raddr = memory_region_get_ram_addr(section.mr)
3997 + section.offset_within_region
3998 + (addr & ~TARGET_PAGE_MASK);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003999 }
aliguori6d16c2f2009-01-22 16:59:11 +00004000
4001 len -= l;
4002 addr += l;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01004003 todo += l;
aliguori6d16c2f2009-01-22 16:59:11 +00004004 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01004005 rlen = todo;
4006 ret = qemu_ram_ptr_length(raddr, &rlen);
4007 *plen = rlen;
4008 return ret;
aliguori6d16c2f2009-01-22 16:59:11 +00004009}
4010
4011/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
4012 * Will also mark the memory as dirty if is_write == 1. access_len gives
4013 * the amount of memory that was actually read or written by the caller.
4014 */
Anthony Liguoric227f092009-10-01 16:12:16 -05004015void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
4016 int is_write, target_phys_addr_t access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00004017{
4018 if (buffer != bounce.buffer) {
4019 if (is_write) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03004020 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00004021 while (access_len) {
4022 unsigned l;
4023 l = TARGET_PAGE_SIZE;
4024 if (l > access_len)
4025 l = access_len;
4026 if (!cpu_physical_memory_is_dirty(addr1)) {
4027 /* invalidate code */
4028 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
4029 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09004030 cpu_physical_memory_set_dirty_flags(
4031 addr1, (0xff & ~CODE_DIRTY_FLAG));
aliguori6d16c2f2009-01-22 16:59:11 +00004032 }
4033 addr1 += l;
4034 access_len -= l;
4035 }
4036 }
Jan Kiszka868bb332011-06-21 22:59:09 +02004037 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02004038 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01004039 }
aliguori6d16c2f2009-01-22 16:59:11 +00004040 return;
4041 }
4042 if (is_write) {
4043 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
4044 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00004045 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00004046 bounce.buffer = NULL;
aliguoriba223c22009-01-22 16:59:16 +00004047 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00004048}
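
/* Editor's sketch, not part of the original file: the canonical zero-copy
 * pattern for the two functions above. Each iteration maps as much of the
 * range as possible; a NULL return means the single bounce buffer is in
 * use, and this sketch simply falls back to the copying interface (a real
 * device would rather wait via cpu_register_map_client()). */
static void example_dma_to_guest(target_phys_addr_t addr, const uint8_t *buf,
                                 target_phys_addr_t len)
{
    while (len > 0) {
        target_phys_addr_t plen = len;
        void *host = cpu_physical_memory_map(addr, &plen, 1);

        if (!host) {
            cpu_physical_memory_write(addr, buf, len); /* slow path */
            return;
        }
        memcpy(host, buf, plen);
        /* unmapping marks the pages dirty and invalidates translated code */
        cpu_physical_memory_unmap(host, plen, 1, plen);
        addr += plen;
        buf += plen;
        len -= plen;
    }
}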
bellardd0ecd2a2006-04-23 17:14:48 +00004049
bellard8df1cd02005-01-28 22:37:22 +00004050/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004051static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
4052 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00004053{
4054 int io_index;
4055 uint8_t *ptr;
4056 uint32_t val;
Avi Kivity06ef3522012-02-13 16:11:22 +02004057 MemoryRegionSection section;
bellard8df1cd02005-01-28 22:37:22 +00004058
Avi Kivity06ef3522012-02-13 16:11:22 +02004059 section = phys_page_find(addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00004060
Avi Kivity06ef3522012-02-13 16:11:22 +02004061 if (!is_ram_rom_romd(&section)) {
bellard8df1cd02005-01-28 22:37:22 +00004062 /* I/O case */
Avi Kivity06ef3522012-02-13 16:11:22 +02004063 io_index = memory_region_get_ram_addr(section.mr)
4064 & (IO_MEM_NB_ENTRIES - 1);
4065 addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
Avi Kivityacbbec52011-11-21 12:27:03 +02004066 val = io_mem_read(io_index, addr, 4);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004067#if defined(TARGET_WORDS_BIGENDIAN)
4068 if (endian == DEVICE_LITTLE_ENDIAN) {
4069 val = bswap32(val);
4070 }
4071#else
4072 if (endian == DEVICE_BIG_ENDIAN) {
4073 val = bswap32(val);
4074 }
4075#endif
bellard8df1cd02005-01-28 22:37:22 +00004076 } else {
4077 /* RAM case */
Avi Kivity06ef3522012-02-13 16:11:22 +02004078 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section.mr)
4079 & TARGET_PAGE_MASK)
4080 + section.offset_within_region) +
bellard8df1cd02005-01-28 22:37:22 +00004081 (addr & ~TARGET_PAGE_MASK);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004082 switch (endian) {
4083 case DEVICE_LITTLE_ENDIAN:
4084 val = ldl_le_p(ptr);
4085 break;
4086 case DEVICE_BIG_ENDIAN:
4087 val = ldl_be_p(ptr);
4088 break;
4089 default:
4090 val = ldl_p(ptr);
4091 break;
4092 }
bellard8df1cd02005-01-28 22:37:22 +00004093 }
4094 return val;
4095}
4096
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004097uint32_t ldl_phys(target_phys_addr_t addr)
4098{
4099 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4100}
4101
4102uint32_t ldl_le_phys(target_phys_addr_t addr)
4103{
4104 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4105}
4106
4107uint32_t ldl_be_phys(target_phys_addr_t addr)
4108{
4109 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
4110}
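
/* Editor's sketch, not part of the original file: the _le/_be variants let
 * device models read guest memory in the device's own byte order without
 * #ifdefs; the bswap above runs only when the target's order disagrees
 * with the requested one. 0x10000000 is a made-up register address. */
static uint32_t example_read_le_register(void)
{
    return ldl_le_phys(0x10000000);
}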
4111
bellard84b7b8e2005-11-28 21:19:04 +00004112/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004113static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
4114 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00004115{
4116 int io_index;
4117 uint8_t *ptr;
4118 uint64_t val;
Avi Kivity06ef3522012-02-13 16:11:22 +02004119 MemoryRegionSection section;
bellard84b7b8e2005-11-28 21:19:04 +00004120
Avi Kivity06ef3522012-02-13 16:11:22 +02004121 section = phys_page_find(addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00004122
Avi Kivity06ef3522012-02-13 16:11:22 +02004123 if (!is_ram_rom_romd(&section)) {
bellard84b7b8e2005-11-28 21:19:04 +00004124 /* I/O case */
Avi Kivity06ef3522012-02-13 16:11:22 +02004125 io_index = memory_region_get_ram_addr(section.mr)
4126 & (IO_MEM_NB_ENTRIES - 1);
4127 addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004128
4129 /* XXX This is broken when device endian != cpu endian.
4130 Fix and add "endian" variable check */
bellard84b7b8e2005-11-28 21:19:04 +00004131#ifdef TARGET_WORDS_BIGENDIAN
Avi Kivityacbbec52011-11-21 12:27:03 +02004132 val = io_mem_read(io_index, addr, 4) << 32;
4133 val |= io_mem_read(io_index, addr + 4, 4);
bellard84b7b8e2005-11-28 21:19:04 +00004134#else
Avi Kivityacbbec52011-11-21 12:27:03 +02004135 val = io_mem_read(io_index, addr, 4);
4136 val |= io_mem_read(io_index, addr + 4, 4) << 32;
bellard84b7b8e2005-11-28 21:19:04 +00004137#endif
4138 } else {
4139 /* RAM case */
Avi Kivity06ef3522012-02-13 16:11:22 +02004140 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section.mr)
4141 & TARGET_PAGE_MASK)
4142 + section.offset_within_region)
4143 + (addr & ~TARGET_PAGE_MASK);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004144 switch (endian) {
4145 case DEVICE_LITTLE_ENDIAN:
4146 val = ldq_le_p(ptr);
4147 break;
4148 case DEVICE_BIG_ENDIAN:
4149 val = ldq_be_p(ptr);
4150 break;
4151 default:
4152 val = ldq_p(ptr);
4153 break;
4154 }
bellard84b7b8e2005-11-28 21:19:04 +00004155 }
4156 return val;
4157}
4158
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004159uint64_t ldq_phys(target_phys_addr_t addr)
4160{
4161 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4162}
4163
4164uint64_t ldq_le_phys(target_phys_addr_t addr)
4165{
4166 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4167}
4168
4169uint64_t ldq_be_phys(target_phys_addr_t addr)
4170{
4171 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
4172}
4173
bellardaab33092005-10-30 20:48:42 +00004174/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05004175uint32_t ldub_phys(target_phys_addr_t addr)
bellardaab33092005-10-30 20:48:42 +00004176{
4177 uint8_t val;
4178 cpu_physical_memory_read(addr, &val, 1);
4179 return val;
4180}
4181
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004182/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004183static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
4184 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00004185{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004186 int io_index;
4187 uint8_t *ptr;
4188 uint64_t val;
Avi Kivity06ef3522012-02-13 16:11:22 +02004189 MemoryRegionSection section;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004190
Avi Kivity06ef3522012-02-13 16:11:22 +02004191 section = phys_page_find(addr >> TARGET_PAGE_BITS);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004192
Avi Kivity06ef3522012-02-13 16:11:22 +02004193 if (!is_ram_rom_romd(&section)) {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004194 /* I/O case */
Avi Kivity06ef3522012-02-13 16:11:22 +02004195 io_index = memory_region_get_ram_addr(section.mr)
4196 & (IO_MEM_NB_ENTRIES - 1);
4197 addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
Avi Kivityacbbec52011-11-21 12:27:03 +02004198 val = io_mem_read(io_index, addr, 2);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004199#if defined(TARGET_WORDS_BIGENDIAN)
4200 if (endian == DEVICE_LITTLE_ENDIAN) {
4201 val = bswap16(val);
4202 }
4203#else
4204 if (endian == DEVICE_BIG_ENDIAN) {
4205 val = bswap16(val);
4206 }
4207#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004208 } else {
4209 /* RAM case */
Avi Kivity06ef3522012-02-13 16:11:22 +02004210 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section.mr)
4211 & TARGET_PAGE_MASK)
4212 + section.offset_within_region)
4213 + (addr & ~TARGET_PAGE_MASK);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004214 switch (endian) {
4215 case DEVICE_LITTLE_ENDIAN:
4216 val = lduw_le_p(ptr);
4217 break;
4218 case DEVICE_BIG_ENDIAN:
4219 val = lduw_be_p(ptr);
4220 break;
4221 default:
4222 val = lduw_p(ptr);
4223 break;
4224 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004225 }
4226 return val;
bellardaab33092005-10-30 20:48:42 +00004227}
4228
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004229uint32_t lduw_phys(target_phys_addr_t addr)
4230{
4231 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4232}
4233
4234uint32_t lduw_le_phys(target_phys_addr_t addr)
4235{
4236 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4237}
4238
4239uint32_t lduw_be_phys(target_phys_addr_t addr)
4240{
4241 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
4242}
4243
bellard8df1cd02005-01-28 22:37:22 +00004244/* warning: addr must be aligned. The ram page is not marked as dirty
4245 and the code inside is not invalidated. It is useful if the dirty
4246 bits are used to track modified PTEs */
Anthony Liguoric227f092009-10-01 16:12:16 -05004247void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
bellard8df1cd02005-01-28 22:37:22 +00004248{
4249 int io_index;
4250 uint8_t *ptr;
Avi Kivity06ef3522012-02-13 16:11:22 +02004251 MemoryRegionSection section;
bellard8df1cd02005-01-28 22:37:22 +00004252
Avi Kivity06ef3522012-02-13 16:11:22 +02004253 section = phys_page_find(addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00004254
Avi Kivity06ef3522012-02-13 16:11:22 +02004255 if (!memory_region_is_ram(section.mr) || section.readonly) {
4256 if (memory_region_is_ram(section.mr)) {
4257 io_index = io_mem_rom.ram_addr;
4258 } else {
4259 io_index = memory_region_get_ram_addr(section.mr);
4260 }
4261 addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
Avi Kivityacbbec52011-11-21 12:27:03 +02004262 io_mem_write(io_index, addr, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00004263 } else {
Avi Kivity06ef3522012-02-13 16:11:22 +02004264 unsigned long addr1 = (memory_region_get_ram_addr(section.mr)
4265 & TARGET_PAGE_MASK)
4266 + section.offset_within_region
4267 + (addr & ~TARGET_PAGE_MASK);
pbrook5579c7f2009-04-11 14:47:08 +00004268 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00004269 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00004270
4271 if (unlikely(in_migration)) {
4272 if (!cpu_physical_memory_is_dirty(addr1)) {
4273 /* invalidate code */
4274 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4275 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09004276 cpu_physical_memory_set_dirty_flags(
4277 addr1, (0xff & ~CODE_DIRTY_FLAG));
aliguori74576192008-10-06 14:02:03 +00004278 }
4279 }
bellard8df1cd02005-01-28 22:37:22 +00004280 }
4281}
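
/* Editor's sketch, not part of the original file: the typical _notdirty
 * caller is a target MMU helper flipping accessed/dirty bits inside a
 * guest PTE, so the store neither marks the page dirty nor invalidates
 * translated code on it (the in_migration check above is the one
 * exception). pte_addr is hypothetical; 0x20 is e.g. the x86
 * PG_ACCESSED_MASK. */
static void example_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    stl_phys_notdirty(pte_addr, pte | 0x20);
}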
4282
Anthony Liguoric227f092009-10-01 16:12:16 -05004283void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
j_mayerbc98a7e2007-04-04 07:55:12 +00004284{
4285 int io_index;
4286 uint8_t *ptr;
Avi Kivity06ef3522012-02-13 16:11:22 +02004287 MemoryRegionSection section;
j_mayerbc98a7e2007-04-04 07:55:12 +00004288
Avi Kivity06ef3522012-02-13 16:11:22 +02004289 section = phys_page_find(addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00004290
Avi Kivity06ef3522012-02-13 16:11:22 +02004291 if (!memory_region_is_ram(section.mr) || section.readonly) {
4292 if (memory_region_is_ram(section.mr)) {
4293 io_index = io_mem_rom.ram_addr;
4294 } else {
4295 io_index = memory_region_get_ram_addr(section.mr)
4296 & (IO_MEM_NB_ENTRIES - 1);
4297 }
4298 addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
j_mayerbc98a7e2007-04-04 07:55:12 +00004299#ifdef TARGET_WORDS_BIGENDIAN
Avi Kivityacbbec52011-11-21 12:27:03 +02004300 io_mem_write(io_index, addr, val >> 32, 4);
4301 io_mem_write(io_index, addr + 4, (uint32_t)val, 4);
j_mayerbc98a7e2007-04-04 07:55:12 +00004302#else
Avi Kivityacbbec52011-11-21 12:27:03 +02004303 io_mem_write(io_index, addr, (uint32_t)val, 4);
4304 io_mem_write(io_index, addr + 4, val >> 32, 4);
j_mayerbc98a7e2007-04-04 07:55:12 +00004305#endif
4306 } else {
Avi Kivity06ef3522012-02-13 16:11:22 +02004307 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section.mr)
4308 & TARGET_PAGE_MASK)
4309 + section.offset_within_region)
4310 + (addr & ~TARGET_PAGE_MASK);
j_mayerbc98a7e2007-04-04 07:55:12 +00004311 stq_p(ptr, val);
4312 }
4313}
4314
bellard8df1cd02005-01-28 22:37:22 +00004315/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004316static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
4317 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00004318{
4319 int io_index;
4320 uint8_t *ptr;
Avi Kivity06ef3522012-02-13 16:11:22 +02004321 MemoryRegionSection section;
bellard8df1cd02005-01-28 22:37:22 +00004322
Avi Kivity06ef3522012-02-13 16:11:22 +02004323 section = phys_page_find(addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00004324
Avi Kivity06ef3522012-02-13 16:11:22 +02004325 if (!memory_region_is_ram(section.mr) || section.readonly) {
4326 if (memory_region_is_ram(section.mr)) {
4327 io_index = io_mem_rom.ram_addr;
4328 } else {
4329 io_index = memory_region_get_ram_addr(section.mr)
4330 & (IO_MEM_NB_ENTRIES - 1);
4331 }
4332 addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004333#if defined(TARGET_WORDS_BIGENDIAN)
4334 if (endian == DEVICE_LITTLE_ENDIAN) {
4335 val = bswap32(val);
4336 }
4337#else
4338 if (endian == DEVICE_BIG_ENDIAN) {
4339 val = bswap32(val);
4340 }
4341#endif
Avi Kivityacbbec52011-11-21 12:27:03 +02004342 io_mem_write(io_index, addr, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00004343 } else {
4344 unsigned long addr1;
Avi Kivity06ef3522012-02-13 16:11:22 +02004345 addr1 = (memory_region_get_ram_addr(section.mr) & TARGET_PAGE_MASK)
4346 + section.offset_within_region
4347 + (addr & ~TARGET_PAGE_MASK);
bellard8df1cd02005-01-28 22:37:22 +00004348 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00004349 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004350 switch (endian) {
4351 case DEVICE_LITTLE_ENDIAN:
4352 stl_le_p(ptr, val);
4353 break;
4354 case DEVICE_BIG_ENDIAN:
4355 stl_be_p(ptr, val);
4356 break;
4357 default:
4358 stl_p(ptr, val);
4359 break;
4360 }
bellard3a7d9292005-08-21 09:26:42 +00004361 if (!cpu_physical_memory_is_dirty(addr1)) {
4362 /* invalidate code */
4363 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4364 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09004365 cpu_physical_memory_set_dirty_flags(addr1,
4366 (0xff & ~CODE_DIRTY_FLAG));
bellard3a7d9292005-08-21 09:26:42 +00004367 }
bellard8df1cd02005-01-28 22:37:22 +00004368 }
4369}
4370
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004371void stl_phys(target_phys_addr_t addr, uint32_t val)
4372{
4373 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4374}
4375
4376void stl_le_phys(target_phys_addr_t addr, uint32_t val)
4377{
4378 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4379}
4380
4381void stl_be_phys(target_phys_addr_t addr, uint32_t val)
4382{
4383 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4384}
4385
bellardaab33092005-10-30 20:48:42 +00004386/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05004387void stb_phys(target_phys_addr_t addr, uint32_t val)
bellardaab33092005-10-30 20:48:42 +00004388{
4389 uint8_t v = val;
4390 cpu_physical_memory_write(addr, &v, 1);
4391}
4392
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004393/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004394static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
4395 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00004396{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004397 int io_index;
4398 uint8_t *ptr;
Avi Kivity06ef3522012-02-13 16:11:22 +02004399 MemoryRegionSection section;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004400
Avi Kivity06ef3522012-02-13 16:11:22 +02004401 section = phys_page_find(addr >> TARGET_PAGE_BITS);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004402
Avi Kivity06ef3522012-02-13 16:11:22 +02004403 if (!memory_region_is_ram(section.mr) || section.readonly) {
4404 if (memory_region_is_ram(section.mr)) {
4405 io_index = io_mem_rom.ram_addr;
4406 } else {
4407 io_index = memory_region_get_ram_addr(section.mr)
4408 & (IO_MEM_NB_ENTRIES - 1);
4409 }
4410 addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004411#if defined(TARGET_WORDS_BIGENDIAN)
4412 if (endian == DEVICE_LITTLE_ENDIAN) {
4413 val = bswap16(val);
4414 }
4415#else
4416 if (endian == DEVICE_BIG_ENDIAN) {
4417 val = bswap16(val);
4418 }
4419#endif
Avi Kivityacbbec52011-11-21 12:27:03 +02004420 io_mem_write(io_index, addr, val, 2);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004421 } else {
4422 unsigned long addr1;
Avi Kivity06ef3522012-02-13 16:11:22 +02004423 addr1 = (memory_region_get_ram_addr(section.mr) & TARGET_PAGE_MASK)
4424 + section.offset_within_region + (addr & ~TARGET_PAGE_MASK);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004425 /* RAM case */
4426 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004427 switch (endian) {
4428 case DEVICE_LITTLE_ENDIAN:
4429 stw_le_p(ptr, val);
4430 break;
4431 case DEVICE_BIG_ENDIAN:
4432 stw_be_p(ptr, val);
4433 break;
4434 default:
4435 stw_p(ptr, val);
4436 break;
4437 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004438 if (!cpu_physical_memory_is_dirty(addr1)) {
4439 /* invalidate code */
4440 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4441 /* set dirty bit */
4442 cpu_physical_memory_set_dirty_flags(addr1,
4443 (0xff & ~CODE_DIRTY_FLAG));
4444 }
4445 }
bellardaab33092005-10-30 20:48:42 +00004446}
4447
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004448void stw_phys(target_phys_addr_t addr, uint32_t val)
4449{
4450 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4451}
4452
4453void stw_le_phys(target_phys_addr_t addr, uint32_t val)
4454{
4455 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4456}
4457
4458void stw_be_phys(target_phys_addr_t addr, uint32_t val)
4459{
4460 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4461}
4462
bellardaab33092005-10-30 20:48:42 +00004463/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05004464void stq_phys(target_phys_addr_t addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00004465{
4466 val = tswap64(val);
Stefan Weil71d2b722011-03-26 21:06:56 +01004467 cpu_physical_memory_write(addr, &val, 8);
bellardaab33092005-10-30 20:48:42 +00004468}
4469
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004470void stq_le_phys(target_phys_addr_t addr, uint64_t val)
4471{
4472 val = cpu_to_le64(val);
4473 cpu_physical_memory_write(addr, &val, 8);
4474}
4475
4476void stq_be_phys(target_phys_addr_t addr, uint64_t val)
4477{
4478 val = cpu_to_be64(val);
4479 cpu_physical_memory_write(addr, &val, 8);
4480}
4481
aliguori5e2972f2009-03-28 17:51:36 +00004482/* virtual memory access for debug (includes writing to ROM) */
ths5fafdf22007-09-16 21:08:06 +00004483int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00004484 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00004485{
4486 int l;
Anthony Liguoric227f092009-10-01 16:12:16 -05004487 target_phys_addr_t phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00004488 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00004489
4490 while (len > 0) {
4491 page = addr & TARGET_PAGE_MASK;
4492 phys_addr = cpu_get_phys_page_debug(env, page);
4493 /* if no physical page mapped, return an error */
4494 if (phys_addr == -1)
4495 return -1;
4496 l = (page + TARGET_PAGE_SIZE) - addr;
4497 if (l > len)
4498 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00004499 phys_addr += (addr & ~TARGET_PAGE_MASK);
aliguori5e2972f2009-03-28 17:51:36 +00004500 if (is_write)
4501 cpu_physical_memory_write_rom(phys_addr, buf, l);
4502 else
aliguori5e2972f2009-03-28 17:51:36 +00004503 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
bellard13eb76e2004-01-24 15:23:36 +00004504 len -= l;
4505 buf += l;
4506 addr += l;
4507 }
4508 return 0;
4509}
Paul Brooka68fe892010-03-01 00:08:59 +00004510#endif
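
/* Editor's sketch, not part of the original file: gdbstub-style debugger
 * accesses go through cpu_memory_rw_debug(), which takes guest virtual
 * addresses and exists in both build flavours guarded above. */
static int example_debug_read_u32(CPUState *env, target_ulong vaddr,
                                  uint32_t *value)
{
    /* returns 0 on success, -1 if some page in the range is unmapped */
    return cpu_memory_rw_debug(env, vaddr, (uint8_t *)value,
                               sizeof(*value), 0);
}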
bellard13eb76e2004-01-24 15:23:36 +00004511
pbrook2e70f6e2008-06-29 01:03:05 +00004512/* in deterministic execution mode, instructions doing device I/O
4513 must be at the end of the TB */
4514void cpu_io_recompile(CPUState *env, void *retaddr)
4515{
4516 TranslationBlock *tb;
4517 uint32_t n, cflags;
4518 target_ulong pc, cs_base;
4519 uint64_t flags;
4520
4521 tb = tb_find_pc((unsigned long)retaddr);
4522 if (!tb) {
4523 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
4524 retaddr);
4525 }
4526 n = env->icount_decr.u16.low + tb->icount;
Stefan Weil618ba8e2011-04-18 06:39:53 +00004527 cpu_restore_state(tb, env, (unsigned long)retaddr);
pbrook2e70f6e2008-06-29 01:03:05 +00004528 /* Calculate how many instructions had been executed before the fault
thsbf20dc02008-06-30 17:22:19 +00004529 occurred. */
pbrook2e70f6e2008-06-29 01:03:05 +00004530 n = n - env->icount_decr.u16.low;
4531 /* Generate a new TB ending on the I/O insn. */
4532 n++;
4533 /* On MIPS and SH, delay slot instructions can only be restarted if
4534 they were already the first instruction in the TB. If this is not
thsbf20dc02008-06-30 17:22:19 +00004535 the first instruction in a TB then re-execute the preceding
pbrook2e70f6e2008-06-29 01:03:05 +00004536 branch. */
4537#if defined(TARGET_MIPS)
4538 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4539 env->active_tc.PC -= 4;
4540 env->icount_decr.u16.low++;
4541 env->hflags &= ~MIPS_HFLAG_BMASK;
4542 }
4543#elif defined(TARGET_SH4)
4544 if ((env->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) != 0
4545 && n > 1) {
4546 env->pc -= 2;
4547 env->icount_decr.u16.low++;
4548 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4549 }
4550#endif
4551 /* This should never happen. */
4552 if (n > CF_COUNT_MASK)
4553 cpu_abort(env, "TB too big during recompile");
4554
4555 cflags = n | CF_LAST_IO;
4556 pc = tb->pc;
4557 cs_base = tb->cs_base;
4558 flags = tb->flags;
4559 tb_phys_invalidate(tb, -1);
4560 /* FIXME: In theory this could raise an exception. In practice
4561 we have already translated the block once so it's probably ok. */
4562 tb_gen_code(env, pc, cs_base, flags, cflags);
thsbf20dc02008-06-30 17:22:19 +00004563 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
pbrook2e70f6e2008-06-29 01:03:05 +00004564 the first in the TB) then we end up generating a whole new TB and
4565 repeating the fault, which is horribly inefficient.
4566 Better would be to execute just this insn uncached, or generate a
4567 second new TB. */
4568 cpu_resume_from_signal(env, NULL);
4569}
4570
Paul Brookb3755a92010-03-12 16:54:58 +00004571#if !defined(CONFIG_USER_ONLY)
4572
Stefan Weil055403b2010-10-22 23:03:32 +02004573void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
bellarde3db7222005-01-26 22:00:47 +00004574{
4575 int i, target_code_size, max_target_code_size;
4576 int direct_jmp_count, direct_jmp2_count, cross_page;
4577 TranslationBlock *tb;
ths3b46e622007-09-17 08:09:54 +00004578
bellarde3db7222005-01-26 22:00:47 +00004579 target_code_size = 0;
4580 max_target_code_size = 0;
4581 cross_page = 0;
4582 direct_jmp_count = 0;
4583 direct_jmp2_count = 0;
4584 for(i = 0; i < nb_tbs; i++) {
4585 tb = &tbs[i];
4586 target_code_size += tb->size;
4587 if (tb->size > max_target_code_size)
4588 max_target_code_size = tb->size;
4589 if (tb->page_addr[1] != -1)
4590 cross_page++;
4591 if (tb->tb_next_offset[0] != 0xffff) {
4592 direct_jmp_count++;
4593 if (tb->tb_next_offset[1] != 0xffff) {
4594 direct_jmp2_count++;
4595 }
4596 }
4597 }
4598 /* XXX: avoid using doubles? */
bellard57fec1f2008-02-01 10:50:11 +00004599 cpu_fprintf(f, "Translation buffer state:\n");
Stefan Weil055403b2010-10-22 23:03:32 +02004600 cpu_fprintf(f, "gen code size %td/%ld\n",
bellard26a5f132008-05-28 12:30:31 +00004601 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4602 cpu_fprintf(f, "TB count %d/%d\n",
4603 nb_tbs, code_gen_max_blocks);
ths5fafdf22007-09-16 21:08:06 +00004604 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
bellarde3db7222005-01-26 22:00:47 +00004605 nb_tbs ? target_code_size / nb_tbs : 0,
4606 max_target_code_size);
Stefan Weil055403b2010-10-22 23:03:32 +02004607 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
bellarde3db7222005-01-26 22:00:47 +00004608 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4609 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
ths5fafdf22007-09-16 21:08:06 +00004610 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4611 cross_page,
bellarde3db7222005-01-26 22:00:47 +00004612 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4613 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
ths5fafdf22007-09-16 21:08:06 +00004614 direct_jmp_count,
bellarde3db7222005-01-26 22:00:47 +00004615 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4616 direct_jmp2_count,
4617 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
bellard57fec1f2008-02-01 10:50:11 +00004618 cpu_fprintf(f, "\nStatistics:\n");
bellarde3db7222005-01-26 22:00:47 +00004619 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4620 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4621 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
bellardb67d9a52008-05-23 09:57:34 +00004622 tcg_dump_info(f, cpu_fprintf);
bellarde3db7222005-01-26 22:00:47 +00004623}
4624
Avi Kivityd39e8222012-01-01 23:35:10 +02004625/* NOTE: this function can trigger an exception */
4626/* NOTE2: the returned address is not exactly the physical address: it
4627 is a ram_addr_t offset within guest RAM, not a guest physical address */
4628tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong addr)
4629{
4630 int mmu_idx, page_index, pd;
4631 void *p;
4632
4633 page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
4634 mmu_idx = cpu_mmu_index(env1);
4635 if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
4636 (addr & TARGET_PAGE_MASK))) {
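        /* stale or missing code TLB entry: this dummy code load takes the
           softmmu slow path so that tlb_fill() repopulates addr_code */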
4637 ldub_code(addr);
4638 }
4639 pd = env1->tlb_table[mmu_idx][page_index].addr_code & ~TARGET_PAGE_MASK;
Avi Kivity0e0df1e2012-01-02 00:32:15 +02004640 if (pd != io_mem_ram.ram_addr && pd != io_mem_rom.ram_addr
Avi Kivity06ef3522012-02-13 16:11:22 +02004641 && !io_mem_region[pd]->rom_device) {
Avi Kivityd39e8222012-01-01 23:35:10 +02004642#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC)
4643 cpu_unassigned_access(env1, addr, 0, 1, 0, 4);
4644#else
4645 cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
4646#endif
4647 }
4648 p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
4649 return qemu_ram_addr_from_host_nofail(p);
4650}
4651
Benjamin Herrenschmidt82afa582012-01-10 01:35:11 +00004652/*
4653 * A helper function for the _utterly broken_ virtio device model to find out if
4654 * it's running on a big endian machine. Don't do this at home kids!
4655 */
4656bool virtio_is_big_endian(void);
4657bool virtio_is_big_endian(void)
4658{
4659#if defined(TARGET_WORDS_BIGENDIAN)
4660 return true;
4661#else
4662 return false;
4663#endif
4664}
4665
bellard61382a52003-10-27 21:22:23 +00004666#define MMUSUFFIX _cmmu
Blue Swirl39171492011-09-21 18:13:16 +00004667#undef GETPC
bellard61382a52003-10-27 21:22:23 +00004668#define GETPC() NULL
4669#define env cpu_single_env
bellardb769d8f2004-10-03 15:07:13 +00004670#define SOFTMMU_CODE_ACCESS
bellard61382a52003-10-27 21:22:23 +00004671
4672#define SHIFT 0
4673#include "softmmu_template.h"
4674
4675#define SHIFT 1
4676#include "softmmu_template.h"
4677
4678#define SHIFT 2
4679#include "softmmu_template.h"
4680
4681#define SHIFT 3
4682#include "softmmu_template.h"
4683
4684#undef env
4685
4686#endif