/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
   have limited branch ranges (possibly also PPC), so place it in a
   section close to the code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of writes to a given page; past a threshold a bitmap is used */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

#define P_L2_LEVELS \
    (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
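
/* A worked example of the constants above, assuming a 32-bit address
   space (L1_MAP_ADDR_SPACE_BITS = 32) with TARGET_PAGE_BITS = 12 and
   L2_BITS = 10: 32 - 12 = 20 page-index bits remain, V_L1_BITS_REM =
   20 % 10 = 0, and since 0 < 4 the remainder is widened to a full
   level, giving V_L1_BITS = 10, V_L1_SIZE = 1024 and V_L1_SHIFT = 10:
   a 1024-entry first level over one level of 1024-entry PageDesc
   arrays.  The "< 4" test only exists to avoid a uselessly small
   first-level table. */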

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

typedef struct PhysPageEntry PhysPageEntry;

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;

struct PhysPageEntry {
    union {
        uint16_t leaf; /* index into phys_sections */
        uint16_t node; /* index into phys_map_nodes */
    } u;
};

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL ((uint16_t)~0)

/* This is a multi-level map on the physical address space.
   The bottom level holds a phys_sections index per page.  */
static PhysPageEntry phys_map = { .u.node = PHYS_MAP_NODE_NIL };
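
/* Interior nodes of this map are L2_SIZE-entry arrays drawn from the
   phys_map_nodes pool and referenced by 16-bit indices rather than
   pointers; leaves hold indices into phys_sections.  PHYS_MAP_NODE_NIL
   marks a subtree that has not been allocated yet, so the empty map is
   just a NIL root. */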

static void io_mem_init(void);
static void memory_map_init(void);

/* io memory support */
MemoryRegion *io_mem_region[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static MemoryRegion io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;
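
/* Mark the host pages containing [addr, addr + size) as executable:
   VirtualProtect() on Windows, mprotect() elsewhere. */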
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
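
/* Determine the host page size, derive qemu_host_page_size and
   qemu_host_page_mask from it, and (in BSD user mode) mark the address
   ranges the host is already using as PAGE_RESERVED so the guest does
   not map over them. */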
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}
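
/* Return the PageDesc for the given page index, walking the
   multi-level l1_map.  When 'alloc' is non-zero, missing intermediate
   tables and the final PageDesc array are allocated on the way down;
   otherwise NULL is returned for an unmapped page. */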
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)

static PhysPageEntry *phys_map_node_alloc(uint16_t *ptr)
{
    unsigned i;
    uint16_t ret;

    /* Assign early to avoid the pointer being invalidated by g_renew() */
    *ptr = ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    if (ret == phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].u.node = PHYS_MAP_NODE_NIL;
    }
    return phys_map_nodes[ret];
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}
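
/* Walk the physical page map from the root for 'index', allocating
   interior nodes as needed when 'alloc' is non-zero.  Leaves of a
   freshly allocated bottom-level node are initialized to
   phys_section_unassigned.  Returns a pointer to the leaf (an index
   into phys_sections), or NULL if unmapped and 'alloc' is zero. */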
static uint16_t *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageEntry *lp, *p;
    int i, j;

    lp = &phys_map;

    /* Level 1..N. */
    for (i = P_L2_LEVELS - 1; i >= 0; i--) {
        if (lp->u.node == PHYS_MAP_NODE_NIL) {
            if (!alloc) {
                return NULL;
            }
            p = phys_map_node_alloc(&lp->u.node);
            if (i == 0) {
                for (j = 0; j < L2_SIZE; j++) {
                    p[j].u.leaf = phys_section_unassigned;
                }
            }
        } else {
            p = phys_map_nodes[lp->u.node];
        }
        lp = &p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    return &lp->u.leaf;
}
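
/* Build the PhysPageDesc for a physical page from the
   MemoryRegionSection it falls in.  For RAM (and ROM devices) the
   offset within the section is folded into phys_offset; read-only
   sections are additionally redirected to the io_mem_rom handler.
   Pages with no section resolve to phys_section_unassigned. */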
static inline PhysPageDesc phys_page_find(target_phys_addr_t index)
{
    uint16_t *p = phys_page_find_alloc(index, 0);
    uint16_t s_index = phys_section_unassigned;
    MemoryRegionSection *section;
    PhysPageDesc pd;

    if (p) {
        s_index = *p;
    }
    section = &phys_sections[s_index];
    index <<= TARGET_PAGE_BITS;
    assert(section->offset_within_address_space <= index
           && index <= section->offset_within_address_space + section->size-1);
    pd.phys_offset = section->mr->ram_addr;
    pd.region_offset = (index - section->offset_within_address_space)
        + section->offset_within_region;
    if (memory_region_is_ram(section->mr)) {
        pd.phys_offset += pd.region_offset;
        pd.region_offset = 0;
    } else if (section->mr->rom_device) {
        pd.phys_offset += pd.region_offset;
    }
    if (section->readonly) {
        pd.phys_offset |= io_mem_rom.ram_addr;
    }
    return pd;
}

static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change once a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Keep the buffer no bigger than 16MB to branch between blocks */
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now. */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif
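
/* Return the CPUState with the given cpu_index, or NULL if none. */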
CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}
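
/* Register a new CPU: assign it the next free cpu_index, append it to
   the global CPU list and, in system mode, register its common state
   with savevm/vmstate so it is migrated and snapshotted. */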
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}
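
/* Unlink 'tb' from a page's TB list.  The low two bits of each link
   encode which of the TB's (at most two) pages the pointer belongs to,
   so they are masked off before comparing. */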
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
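
/* Remove jump entry 'n' of 'tb' from the circular list of TBs that
   jump into the same target.  The links use the same low-bit tagging
   as the page lists; a tag of 2 marks the jmp_first list head. */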
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
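
/* Remove 'tb' from every structure that references it: the physical
   hash table, the per-page TB lists, each CPU's tb_jmp_cache and the
   jump lists.  TBs that jumped into 'tb' are reset so the chain is
   resolved again the next time they run. */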
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
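
/* Set bits [start, start + len) in the bitmap 'tab', handling the
   partial bytes at either end and filling whole bytes in between. */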
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
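
/* Build the self-modifying-code bitmap for a page: one bit per byte
   of the page, set for every byte covered by a TB, so writes can be
   checked against translated code without walking the TB list. */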
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
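
/* Translate a block of guest code starting at 'pc' and register the
   resulting TB.  If the TB cache is full, flush it and retry, setting
   tb_invalidated_flag so callers know stale TB pointers must be
   dropped.  Blocks spanning two guest pages are linked to both. */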
pbrook2e70f6e2008-06-29 01:03:05 +00001030TranslationBlock *tb_gen_code(CPUState *env,
1031 target_ulong pc, target_ulong cs_base,
1032 int flags, int cflags)
bellardd720b932004-04-25 17:57:43 +00001033{
1034 TranslationBlock *tb;
1035 uint8_t *tc_ptr;
Paul Brook41c1b1c2010-03-12 16:54:58 +00001036 tb_page_addr_t phys_pc, phys_page2;
1037 target_ulong virt_page2;
bellardd720b932004-04-25 17:57:43 +00001038 int code_gen_size;
1039
Paul Brook41c1b1c2010-03-12 16:54:58 +00001040 phys_pc = get_page_addr_code(env, pc);
bellardc27004e2005-01-03 23:35:10 +00001041 tb = tb_alloc(pc);
bellardd720b932004-04-25 17:57:43 +00001042 if (!tb) {
1043 /* flush must be done */
1044 tb_flush(env);
1045 /* cannot fail at this point */
bellardc27004e2005-01-03 23:35:10 +00001046 tb = tb_alloc(pc);
pbrook2e70f6e2008-06-29 01:03:05 +00001047 /* Don't forget to invalidate previous TB info. */
1048 tb_invalidated_flag = 1;
bellardd720b932004-04-25 17:57:43 +00001049 }
1050 tc_ptr = code_gen_ptr;
1051 tb->tc_ptr = tc_ptr;
1052 tb->cs_base = cs_base;
1053 tb->flags = flags;
1054 tb->cflags = cflags;
blueswir1d07bde82007-12-11 19:35:45 +00001055 cpu_gen_code(env, tb, &code_gen_size);
bellardd720b932004-04-25 17:57:43 +00001056 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
ths3b46e622007-09-17 08:09:54 +00001057
bellardd720b932004-04-25 17:57:43 +00001058 /* check next page if needed */
bellardc27004e2005-01-03 23:35:10 +00001059 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
bellardd720b932004-04-25 17:57:43 +00001060 phys_page2 = -1;
bellardc27004e2005-01-03 23:35:10 +00001061 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
Paul Brook41c1b1c2010-03-12 16:54:58 +00001062 phys_page2 = get_page_addr_code(env, virt_page2);
bellardd720b932004-04-25 17:57:43 +00001063 }
Paul Brook41c1b1c2010-03-12 16:54:58 +00001064 tb_link_page(tb, phys_pc, phys_page2);
pbrook2e70f6e2008-06-29 01:03:05 +00001065 return tb;
bellardd720b932004-04-25 17:57:43 +00001066}
ths3b46e622007-09-17 08:09:54 +00001067
bellard9fa3e852004-01-04 18:06:42 +00001068/* invalidate all TBs which intersect with the target physical page
1069 starting in range [start;end[. NOTE: start and end must refer to
bellardd720b932004-04-25 17:57:43 +00001070 the same physical page. 'is_cpu_write_access' should be true if called
1071 from a real cpu write access: the virtual CPU will exit the current
1072 TB if code is modified inside this TB. */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001073void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
bellardd720b932004-04-25 17:57:43 +00001074 int is_cpu_write_access)
bellard9fa3e852004-01-04 18:06:42 +00001075{
aliguori6b917542008-11-18 19:46:41 +00001076 TranslationBlock *tb, *tb_next, *saved_tb;
bellardd720b932004-04-25 17:57:43 +00001077 CPUState *env = cpu_single_env;
Paul Brook41c1b1c2010-03-12 16:54:58 +00001078 tb_page_addr_t tb_start, tb_end;
aliguori6b917542008-11-18 19:46:41 +00001079 PageDesc *p;
1080 int n;
1081#ifdef TARGET_HAS_PRECISE_SMC
1082 int current_tb_not_found = is_cpu_write_access;
1083 TranslationBlock *current_tb = NULL;
1084 int current_tb_modified = 0;
1085 target_ulong current_pc = 0;
1086 target_ulong current_cs_base = 0;
1087 int current_flags = 0;
1088#endif /* TARGET_HAS_PRECISE_SMC */
bellard9fa3e852004-01-04 18:06:42 +00001089
1090 p = page_find(start >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001091 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00001092 return;
ths5fafdf22007-09-16 21:08:06 +00001093 if (!p->code_bitmap &&
bellardd720b932004-04-25 17:57:43 +00001094 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1095 is_cpu_write_access) {
bellard9fa3e852004-01-04 18:06:42 +00001096 /* build code bitmap */
1097 build_page_bitmap(p);
1098 }
1099
1100 /* we remove all the TBs in the range [start, end[ */
1101 /* XXX: see if in some cases it could be faster to invalidate all the code */
1102 tb = p->first_tb;
1103 while (tb != NULL) {
1104 n = (long)tb & 3;
1105 tb = (TranslationBlock *)((long)tb & ~3);
1106 tb_next = tb->page_next[n];
1107 /* NOTE: this is subtle as a TB may span two physical pages */
1108 if (n == 0) {
1109 /* NOTE: tb_end may be after the end of the page, but
1110 it is not a problem */
1111 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1112 tb_end = tb_start + tb->size;
1113 } else {
1114 tb_start = tb->page_addr[1];
1115 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1116 }
1117 if (!(tb_end <= start || tb_start >= end)) {
bellardd720b932004-04-25 17:57:43 +00001118#ifdef TARGET_HAS_PRECISE_SMC
1119 if (current_tb_not_found) {
1120 current_tb_not_found = 0;
1121 current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001122 if (env->mem_io_pc) {
bellardd720b932004-04-25 17:57:43 +00001123 /* now we have a real cpu fault */
pbrook2e70f6e2008-06-29 01:03:05 +00001124 current_tb = tb_find_pc(env->mem_io_pc);
bellardd720b932004-04-25 17:57:43 +00001125 }
1126 }
1127 if (current_tb == tb &&
pbrook2e70f6e2008-06-29 01:03:05 +00001128 (current_tb->cflags & CF_COUNT_MASK) != 1) {
bellardd720b932004-04-25 17:57:43 +00001129 /* If we are modifying the current TB, we must stop
1130 its execution. We could be more precise by checking
1131 that the modification is after the current PC, but it
1132 would require a specialized function to partially
1133 restore the CPU state */
ths3b46e622007-09-17 08:09:54 +00001134
bellardd720b932004-04-25 17:57:43 +00001135 current_tb_modified = 1;
Stefan Weil618ba8e2011-04-18 06:39:53 +00001136 cpu_restore_state(current_tb, env, env->mem_io_pc);
aliguori6b917542008-11-18 19:46:41 +00001137 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1138 &current_flags);
bellardd720b932004-04-25 17:57:43 +00001139 }
1140#endif /* TARGET_HAS_PRECISE_SMC */
bellard6f5a9f72005-11-26 20:12:28 +00001141 /* we need to do that to handle the case where a signal
1142 occurs while doing tb_phys_invalidate() */
1143 saved_tb = NULL;
1144 if (env) {
1145 saved_tb = env->current_tb;
1146 env->current_tb = NULL;
1147 }
bellard9fa3e852004-01-04 18:06:42 +00001148 tb_phys_invalidate(tb, -1);
bellard6f5a9f72005-11-26 20:12:28 +00001149 if (env) {
1150 env->current_tb = saved_tb;
1151 if (env->interrupt_request && env->current_tb)
1152 cpu_interrupt(env, env->interrupt_request);
1153 }
bellard9fa3e852004-01-04 18:06:42 +00001154 }
1155 tb = tb_next;
1156 }
1157#if !defined(CONFIG_USER_ONLY)
1158 /* if no code remaining, no need to continue to use slow writes */
1159 if (!p->first_tb) {
1160 invalidate_page_bitmap(p);
bellardd720b932004-04-25 17:57:43 +00001161 if (is_cpu_write_access) {
pbrook2e70f6e2008-06-29 01:03:05 +00001162 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
bellardd720b932004-04-25 17:57:43 +00001163 }
1164 }
1165#endif
1166#ifdef TARGET_HAS_PRECISE_SMC
1167 if (current_tb_modified) {
1168 /* we generate a block containing just the instruction
1169 modifying the memory. It will ensure that it cannot modify
1170 itself */
bellardea1c1802004-06-14 18:56:36 +00001171 env->current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001172 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
bellardd720b932004-04-25 17:57:43 +00001173 cpu_resume_from_signal(env, NULL);
bellard9fa3e852004-01-04 18:06:42 +00001174 }
1175#endif
1176}
1177
1178/* len must be <= 8 and start must be a multiple of len */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001179static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
bellard9fa3e852004-01-04 18:06:42 +00001180{
1181 PageDesc *p;
1182 int offset, b;
bellard59817cc2004-02-16 22:01:13 +00001183#if 0
bellarda4193c82004-06-03 14:01:43 +00001184 if (1) {
aliguori93fcfe32009-01-15 22:34:14 +00001185 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1186 cpu_single_env->mem_io_vaddr, len,
1187 cpu_single_env->eip,
1188 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
bellard59817cc2004-02-16 22:01:13 +00001189 }
1190#endif
bellard9fa3e852004-01-04 18:06:42 +00001191 p = page_find(start >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001192 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00001193 return;
1194 if (p->code_bitmap) {
1195 offset = start & ~TARGET_PAGE_MASK;
1196 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1197 if (b & ((1 << len) - 1))
1198 goto do_invalidate;
1199 } else {
1200 do_invalidate:
bellardd720b932004-04-25 17:57:43 +00001201 tb_invalidate_phys_page_range(start, start + len, 1);
bellard9fa3e852004-01-04 18:06:42 +00001202 }
1203}
1204
bellard9fa3e852004-01-04 18:06:42 +00001205#if !defined(CONFIG_SOFTMMU)
Paul Brook41c1b1c2010-03-12 16:54:58 +00001206static void tb_invalidate_phys_page(tb_page_addr_t addr,
bellardd720b932004-04-25 17:57:43 +00001207 unsigned long pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00001208{
aliguori6b917542008-11-18 19:46:41 +00001209 TranslationBlock *tb;
bellard9fa3e852004-01-04 18:06:42 +00001210 PageDesc *p;
aliguori6b917542008-11-18 19:46:41 +00001211 int n;
bellardd720b932004-04-25 17:57:43 +00001212#ifdef TARGET_HAS_PRECISE_SMC
aliguori6b917542008-11-18 19:46:41 +00001213 TranslationBlock *current_tb = NULL;
bellardd720b932004-04-25 17:57:43 +00001214 CPUState *env = cpu_single_env;
aliguori6b917542008-11-18 19:46:41 +00001215 int current_tb_modified = 0;
1216 target_ulong current_pc = 0;
1217 target_ulong current_cs_base = 0;
1218 int current_flags = 0;
bellardd720b932004-04-25 17:57:43 +00001219#endif
bellard9fa3e852004-01-04 18:06:42 +00001220
1221 addr &= TARGET_PAGE_MASK;
1222 p = page_find(addr >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001223 if (!p)
bellardfd6ce8f2003-05-14 19:00:11 +00001224 return;
1225 tb = p->first_tb;
bellardd720b932004-04-25 17:57:43 +00001226#ifdef TARGET_HAS_PRECISE_SMC
1227 if (tb && pc != 0) {
1228 current_tb = tb_find_pc(pc);
1229 }
1230#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001231 while (tb != NULL) {
bellard9fa3e852004-01-04 18:06:42 +00001232 n = (long)tb & 3;
1233 tb = (TranslationBlock *)((long)tb & ~3);
bellardd720b932004-04-25 17:57:43 +00001234#ifdef TARGET_HAS_PRECISE_SMC
1235 if (current_tb == tb &&
pbrook2e70f6e2008-06-29 01:03:05 +00001236 (current_tb->cflags & CF_COUNT_MASK) != 1) {
bellardd720b932004-04-25 17:57:43 +00001237 /* If we are modifying the current TB, we must stop
1238 its execution. We could be more precise by checking
1239 that the modification is after the current PC, but it
1240 would require a specialized function to partially
1241 restore the CPU state */
ths3b46e622007-09-17 08:09:54 +00001242
bellardd720b932004-04-25 17:57:43 +00001243 current_tb_modified = 1;
Stefan Weil618ba8e2011-04-18 06:39:53 +00001244 cpu_restore_state(current_tb, env, pc);
aliguori6b917542008-11-18 19:46:41 +00001245 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1246 &current_flags);
bellardd720b932004-04-25 17:57:43 +00001247 }
1248#endif /* TARGET_HAS_PRECISE_SMC */
bellard9fa3e852004-01-04 18:06:42 +00001249 tb_phys_invalidate(tb, addr);
1250 tb = tb->page_next[n];
bellardfd6ce8f2003-05-14 19:00:11 +00001251 }
1252 p->first_tb = NULL;
bellardd720b932004-04-25 17:57:43 +00001253#ifdef TARGET_HAS_PRECISE_SMC
1254 if (current_tb_modified) {
1255 /* we generate a block containing just the instruction
1256 modifying the memory. It will ensure that it cannot modify
1257 itself */
bellardea1c1802004-06-14 18:56:36 +00001258 env->current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001259 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
bellardd720b932004-04-25 17:57:43 +00001260 cpu_resume_from_signal(env, puc);
1261 }
1262#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001263}
bellard9fa3e852004-01-04 18:06:42 +00001264#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
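
/* Illustrative sketch (never compiled): the CONFIG_USER_ONLY path above
   rounds the protected range up to a host page.  With hypothetical 4 KB
   target pages on a 64 KB host page, protecting the code page at 0x401000
   write-protects the whole host page, so the prot loop must merge the flags
   of every target page sharing it before the single mprotect() call. */
#if 0
static void example_host_page_rounding(void)
{
    target_ulong page_addr = 0x401000;                         /* hypothetical */
    target_ulong host_page = page_addr & qemu_host_page_mask;  /* 0x400000 */
    unsigned long n = qemu_host_page_size / TARGET_PAGE_SIZE;  /* 16 pages */

    printf("host page at 0x%lx covers %lu target pages\n",
           (unsigned long)host_page, n);
}
#endif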

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
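
/* Usage sketch (never compiled): a typical caller maps a host PC taken in a
   signal handler back to the TB whose generated code contains it.  The
   helper name below is hypothetical; the binary search relies on tbs[]
   being allocated in ascending tc_ptr order from code_gen_buffer. */
#if 0
static void example_find_tb(unsigned long host_pc)
{
    TranslationBlock *tb = tb_find_pc(host_pc);

    if (tb) {
        /* tb->tc_ptr <= host_pc < the next TB's tc_ptr; tb->pc is the
           guest address the block was translated from. */
    }
}
#endif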

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
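
/* Illustrative note: the jmp lists walked above keep a 2-bit tag in the low
   bits of each pointer, which is safe because TranslationBlock pointers are
   at least 4-byte aligned.  A minimal sketch of the encoding, with
   hypothetical helper names: */
#if 0
static inline TranslationBlock *jmp_list_tb(TranslationBlock *tagged)
{
    return (TranslationBlock *)((long)tagged & ~3);  /* strip the tag */
}

static inline int jmp_list_tag(TranslationBlock *tagged)
{
    /* 0 or 1: which outgoing jump slot links here; 2: the list head */
    return (long)tagged & 3;
}
#endif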

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint. */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
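
/* Worked example (hypothetical values): for len = 4, len_mask is ~(4 - 1),
   i.e. ...fffc, so addr = 0x1000 passes the alignment check while
   addr = 0x1003 fails it (0x1003 & ~len_mask == 3).  A caller might look
   like this: */
#if 0
static void example_set_watchpoint(CPUState *env)
{
    CPUWatchpoint *wp;

    if (cpu_watchpoint_insert(env, 0x1000, 4, BP_MEM_WRITE, &wp) < 0) {
        /* rejected: unsupported length or unaligned address */
    }
}
#endif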

/* Remove a specific watchpoint. */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

/* enable or disable low level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif defined(_WIN32)
        /* Win32 doesn't support line-buffering, so use unbuffered output. */
        setvbuf(logfile, NULL, _IONBF, 0);
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}

#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
        return;
    }

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu_unlink_tb(env);
    }
}

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *env, int mask)
{
    env->interrupt_request |= mask;
    cpu_unlink_tb(env);
}
#endif /* CONFIG_USER_ONLY */

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}

const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
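
/* Usage sketch (never compiled): this parser backs the -d command line
   option, e.g. "-d in_asm,op".  Hypothetical example: */
#if 0
static void example_parse_log_mask(void)
{
    int mask = cpu_str_to_log_mask("in_asm,op");

    /* mask == (CPU_LOG_TB_IN_ASM | CPU_LOG_TB_OP); an unknown name makes
       the whole call return 0, which callers treat as a parse error. */
    cpu_set_log(mask);
}
#endif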

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       The list heads copied by memcpy() above still point into env's lists,
       so reset them on the new CPU before inserting the clones.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&new_env->breakpoints);
    QTAILQ_INIT(&new_env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
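
/* Illustrative sketch (never compiled): two probes are needed because a TB
   may start on the page before 'addr' and spill into the flushed page.
   With hypothetical 4 KB pages, a TB starting at 0x1ff8 can extend past
   0x2000, yet it hashes under its start page, so flushing 0x2000 must also
   clear the bucket range of page 0x1000. */
#if 0
static void example_jmp_cache_probes(CPUState *env, target_ulong addr)
{
    unsigned int prev_page = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    unsigned int this_page = tb_jmp_cache_hash_page(addr);

    (void)prev_page;
    (void)this_page;
}
#endif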

static CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read  = -1,
    .addr_write = -1,
    .addr_code  = -1,
    .addend     = -1,
};

/* NOTE:
 * If flush_global is true (the usual case), flush all tlb entries.
 * If flush_global is false, flush (at least) all tlb entries not
 * marked global.
 *
 * Since QEMU doesn't currently implement a global/not-global flag
 * for tlb entries, at the moment tlb_flush() will also flush all
 * tlb entries in the flush_global == false case. This is OK because
 * CPU architectures generally permit an implementation to drop
 * entries from the TLB at any time, so flushing more entries than
 * required is only an efficiency issue, not a correctness issue.
 */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
        }
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        *tlb_entry = s_cputlb_empty_entry;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        tlb_flush(env, 1);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

    tlb_flush_jmp_cache(env, addr);
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
        != (end - 1) - start) {
        abort();
    }

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for(i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
        }
    }
}

int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;
    void *p;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
            + tlb_entry->addend);
        ram_addr = qemu_ram_addr_from_host_nofail(p);
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    int mmu_idx;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
    }
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB. */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}
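
/* Worked example (hypothetical addresses, never compiled): if a 2 MB page
   at 0x00200000 is already tracked (addr 0x00200000, mask 0xffe00000) and a
   second 2 MB page at 0x00600000 is added, the loop widens the mask
   0xffe00000 -> 0xffc00000 -> 0xff800000 until both addresses agree: */
#if 0
static void example_large_page_merge(CPUState *env)
{
    env->tlb_flush_addr = 0x00200000;
    env->tlb_flush_mask = 0xffe00000;   /* one 2 MB page */

    tlb_add_large_page(env, 0x00600000, 0x00200000);
    /* Now tlb_flush_addr == 0x00000000 and tlb_flush_mask == 0xff800000:
       the tracked region grew to 0x00000000-0x007fffff, so flushing any
       page inside it forces a full TLB flush. */
}
#endif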

static bool is_ram_rom(ram_addr_t pd)
{
    pd &= ~TARGET_PAGE_MASK;
    return pd == io_mem_ram.ram_addr || pd == io_mem_rom.ram_addr;
}

static bool is_romd(ram_addr_t pd)
{
    MemoryRegion *mr;

    pd &= ~TARGET_PAGE_MASK;
    mr = io_mem_region[pd];
    return mr->rom_device && mr->readable;
}

static bool is_ram_rom_romd(ram_addr_t pd)
{
    return is_ram_rom(pd) || is_romd(pd);
}

/* Add a new TLB entry. At most one entry for a given virtual address
   is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
   supplied size is only used by tlb_flush_page.  */
void tlb_set_page(CPUState *env, target_ulong vaddr,
                  target_phys_addr_t paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    PhysPageDesc p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    unsigned long addend;
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    target_phys_addr_t iotlb;

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
           " prot=%x idx=%d pd=0x%08lx\n",
           vaddr, paddr, prot, mmu_idx, pd);
#endif

    address = vaddr;
    if (!is_ram_rom_romd(pd)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if (is_ram_rom(pd)) {
        /* Normal RAM.  */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr)
            iotlb |= io_mem_notdirty.ram_addr;
        else
            iotlb |= io_mem_rom.ram_addr;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        iotlb += p.region_offset;
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = io_mem_watch.ram_addr + paddr;
                address |= TLB_MMIO;
                break;
            }
        }
    }

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == io_mem_rom.ram_addr || is_romd(pd)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}
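
/* Informal summary of the addr_write encodings chosen above:
   - ROM/ROMD page:    address | TLB_MMIO     (writes trap to the I/O path)
   - clean RAM page:   address | TLB_NOTDIRTY (first write traps so the
                                               dirty bits can be updated)
   - dirty RAM page:   address                (direct fast-path store)
   - no PAGE_WRITE:    -1                     (always faults) */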

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */

struct walk_memory_regions_data
{
    walk_memory_regions_fn fn;
    void *priv;
    unsigned long start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   abi_ulong end, int new_prot)
{
    if (data->start != -1ul) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1ul);
    data->prot = new_prot;

    return 0;
}

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    unsigned long i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}

static int dump_region(void *priv, abi_ulong start,
    abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
        " "TARGET_ABI_FMT_lx" %c%c%c\n",
        start, end, end - start,
        ((prot & PAGE_READ) ? 'r' : '-'),
        ((prot & PAGE_WRITE) ? 'w' : '-'),
        ((prot & PAGE_EXEC) ? 'x' : '-'));

    return (0);
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p)
            return -1;
        if (!(p->flags & PAGE_VALID))
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            return 0;
        }
    }
    return 0;
}
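
/* Usage sketch (never compiled, hypothetical caller): user-mode access
   helpers can validate a whole guest buffer up front instead of faulting
   midway through, roughly: */
#if 0
static void *example_lock_user(target_ulong guest_addr, target_ulong len)
{
    if (page_check_range(guest_addr, len, PAGE_READ | PAGE_WRITE) < 0) {
        return NULL;    /* would fault: let the caller report EFAULT */
    }
    return g2h(guest_addr);
}
#endif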

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
2520
bellard6a00d602005-11-21 23:25:50 +00002521static inline void tlb_set_dirty(CPUState *env,
2522 unsigned long addr, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002523{
2524}
bellard9fa3e852004-01-04 18:06:42 +00002525#endif /* defined(CONFIG_USER_ONLY) */
2526
pbrooke2eef172008-06-08 01:09:01 +00002527#if !defined(CONFIG_USER_ONLY)
pbrook8da3ff12008-12-01 18:59:50 +00002528
Paul Brookc04b2b72010-03-01 03:31:14 +00002529#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2530typedef struct subpage_t {
Avi Kivity70c68e42012-01-02 12:32:48 +02002531 MemoryRegion iomem;
Paul Brookc04b2b72010-03-01 03:31:14 +00002532 target_phys_addr_t base;
Avi Kivity5312bd82012-02-12 18:32:55 +02002533 uint16_t sub_section[TARGET_PAGE_SIZE];
Paul Brookc04b2b72010-03-01 03:31:14 +00002534} subpage_t;
2535
Anthony Liguoric227f092009-10-01 16:12:16 -05002536static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002537 uint16_t section);
2538static subpage_t *subpage_init (target_phys_addr_t base, uint16_t *section,
2539 uint16_t orig_section);
blueswir1db7b5422007-05-26 17:36:03 +00002540#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2541 need_subpage) \
2542 do { \
2543 if (addr > start_addr) \
2544 start_addr2 = 0; \
2545 else { \
2546 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2547 if (start_addr2 > 0) \
2548 need_subpage = 1; \
2549 } \
2550 \
blueswir149e9fba2007-05-30 17:25:06 +00002551 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
blueswir1db7b5422007-05-26 17:36:03 +00002552 end_addr2 = TARGET_PAGE_SIZE - 1; \
2553 else { \
2554 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2555 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2556 need_subpage = 1; \
2557 } \
2558 } while (0)
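/* Worked example (hypothetical numbers, 4 KiB target pages): registering
 * start_addr = 0x2200 with orig_size = 0x400 visits the single page at
 * addr = 0x2000.  Since addr < start_addr, start_addr2 = 0x200; since
 * (0x2200 + 0x400) - 0x2000 = 0x600 < TARGET_PAGE_SIZE, end_addr2 =
 * 0x25ff & ~TARGET_PAGE_MASK = 0x5ff.  Both tests set need_subpage, so
 * only bytes 0x200..0x5ff of the page go through the subpage machinery. */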
2559
Avi Kivity5312bd82012-02-12 18:32:55 +02002560static void destroy_page_desc(uint16_t section_index)
Avi Kivity54688b12012-02-09 17:34:32 +02002561{
Avi Kivity5312bd82012-02-12 18:32:55 +02002562 MemoryRegionSection *section = &phys_sections[section_index];
2563 MemoryRegion *mr = section->mr;
Avi Kivity54688b12012-02-09 17:34:32 +02002564
2565 if (mr->subpage) {
2566 subpage_t *subpage = container_of(mr, subpage_t, iomem);
2567 memory_region_destroy(&subpage->iomem);
2568 g_free(subpage);
2569 }
2570}
2571
Avi Kivity4346ae32012-02-10 17:00:01 +02002572static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
Avi Kivity54688b12012-02-09 17:34:32 +02002573{
2574 unsigned i;
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002575 PhysPageEntry *p;
Avi Kivity54688b12012-02-09 17:34:32 +02002576
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002577 if (lp->u.node == PHYS_MAP_NODE_NIL) {
Avi Kivity54688b12012-02-09 17:34:32 +02002578 return;
2579 }
2580
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002581 p = phys_map_nodes[lp->u.node];
Avi Kivity4346ae32012-02-10 17:00:01 +02002582 for (i = 0; i < L2_SIZE; ++i) {
2583 if (level > 0) {
Avi Kivity54688b12012-02-09 17:34:32 +02002584 destroy_l2_mapping(&p[i], level - 1);
Avi Kivity4346ae32012-02-10 17:00:01 +02002585 } else {
2586 destroy_page_desc(p[i].u.leaf);
Avi Kivity54688b12012-02-09 17:34:32 +02002587 }
Avi Kivity54688b12012-02-09 17:34:32 +02002588 }
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002589 lp->u.node = PHYS_MAP_NODE_NIL;
Avi Kivity54688b12012-02-09 17:34:32 +02002590}
2591
2592static void destroy_all_mappings(void)
2593{
Avi Kivity3eef53d2012-02-10 14:57:31 +02002594 destroy_l2_mapping(&phys_map, P_L2_LEVELS - 1);
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002595 phys_map_nodes_reset();
Avi Kivity54688b12012-02-09 17:34:32 +02002596}
2597
Avi Kivity5312bd82012-02-12 18:32:55 +02002598static uint16_t phys_section_add(MemoryRegionSection *section)
2599{
2600 if (phys_sections_nb == phys_sections_nb_alloc) {
2601 phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
2602 phys_sections = g_renew(MemoryRegionSection, phys_sections,
2603 phys_sections_nb_alloc);
2604 }
2605 phys_sections[phys_sections_nb] = *section;
2606 return phys_sections_nb++;
2607}
2608
2609static void phys_sections_clear(void)
2610{
2611 phys_sections_nb = 0;
2612}
2613
Michael S. Tsirkin8f2498f2009-09-29 18:53:16 +02002614/* register physical memory.
2615   For RAM, the section size must be a multiple of the target page size.
2616   If the section describes an io memory page, the address used when
pbrook8da3ff12008-12-01 18:59:50 +00002617   calling the IO function is the offset from the start of the region,
2618   plus the section's offset_within_region. Both
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002619   offset_within_address_space and offset_within_region are rounded down
pbrook8da3ff12008-12-01 18:59:50 +00002620   to a page boundary before calculating this offset. This should not be
2621   a problem unless the low bits of the two offsets differ. */
Avi Kivitydd811242012-01-02 12:17:03 +02002622void cpu_register_physical_memory_log(MemoryRegionSection *section,
Avi Kivityd7ec83e2012-02-08 17:07:26 +02002623 bool readonly)
bellard33417e72003-08-10 21:47:01 +00002624{
Avi Kivitydd811242012-01-02 12:17:03 +02002625 target_phys_addr_t start_addr = section->offset_within_address_space;
2626 ram_addr_t size = section->size;
Anthony Liguoric227f092009-10-01 16:12:16 -05002627 target_phys_addr_t addr, end_addr;
Anthony Liguoric227f092009-10-01 16:12:16 -05002628 ram_addr_t orig_size = size;
Richard Hendersonf6405242010-04-22 16:47:31 -07002629 subpage_t *subpage;
Avi Kivity5312bd82012-02-12 18:32:55 +02002630 uint16_t section_index = phys_section_add(section);
Avi Kivitydd811242012-01-02 12:17:03 +02002631
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002632 assert(size);
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002633
bellard5fd386f2004-05-23 21:11:22 +00002634 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
Anthony Liguoric227f092009-10-01 16:12:16 -05002635 end_addr = start_addr + (target_phys_addr_t)size;
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002636
2637 addr = start_addr;
2638 do {
Avi Kivity717cb7b2012-02-12 21:21:21 +02002639 uint16_t *p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2640 uint16_t orig_memory = *p;
2641 target_phys_addr_t start_addr2, end_addr2;
2642 int need_subpage = 0;
2643 MemoryRegion *mr = phys_sections[orig_memory].mr;
blueswir1db7b5422007-05-26 17:36:03 +00002644
Avi Kivity717cb7b2012-02-12 21:21:21 +02002645 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2646 need_subpage);
2647 if (need_subpage) {
2648 if (!(mr->subpage)) {
Avi Kivity8636b922012-02-12 21:10:50 +02002649 subpage = subpage_init((addr & TARGET_PAGE_MASK),
Avi Kivity717cb7b2012-02-12 21:21:21 +02002650 p, orig_memory);
2651 } else {
2652 subpage = container_of(mr, subpage_t, iomem);
blueswir1db7b5422007-05-26 17:36:03 +00002653 }
Avi Kivity717cb7b2012-02-12 21:21:21 +02002654 subpage_register(subpage, start_addr2, end_addr2,
2655 section_index);
2656 } else {
2657 *p = section_index;
blueswir1db7b5422007-05-26 17:36:03 +00002658 }
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002659 addr += TARGET_PAGE_SIZE;
2660 } while (addr != end_addr);
bellard33417e72003-08-10 21:47:01 +00002661}
2662
Anthony Liguoric227f092009-10-01 16:12:16 -05002663void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002664{
2665 if (kvm_enabled())
2666 kvm_coalesce_mmio_region(addr, size);
2667}
2668
Anthony Liguoric227f092009-10-01 16:12:16 -05002669void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002670{
2671 if (kvm_enabled())
2672 kvm_uncoalesce_mmio_region(addr, size);
2673}
2674
Sheng Yang62a27442010-01-26 19:21:16 +08002675void qemu_flush_coalesced_mmio_buffer(void)
2676{
2677 if (kvm_enabled())
2678 kvm_flush_coalesced_mmio_buffer();
2679}
2680
Marcelo Tosattic9027602010-03-01 20:25:08 -03002681#if defined(__linux__) && !defined(TARGET_S390X)
2682
2683#include <sys/vfs.h>
2684
2685#define HUGETLBFS_MAGIC 0x958458f6
2686
2687static long gethugepagesize(const char *path)
2688{
2689 struct statfs fs;
2690 int ret;
2691
2692 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002693 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002694 } while (ret != 0 && errno == EINTR);
2695
2696 if (ret != 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002697 perror(path);
2698 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002699 }
2700
2701 if (fs.f_type != HUGETLBFS_MAGIC)
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002702 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002703
2704 return fs.f_bsize;
2705}
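/* Example (hypothetical mount point): with 2 MiB huge pages mounted at
 * /dev/hugepages, statfs() reports f_type == HUGETLBFS_MAGIC and
 * f_bsize == 0x200000, so gethugepagesize("/dev/hugepages") returns 2 MiB. */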
2706
Alex Williamson04b16652010-07-02 11:13:17 -06002707static void *file_ram_alloc(RAMBlock *block,
2708 ram_addr_t memory,
2709 const char *path)
Marcelo Tosattic9027602010-03-01 20:25:08 -03002710{
2711 char *filename;
2712 void *area;
2713 int fd;
2714#ifdef MAP_POPULATE
2715 int flags;
2716#endif
2717 unsigned long hpagesize;
2718
2719 hpagesize = gethugepagesize(path);
2720 if (!hpagesize) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002721 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002722 }
2723
2724 if (memory < hpagesize) {
2725 return NULL;
2726 }
2727
2728 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2729 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2730 return NULL;
2731 }
2732
2733 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002734 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002735 }
2736
2737 fd = mkstemp(filename);
2738 if (fd < 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002739 perror("unable to create backing store for hugepages");
2740 free(filename);
2741 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002742 }
2743 unlink(filename);
2744 free(filename);
2745
2746 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2747
2748 /*
2749 * ftruncate is not supported by hugetlbfs in older
2750 * hosts, so don't bother bailing out on errors.
2751 * If anything goes wrong with it under other filesystems,
2752 * mmap will fail.
2753 */
2754 if (ftruncate(fd, memory))
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002755 perror("ftruncate");
Marcelo Tosattic9027602010-03-01 20:25:08 -03002756
2757#ifdef MAP_POPULATE
2758 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2759 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2760 * to sidestep this quirk.
2761 */
2762 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2763 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2764#else
2765 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2766#endif
2767 if (area == MAP_FAILED) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002768 perror("file_ram_alloc: can't mmap RAM pages");
2769 close(fd);
2770 return (NULL);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002771 }
Alex Williamson04b16652010-07-02 11:13:17 -06002772 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002773 return area;
2774}
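/* Typical use (illustrative): when QEMU is started with
 * "-mem-path /dev/hugepages", guest RAM is backed by an unlinked temporary
 * file such as /dev/hugepages/qemu_back_mem.XXXXXX mapped above; the
 * XXXXXX part is filled in by mkstemp(). */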
2775#endif
2776
Alex Williamsond17b5282010-06-25 11:08:38 -06002777static ram_addr_t find_ram_offset(ram_addr_t size)
2778{
Alex Williamson04b16652010-07-02 11:13:17 -06002779 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06002780 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002781
2782 if (QLIST_EMPTY(&ram_list.blocks))
2783 return 0;
2784
2785 QLIST_FOREACH(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002786 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002787
2788 end = block->offset + block->length;
2789
2790 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2791 if (next_block->offset >= end) {
2792 next = MIN(next, next_block->offset);
2793 }
2794 }
2795 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06002796 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06002797 mingap = next - end;
2798 }
2799 }
Alex Williamson3e837b22011-10-31 08:54:09 -06002800
2801 if (offset == RAM_ADDR_MAX) {
2802 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2803 (uint64_t)size);
2804 abort();
2805 }
2806
Alex Williamson04b16652010-07-02 11:13:17 -06002807 return offset;
2808}
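/* Worked example (hypothetical layout): with existing blocks at
 * [0x0, 0x8000000) and [0x10000000, 0x18000000), find_ram_offset(0x4000000)
 * returns 0x8000000: the 128 MiB hole between the two blocks is the
 * smallest gap that still fits the request, beating the unbounded gap
 * after the last block. */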
2809
2810static ram_addr_t last_ram_offset(void)
2811{
Alex Williamsond17b5282010-06-25 11:08:38 -06002812 RAMBlock *block;
2813 ram_addr_t last = 0;
2814
2815 QLIST_FOREACH(block, &ram_list.blocks, next)
2816 last = MAX(last, block->offset + block->length);
2817
2818 return last;
2819}
2820
Avi Kivityc5705a72011-12-20 15:59:12 +02002821void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
Cam Macdonell84b89d72010-07-26 18:10:57 -06002822{
2823 RAMBlock *new_block, *block;
2824
Avi Kivityc5705a72011-12-20 15:59:12 +02002825 new_block = NULL;
2826 QLIST_FOREACH(block, &ram_list.blocks, next) {
2827 if (block->offset == addr) {
2828 new_block = block;
2829 break;
2830 }
2831 }
2832 assert(new_block);
2833 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002834
2835 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2836 char *id = dev->parent_bus->info->get_dev_path(dev);
2837 if (id) {
2838 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05002839 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002840 }
2841 }
2842 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2843
2844 QLIST_FOREACH(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02002845 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06002846 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2847 new_block->idstr);
2848 abort();
2849 }
2850 }
Avi Kivityc5705a72011-12-20 15:59:12 +02002851}
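/* Example (illustrative values): for a RAM block named "vga.vram" owned by
 * a PCI device, the resulting idstr might be "0000:00:02.0/vga.vram",
 * while machine RAM registered without a device keeps its plain name,
 * e.g. "pc.ram". */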
2852
2853ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2854 MemoryRegion *mr)
2855{
2856 RAMBlock *new_block;
2857
2858 size = TARGET_PAGE_ALIGN(size);
2859 new_block = g_malloc0(sizeof(*new_block));
Cam Macdonell84b89d72010-07-26 18:10:57 -06002860
Avi Kivity7c637362011-12-21 13:09:49 +02002861 new_block->mr = mr;
Jun Nakajima432d2682010-08-31 16:41:25 +01002862 new_block->offset = find_ram_offset(size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002863 if (host) {
2864 new_block->host = host;
Huang Yingcd19cfa2011-03-02 08:56:19 +01002865 new_block->flags |= RAM_PREALLOC_MASK;
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002866 } else {
2867 if (mem_path) {
2868#if defined (__linux__) && !defined(TARGET_S390X)
2869 new_block->host = file_ram_alloc(new_block, size, mem_path);
2870 if (!new_block->host) {
2871 new_block->host = qemu_vmalloc(size);
Andreas Färbere78815a2010-09-25 11:26:05 +00002872 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002873 }
2874#else
2875 fprintf(stderr, "-mem-path option unsupported\n");
2876 exit(1);
2877#endif
2878 } else {
2879#if defined(TARGET_S390X) && defined(CONFIG_KVM)
Christian Borntraegerff836782011-05-10 14:49:10 +02002880 /* S390 KVM requires the topmost vma of the RAM to be smaller than
2881           a system-defined value, which is at least 256GB. Larger systems
2882 have larger values. We put the guest between the end of data
2883 segment (system break) and this value. We use 32GB as a base to
2884 have enough room for the system break to grow. */
2885 new_block->host = mmap((void*)0x800000000, size,
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002886 PROT_EXEC|PROT_READ|PROT_WRITE,
Christian Borntraegerff836782011-05-10 14:49:10 +02002887 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
Alexander Graffb8b2732011-05-20 17:33:28 +02002888 if (new_block->host == MAP_FAILED) {
2889 fprintf(stderr, "Allocating RAM failed\n");
2890 abort();
2891 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002892#else
Jan Kiszka868bb332011-06-21 22:59:09 +02002893 if (xen_enabled()) {
Avi Kivityfce537d2011-12-18 15:48:55 +02002894 xen_ram_alloc(new_block->offset, size, mr);
Jun Nakajima432d2682010-08-31 16:41:25 +01002895 } else {
2896 new_block->host = qemu_vmalloc(size);
2897 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002898#endif
Andreas Färbere78815a2010-09-25 11:26:05 +00002899 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002900 }
2901 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06002902 new_block->length = size;
2903
2904 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2905
Anthony Liguori7267c092011-08-20 22:09:37 -05002906 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
Cam Macdonell84b89d72010-07-26 18:10:57 -06002907 last_ram_offset() >> TARGET_PAGE_BITS);
2908 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2909 0xff, size >> TARGET_PAGE_BITS);
2910
2911 if (kvm_enabled())
2912 kvm_setup_guest_memory(new_block->host, size);
2913
2914 return new_block->offset;
2915}
2916
Avi Kivityc5705a72011-12-20 15:59:12 +02002917ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
pbrook94a6b542009-04-11 17:15:54 +00002918{
Avi Kivityc5705a72011-12-20 15:59:12 +02002919 return qemu_ram_alloc_from_ptr(size, NULL, mr);
pbrook94a6b542009-04-11 17:15:54 +00002920}
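/* Minimal usage sketch (assumed names): a device model that owns its video
 * RAM would allocate it here and obtain a host pointer for local use:
 *
 *     ram_addr_t off = qemu_ram_alloc(vram_size, &s->vram_region);
 *     uint8_t *vram = qemu_get_ram_ptr(off);
 */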
bellarde9a1ab12007-02-08 23:08:38 +00002921
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002922void qemu_ram_free_from_ptr(ram_addr_t addr)
2923{
2924 RAMBlock *block;
2925
2926 QLIST_FOREACH(block, &ram_list.blocks, next) {
2927 if (addr == block->offset) {
2928 QLIST_REMOVE(block, next);
Anthony Liguori7267c092011-08-20 22:09:37 -05002929 g_free(block);
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002930 return;
2931 }
2932 }
2933}
2934
Anthony Liguoric227f092009-10-01 16:12:16 -05002935void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00002936{
Alex Williamson04b16652010-07-02 11:13:17 -06002937 RAMBlock *block;
2938
2939 QLIST_FOREACH(block, &ram_list.blocks, next) {
2940 if (addr == block->offset) {
2941 QLIST_REMOVE(block, next);
Huang Yingcd19cfa2011-03-02 08:56:19 +01002942 if (block->flags & RAM_PREALLOC_MASK) {
2943 ;
2944 } else if (mem_path) {
Alex Williamson04b16652010-07-02 11:13:17 -06002945#if defined (__linux__) && !defined(TARGET_S390X)
2946 if (block->fd) {
2947 munmap(block->host, block->length);
2948 close(block->fd);
2949 } else {
2950 qemu_vfree(block->host);
2951 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01002952#else
2953 abort();
Alex Williamson04b16652010-07-02 11:13:17 -06002954#endif
2955 } else {
2956#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2957 munmap(block->host, block->length);
2958#else
Jan Kiszka868bb332011-06-21 22:59:09 +02002959 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002960 xen_invalidate_map_cache_entry(block->host);
Jun Nakajima432d2682010-08-31 16:41:25 +01002961 } else {
2962 qemu_vfree(block->host);
2963 }
Alex Williamson04b16652010-07-02 11:13:17 -06002964#endif
2965 }
Anthony Liguori7267c092011-08-20 22:09:37 -05002966 g_free(block);
Alex Williamson04b16652010-07-02 11:13:17 -06002967 return;
2968 }
2969 }
2970
bellarde9a1ab12007-02-08 23:08:38 +00002971}
2972
Huang Yingcd19cfa2011-03-02 08:56:19 +01002973#ifndef _WIN32
2974void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2975{
2976 RAMBlock *block;
2977 ram_addr_t offset;
2978 int flags;
2979 void *area, *vaddr;
2980
2981 QLIST_FOREACH(block, &ram_list.blocks, next) {
2982 offset = addr - block->offset;
2983 if (offset < block->length) {
2984 vaddr = block->host + offset;
2985 if (block->flags & RAM_PREALLOC_MASK) {
2986 ;
2987 } else {
2988 flags = MAP_FIXED;
2989 munmap(vaddr, length);
2990 if (mem_path) {
2991#if defined(__linux__) && !defined(TARGET_S390X)
2992 if (block->fd) {
2993#ifdef MAP_POPULATE
2994 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
2995 MAP_PRIVATE;
2996#else
2997 flags |= MAP_PRIVATE;
2998#endif
2999 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3000 flags, block->fd, offset);
3001 } else {
3002 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3003 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3004 flags, -1, 0);
3005 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01003006#else
3007 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01003008#endif
3009 } else {
3010#if defined(TARGET_S390X) && defined(CONFIG_KVM)
3011 flags |= MAP_SHARED | MAP_ANONYMOUS;
3012 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
3013 flags, -1, 0);
3014#else
3015 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3016 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3017 flags, -1, 0);
3018#endif
3019 }
3020 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00003021 fprintf(stderr, "Could not remap addr: "
3022 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01003023 length, addr);
3024 exit(1);
3025 }
3026 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
3027 }
3028 return;
3029 }
3030 }
3031}
3032#endif /* !_WIN32 */
3033
pbrookdc828ca2009-04-09 22:21:07 +00003034/* Return a host pointer to ram allocated with qemu_ram_alloc.
pbrook5579c7f2009-04-11 14:47:08 +00003035 With the exception of the softmmu code in this file, this should
3036 only be used for local memory (e.g. video ram) that the device owns,
3037 and knows it isn't going to access beyond the end of the block.
3038
3039 It should not be used for general purpose DMA.
3040 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
3041 */
Anthony Liguoric227f092009-10-01 16:12:16 -05003042void *qemu_get_ram_ptr(ram_addr_t addr)
pbrookdc828ca2009-04-09 22:21:07 +00003043{
pbrook94a6b542009-04-11 17:15:54 +00003044 RAMBlock *block;
3045
Alex Williamsonf471a172010-06-11 11:11:42 -06003046 QLIST_FOREACH(block, &ram_list.blocks, next) {
3047 if (addr - block->offset < block->length) {
Vincent Palatin7d82af32011-03-10 15:47:46 -05003048            /* Move this entry to the start of the list.  */
3049 if (block != QLIST_FIRST(&ram_list.blocks)) {
3050 QLIST_REMOVE(block, next);
3051 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
3052 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003053 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003054 /* We need to check if the requested address is in the RAM
3055 * because we don't want to map the entire memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003056 * In that case just map until the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01003057 */
3058 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003059 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01003060 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003061 block->host =
3062 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01003063 }
3064 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003065 return block->host + (addr - block->offset);
3066 }
pbrook94a6b542009-04-11 17:15:54 +00003067 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003068
3069 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3070 abort();
3071
3072 return NULL;
pbrookdc828ca2009-04-09 22:21:07 +00003073}
3074
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02003075/* Return a host pointer to ram allocated with qemu_ram_alloc.
3076 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3077 */
3078void *qemu_safe_ram_ptr(ram_addr_t addr)
3079{
3080 RAMBlock *block;
3081
3082 QLIST_FOREACH(block, &ram_list.blocks, next) {
3083 if (addr - block->offset < block->length) {
Jan Kiszka868bb332011-06-21 22:59:09 +02003084 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003085 /* We need to check if the requested address is in the RAM
3086 * because we don't want to map the entire memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003087 * In that case just map until the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01003088 */
3089 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003090 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01003091 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003092 block->host =
3093 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01003094 }
3095 }
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02003096 return block->host + (addr - block->offset);
3097 }
3098 }
3099
3100 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3101 abort();
3102
3103 return NULL;
3104}
3105
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003106/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
3107 * but takes a size argument */
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003108void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003109{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003110 if (*size == 0) {
3111 return NULL;
3112 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003113 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003114 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02003115 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003116 RAMBlock *block;
3117
3118 QLIST_FOREACH(block, &ram_list.blocks, next) {
3119 if (addr - block->offset < block->length) {
3120 if (addr - block->offset + *size > block->length)
3121 *size = block->length - addr + block->offset;
3122 return block->host + (addr - block->offset);
3123 }
3124 }
3125
3126 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3127 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003128 }
3129}
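/* Usage sketch: a caller that must not run past the end of a RAM block
 * lets the function shrink the size for it:
 *
 *     ram_addr_t sz = len;                      // in/out parameter
 *     void *p = qemu_ram_ptr_length(addr, &sz);
 *     memcpy(p, buf, sz);                       // sz may be less than len
 */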
3130
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003131void qemu_put_ram_ptr(void *addr)
3132{
3133 trace_qemu_put_ram_ptr(addr);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003134}
3135
Marcelo Tosattie8902612010-10-11 15:31:19 -03003136int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00003137{
pbrook94a6b542009-04-11 17:15:54 +00003138 RAMBlock *block;
3139 uint8_t *host = ptr;
3140
Jan Kiszka868bb332011-06-21 22:59:09 +02003141 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003142 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003143 return 0;
3144 }
3145
Alex Williamsonf471a172010-06-11 11:11:42 -06003146 QLIST_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003147        /* This case occurs when the block is not mapped. */
3148 if (block->host == NULL) {
3149 continue;
3150 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003151 if (host - block->host < block->length) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03003152 *ram_addr = block->offset + (host - block->host);
3153 return 0;
Alex Williamsonf471a172010-06-11 11:11:42 -06003154 }
pbrook94a6b542009-04-11 17:15:54 +00003155 }
Jun Nakajima432d2682010-08-31 16:41:25 +01003156
Marcelo Tosattie8902612010-10-11 15:31:19 -03003157 return -1;
3158}
Alex Williamsonf471a172010-06-11 11:11:42 -06003159
Marcelo Tosattie8902612010-10-11 15:31:19 -03003160/* Some of the softmmu routines need to translate from a host pointer
3161 (typically a TLB entry) back to a ram offset. */
3162ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3163{
3164 ram_addr_t ram_addr;
Alex Williamsonf471a172010-06-11 11:11:42 -06003165
Marcelo Tosattie8902612010-10-11 15:31:19 -03003166 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3167 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3168 abort();
3169 }
3170 return ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00003171}
3172
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003173static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
3174 unsigned size)
bellard33417e72003-08-10 21:47:01 +00003175{
pbrook67d3b952006-12-18 05:03:52 +00003176#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00003177 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
pbrook67d3b952006-12-18 05:03:52 +00003178#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003179#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003180 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00003181#endif
3182 return 0;
3183}
3184
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003185static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
3186 uint64_t val, unsigned size)
blueswir1e18231a2008-10-06 18:46:28 +00003187{
3188#ifdef DEBUG_UNASSIGNED
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003189 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
blueswir1e18231a2008-10-06 18:46:28 +00003190#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003191#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003192 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00003193#endif
3194}
3195
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003196static const MemoryRegionOps unassigned_mem_ops = {
3197 .read = unassigned_mem_read,
3198 .write = unassigned_mem_write,
3199 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00003200};
3201
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003202static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
3203 unsigned size)
3204{
3205 abort();
3206}
3207
3208static void error_mem_write(void *opaque, target_phys_addr_t addr,
3209 uint64_t value, unsigned size)
3210{
3211 abort();
3212}
3213
3214static const MemoryRegionOps error_mem_ops = {
3215 .read = error_mem_read,
3216 .write = error_mem_write,
3217 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00003218};
3219
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003220static const MemoryRegionOps rom_mem_ops = {
3221 .read = error_mem_read,
3222 .write = unassigned_mem_write,
3223 .endianness = DEVICE_NATIVE_ENDIAN,
3224};
3225
3226static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
3227 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00003228{
bellard3a7d9292005-08-21 09:26:42 +00003229 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003230 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003231 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3232#if !defined(CONFIG_USER_ONLY)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003233 tb_invalidate_phys_page_fast(ram_addr, size);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003234 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003235#endif
3236 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003237 switch (size) {
3238 case 1:
3239 stb_p(qemu_get_ram_ptr(ram_addr), val);
3240 break;
3241 case 2:
3242 stw_p(qemu_get_ram_ptr(ram_addr), val);
3243 break;
3244 case 4:
3245 stl_p(qemu_get_ram_ptr(ram_addr), val);
3246 break;
3247 default:
3248 abort();
3249 }
bellardf23db162005-08-21 19:12:28 +00003250 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003251 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00003252 /* we remove the notdirty callback only if the code has been
3253 flushed */
3254 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00003255 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00003256}
3257
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003258static const MemoryRegionOps notdirty_mem_ops = {
3259 .read = error_mem_read,
3260 .write = notdirty_mem_write,
3261 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00003262};
3263
pbrook0f459d12008-06-09 00:20:13 +00003264/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00003265static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00003266{
3267 CPUState *env = cpu_single_env;
aliguori06d55cc2008-11-18 20:24:06 +00003268 target_ulong pc, cs_base;
3269 TranslationBlock *tb;
pbrook0f459d12008-06-09 00:20:13 +00003270 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00003271 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00003272 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00003273
aliguori06d55cc2008-11-18 20:24:06 +00003274 if (env->watchpoint_hit) {
3275 /* We re-entered the check after replacing the TB. Now raise
3276         * the debug interrupt so that it will trigger after the
3277 * current instruction. */
3278 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3279 return;
3280 }
pbrook2e70f6e2008-06-29 01:03:05 +00003281 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003282 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00003283 if ((vaddr == (wp->vaddr & len_mask) ||
3284 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00003285 wp->flags |= BP_WATCHPOINT_HIT;
3286 if (!env->watchpoint_hit) {
3287 env->watchpoint_hit = wp;
3288 tb = tb_find_pc(env->mem_io_pc);
3289 if (!tb) {
3290 cpu_abort(env, "check_watchpoint: could not find TB for "
3291 "pc=%p", (void *)env->mem_io_pc);
3292 }
Stefan Weil618ba8e2011-04-18 06:39:53 +00003293 cpu_restore_state(tb, env, env->mem_io_pc);
aliguori6e140f22008-11-18 20:37:55 +00003294 tb_phys_invalidate(tb, -1);
3295 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3296 env->exception_index = EXCP_DEBUG;
3297 } else {
3298 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3299 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3300 }
3301 cpu_resume_from_signal(env, NULL);
aliguori06d55cc2008-11-18 20:24:06 +00003302 }
aliguori6e140f22008-11-18 20:37:55 +00003303 } else {
3304 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00003305 }
3306 }
3307}
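/* Example (assuming wp->len_mask holds ~(len - 1), as set up by
 * cpu_watchpoint_insert()): a 4-byte watchpoint at 0x1000 has
 * wp->len_mask == ~3.  A one-byte store to 0x1002 arrives here with the
 * len_mask argument == ~0; the first clause fails (0x1002 != 0x1000), but
 * (0x1002 & wp->len_mask) == 0x1000 == wp->vaddr, so the watchpoint hits. */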
3308
pbrook6658ffb2007-03-16 23:58:11 +00003309/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3310 so these check for a hit then pass through to the normal out-of-line
3311 phys routines. */
Avi Kivity1ec9b902012-01-02 12:47:48 +02003312static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
3313 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00003314{
Avi Kivity1ec9b902012-01-02 12:47:48 +02003315 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
3316 switch (size) {
3317 case 1: return ldub_phys(addr);
3318 case 2: return lduw_phys(addr);
3319 case 4: return ldl_phys(addr);
3320 default: abort();
3321 }
pbrook6658ffb2007-03-16 23:58:11 +00003322}
3323
Avi Kivity1ec9b902012-01-02 12:47:48 +02003324static void watch_mem_write(void *opaque, target_phys_addr_t addr,
3325 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00003326{
Avi Kivity1ec9b902012-01-02 12:47:48 +02003327 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
3328 switch (size) {
3329    case 1: stb_phys(addr, val); break;
3330    case 2: stw_phys(addr, val); break;
3331    case 4: stl_phys(addr, val); break;
3332 default: abort();
3333 }
pbrook6658ffb2007-03-16 23:58:11 +00003334}
3335
Avi Kivity1ec9b902012-01-02 12:47:48 +02003336static const MemoryRegionOps watch_mem_ops = {
3337 .read = watch_mem_read,
3338 .write = watch_mem_write,
3339 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00003340};
pbrook6658ffb2007-03-16 23:58:11 +00003341
Avi Kivity70c68e42012-01-02 12:32:48 +02003342static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
3343 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003344{
Avi Kivity70c68e42012-01-02 12:32:48 +02003345 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003346 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02003347 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00003348#if defined(DEBUG_SUBPAGE)
3349 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3350 mmio, len, addr, idx);
3351#endif
blueswir1db7b5422007-05-26 17:36:03 +00003352
Avi Kivity5312bd82012-02-12 18:32:55 +02003353 section = &phys_sections[mmio->sub_section[idx]];
3354 addr += mmio->base;
3355 addr -= section->offset_within_address_space;
3356 addr += section->offset_within_region;
3357 return io_mem_read(section->mr->ram_addr, addr, len);
blueswir1db7b5422007-05-26 17:36:03 +00003358}
3359
Avi Kivity70c68e42012-01-02 12:32:48 +02003360static void subpage_write(void *opaque, target_phys_addr_t addr,
3361 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003362{
Avi Kivity70c68e42012-01-02 12:32:48 +02003363 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003364 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02003365 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00003366#if defined(DEBUG_SUBPAGE)
Avi Kivity70c68e42012-01-02 12:32:48 +02003367 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
3368 " idx %d value %"PRIx64"\n",
Richard Hendersonf6405242010-04-22 16:47:31 -07003369 __func__, mmio, len, addr, idx, value);
blueswir1db7b5422007-05-26 17:36:03 +00003370#endif
Richard Hendersonf6405242010-04-22 16:47:31 -07003371
Avi Kivity5312bd82012-02-12 18:32:55 +02003372 section = &phys_sections[mmio->sub_section[idx]];
3373 addr += mmio->base;
3374 addr -= section->offset_within_address_space;
3375 addr += section->offset_within_region;
3376 io_mem_write(section->mr->ram_addr, addr, value, len);
blueswir1db7b5422007-05-26 17:36:03 +00003377}
3378
Avi Kivity70c68e42012-01-02 12:32:48 +02003379static const MemoryRegionOps subpage_ops = {
3380 .read = subpage_read,
3381 .write = subpage_write,
3382 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00003383};
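/* Worked example (hypothetical numbers): consider a subpage based at
 * 0x2000 holding a section with offset_within_address_space == 0x2200 and
 * offset_within_region == 0x80.  An access at page offset 0x204 resolves
 * to 0x204 + 0x2000 - 0x2200 + 0x80 = 0x84 within the target region. */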
3384
Avi Kivityde712f92012-01-02 12:41:07 +02003385static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
3386 unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01003387{
3388 ram_addr_t raddr = addr;
3389 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02003390 switch (size) {
3391 case 1: return ldub_p(ptr);
3392 case 2: return lduw_p(ptr);
3393 case 4: return ldl_p(ptr);
3394 default: abort();
3395 }
Andreas Färber56384e82011-11-30 16:26:21 +01003396}
3397
Avi Kivityde712f92012-01-02 12:41:07 +02003398static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
3399 uint64_t value, unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01003400{
3401 ram_addr_t raddr = addr;
3402 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02003403 switch (size) {
3404 case 1: return stb_p(ptr, value);
3405 case 2: return stw_p(ptr, value);
3406 case 4: return stl_p(ptr, value);
3407 default: abort();
3408 }
Andreas Färber56384e82011-11-30 16:26:21 +01003409}
3410
Avi Kivityde712f92012-01-02 12:41:07 +02003411static const MemoryRegionOps subpage_ram_ops = {
3412 .read = subpage_ram_read,
3413 .write = subpage_ram_write,
3414 .endianness = DEVICE_NATIVE_ENDIAN,
Andreas Färber56384e82011-11-30 16:26:21 +01003415};
3416
Anthony Liguoric227f092009-10-01 16:12:16 -05003417static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02003418 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00003419{
3420 int idx, eidx;
3421
3422 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3423 return -1;
3424 idx = SUBPAGE_IDX(start);
3425 eidx = SUBPAGE_IDX(end);
3426#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00003427    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
blueswir1db7b5422007-05-26 17:36:03 +00003428           __func__, mmio, start, end, idx, eidx, section);
3429#endif
Avi Kivity5312bd82012-02-12 18:32:55 +02003430 if (memory_region_is_ram(phys_sections[section].mr)) {
3431 MemoryRegionSection new_section = phys_sections[section];
3432 new_section.mr = &io_mem_subpage_ram;
3433 section = phys_section_add(&new_section);
Andreas Färber56384e82011-11-30 16:26:21 +01003434 }
blueswir1db7b5422007-05-26 17:36:03 +00003435 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02003436 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00003437 }
3438
3439 return 0;
3440}
3441
Avi Kivity5312bd82012-02-12 18:32:55 +02003442static subpage_t *subpage_init (target_phys_addr_t base, uint16_t *section_ind,
3443 uint16_t orig_section)
blueswir1db7b5422007-05-26 17:36:03 +00003444{
Anthony Liguoric227f092009-10-01 16:12:16 -05003445 subpage_t *mmio;
Avi Kivity5312bd82012-02-12 18:32:55 +02003446 MemoryRegionSection section = {
3447 .offset_within_address_space = base,
3448 .size = TARGET_PAGE_SIZE,
3449 };
blueswir1db7b5422007-05-26 17:36:03 +00003450
Anthony Liguori7267c092011-08-20 22:09:37 -05003451 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00003452
3453 mmio->base = base;
Avi Kivity70c68e42012-01-02 12:32:48 +02003454 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
3455 "subpage", TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02003456 mmio->iomem.subpage = true;
Avi Kivity5312bd82012-02-12 18:32:55 +02003457 section.mr = &mmio->iomem;
blueswir1db7b5422007-05-26 17:36:03 +00003458#if defined(DEBUG_SUBPAGE)
aliguori1eec6142009-02-05 22:06:18 +00003459    printf("%s: %p base " TARGET_FMT_plx " len %08x section %d\n", __func__,
3460           mmio, base, TARGET_PAGE_SIZE, orig_section);
blueswir1db7b5422007-05-26 17:36:03 +00003461#endif
Avi Kivity5312bd82012-02-12 18:32:55 +02003462 *section_ind = phys_section_add(&section);
3463 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_section);
blueswir1db7b5422007-05-26 17:36:03 +00003464
3465 return mmio;
3466}
3467
aliguori88715652009-02-11 15:20:58 +00003468static int get_free_io_mem_idx(void)
3469{
3470 int i;
3471
3472 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3473 if (!io_mem_used[i]) {
3474 io_mem_used[i] = 1;
3475 return i;
3476 }
Riku Voipioc6703b42009-12-03 15:56:05 +02003477    fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
aliguori88715652009-02-11 15:20:58 +00003478 return -1;
3479}
3480
bellard33417e72003-08-10 21:47:01 +00003481/* Register a MemoryRegion as an io zone; accesses to the zone are
3482   dispatched through the region's MemoryRegionOps callbacks.
Paul Brook0b4e6e32009-04-30 18:37:55 +01003483   If io_index is non-zero, the corresponding io zone is
blueswir13ee89922008-01-02 19:45:26 +00003484   modified. If it is zero, a new io zone is allocated. The return
blueswir14254fab2008-01-01 16:57:19 +00003485   value can be used with cpu_register_physical_memory(). (-1) is
3486   returned on error. */
Avi Kivitya621f382012-01-02 13:12:08 +02003488static int cpu_register_io_memory_fixed(int io_index, MemoryRegion *mr)
bellard33417e72003-08-10 21:47:01 +00003489{
bellard33417e72003-08-10 21:47:01 +00003490 if (io_index <= 0) {
aliguori88715652009-02-11 15:20:58 +00003491 io_index = get_free_io_mem_idx();
3492 if (io_index == -1)
3493 return io_index;
bellard33417e72003-08-10 21:47:01 +00003494 } else {
3495 if (io_index >= IO_MEM_NB_ENTRIES)
3496 return -1;
3497 }
bellardb5ff1b32005-11-26 10:38:39 +00003498
Avi Kivitya621f382012-01-02 13:12:08 +02003499 io_mem_region[io_index] = mr;
Richard Hendersonf6405242010-04-22 16:47:31 -07003500
Avi Kivity11c7ef02012-01-02 17:21:07 +02003501 return io_index;
bellard33417e72003-08-10 21:47:01 +00003502}
bellard61382a52003-10-27 21:22:23 +00003503
Avi Kivitya621f382012-01-02 13:12:08 +02003504int cpu_register_io_memory(MemoryRegion *mr)
Avi Kivity1eed09c2009-06-14 11:38:51 +03003505{
Avi Kivitya621f382012-01-02 13:12:08 +02003506 return cpu_register_io_memory_fixed(0, mr);
Avi Kivity1eed09c2009-06-14 11:38:51 +03003507}
3508
Avi Kivity11c7ef02012-01-02 17:21:07 +02003509void cpu_unregister_io_memory(int io_index)
aliguori88715652009-02-11 15:20:58 +00003510{
Avi Kivitya621f382012-01-02 13:12:08 +02003511 io_mem_region[io_index] = NULL;
aliguori88715652009-02-11 15:20:58 +00003512 io_mem_used[io_index] = 0;
3513}
3514
Avi Kivity5312bd82012-02-12 18:32:55 +02003515static uint16_t dummy_section(MemoryRegion *mr)
3516{
3517 MemoryRegionSection section = {
3518 .mr = mr,
3519 .offset_within_address_space = 0,
3520 .offset_within_region = 0,
3521 .size = UINT64_MAX,
3522 };
3523
3524 return phys_section_add(&section);
3525}
3526
Avi Kivitye9179ce2009-06-14 11:38:52 +03003527static void io_mem_init(void)
3528{
3529 int i;
3530
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003531 /* Must be first: */
3532 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
3533 assert(io_mem_ram.ram_addr == 0);
3534 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
3535 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
3536 "unassigned", UINT64_MAX);
3537 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
3538 "notdirty", UINT64_MAX);
Avi Kivityde712f92012-01-02 12:41:07 +02003539 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
3540 "subpage-ram", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003541 for (i=0; i<5; i++)
3542 io_mem_used[i] = 1;
3543
Avi Kivity1ec9b902012-01-02 12:47:48 +02003544 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
3545 "watch", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003546}
3547
Avi Kivity50c1e142012-02-08 21:36:02 +02003548static void core_begin(MemoryListener *listener)
3549{
Avi Kivity54688b12012-02-09 17:34:32 +02003550 destroy_all_mappings();
Avi Kivity5312bd82012-02-12 18:32:55 +02003551 phys_sections_clear();
Avi Kivityd6f2ea22012-02-12 20:12:49 +02003552 phys_map.u.node = PHYS_MAP_NODE_NIL;
Avi Kivity5312bd82012-02-12 18:32:55 +02003553 phys_section_unassigned = dummy_section(&io_mem_unassigned);
Avi Kivity50c1e142012-02-08 21:36:02 +02003554}
3555
3556static void core_commit(MemoryListener *listener)
3557{
Avi Kivity117712c2012-02-12 21:23:17 +02003558 CPUState *env;
3559
3560 /* since each CPU stores ram addresses in its TLB cache, we must
3561 reset the modified entries */
3562 /* XXX: slow ! */
3563 for(env = first_cpu; env != NULL; env = env->next_cpu) {
3564 tlb_flush(env, 1);
3565 }
Avi Kivity50c1e142012-02-08 21:36:02 +02003566}
3567
Avi Kivity93632742012-02-08 16:54:16 +02003568static void core_region_add(MemoryListener *listener,
3569 MemoryRegionSection *section)
3570{
Avi Kivity4855d412012-02-08 21:16:05 +02003571 cpu_register_physical_memory_log(section, section->readonly);
Avi Kivity93632742012-02-08 16:54:16 +02003572}
3573
3574static void core_region_del(MemoryListener *listener,
3575 MemoryRegionSection *section)
3576{
Avi Kivity93632742012-02-08 16:54:16 +02003577}
3578
Avi Kivity50c1e142012-02-08 21:36:02 +02003579static void core_region_nop(MemoryListener *listener,
3580 MemoryRegionSection *section)
3581{
Avi Kivity54688b12012-02-09 17:34:32 +02003582 cpu_register_physical_memory_log(section, section->readonly);
Avi Kivity50c1e142012-02-08 21:36:02 +02003583}
3584
Avi Kivity93632742012-02-08 16:54:16 +02003585static void core_log_start(MemoryListener *listener,
3586 MemoryRegionSection *section)
3587{
3588}
3589
3590static void core_log_stop(MemoryListener *listener,
3591 MemoryRegionSection *section)
3592{
3593}
3594
3595static void core_log_sync(MemoryListener *listener,
3596 MemoryRegionSection *section)
3597{
3598}
3599
3600static void core_log_global_start(MemoryListener *listener)
3601{
3602 cpu_physical_memory_set_dirty_tracking(1);
3603}
3604
3605static void core_log_global_stop(MemoryListener *listener)
3606{
3607 cpu_physical_memory_set_dirty_tracking(0);
3608}
3609
3610static void core_eventfd_add(MemoryListener *listener,
3611 MemoryRegionSection *section,
3612 bool match_data, uint64_t data, int fd)
3613{
3614}
3615
3616static void core_eventfd_del(MemoryListener *listener,
3617 MemoryRegionSection *section,
3618 bool match_data, uint64_t data, int fd)
3619{
3620}
3621
Avi Kivity50c1e142012-02-08 21:36:02 +02003622static void io_begin(MemoryListener *listener)
3623{
3624}
3625
3626static void io_commit(MemoryListener *listener)
3627{
3628}
3629
Avi Kivity4855d412012-02-08 21:16:05 +02003630static void io_region_add(MemoryListener *listener,
3631 MemoryRegionSection *section)
3632{
3633 iorange_init(&section->mr->iorange, &memory_region_iorange_ops,
3634 section->offset_within_address_space, section->size);
3635 ioport_register(&section->mr->iorange);
3636}
3637
3638static void io_region_del(MemoryListener *listener,
3639 MemoryRegionSection *section)
3640{
3641 isa_unassign_ioport(section->offset_within_address_space, section->size);
3642}
3643
Avi Kivity50c1e142012-02-08 21:36:02 +02003644static void io_region_nop(MemoryListener *listener,
3645 MemoryRegionSection *section)
3646{
3647}
3648
Avi Kivity4855d412012-02-08 21:16:05 +02003649static void io_log_start(MemoryListener *listener,
3650 MemoryRegionSection *section)
3651{
3652}
3653
3654static void io_log_stop(MemoryListener *listener,
3655 MemoryRegionSection *section)
3656{
3657}
3658
3659static void io_log_sync(MemoryListener *listener,
3660 MemoryRegionSection *section)
3661{
3662}
3663
3664static void io_log_global_start(MemoryListener *listener)
3665{
3666}
3667
3668static void io_log_global_stop(MemoryListener *listener)
3669{
3670}
3671
3672static void io_eventfd_add(MemoryListener *listener,
3673 MemoryRegionSection *section,
3674 bool match_data, uint64_t data, int fd)
3675{
3676}
3677
3678static void io_eventfd_del(MemoryListener *listener,
3679 MemoryRegionSection *section,
3680 bool match_data, uint64_t data, int fd)
3681{
3682}
3683
Avi Kivity93632742012-02-08 16:54:16 +02003684static MemoryListener core_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02003685 .begin = core_begin,
3686 .commit = core_commit,
Avi Kivity93632742012-02-08 16:54:16 +02003687 .region_add = core_region_add,
3688 .region_del = core_region_del,
Avi Kivity50c1e142012-02-08 21:36:02 +02003689 .region_nop = core_region_nop,
Avi Kivity93632742012-02-08 16:54:16 +02003690 .log_start = core_log_start,
3691 .log_stop = core_log_stop,
3692 .log_sync = core_log_sync,
3693 .log_global_start = core_log_global_start,
3694 .log_global_stop = core_log_global_stop,
3695 .eventfd_add = core_eventfd_add,
3696 .eventfd_del = core_eventfd_del,
3697 .priority = 0,
3698};
3699
Avi Kivity4855d412012-02-08 21:16:05 +02003700static MemoryListener io_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02003701 .begin = io_begin,
3702 .commit = io_commit,
Avi Kivity4855d412012-02-08 21:16:05 +02003703 .region_add = io_region_add,
3704 .region_del = io_region_del,
Avi Kivity50c1e142012-02-08 21:36:02 +02003705 .region_nop = io_region_nop,
Avi Kivity4855d412012-02-08 21:16:05 +02003706 .log_start = io_log_start,
3707 .log_stop = io_log_stop,
3708 .log_sync = io_log_sync,
3709 .log_global_start = io_log_global_start,
3710 .log_global_stop = io_log_global_stop,
3711 .eventfd_add = io_eventfd_add,
3712 .eventfd_del = io_eventfd_del,
3713 .priority = 0,
3714};
3715
Avi Kivity62152b82011-07-26 14:26:14 +03003716static void memory_map_init(void)
3717{
Anthony Liguori7267c092011-08-20 22:09:37 -05003718 system_memory = g_malloc(sizeof(*system_memory));
Avi Kivity8417ceb2011-08-03 11:56:14 +03003719 memory_region_init(system_memory, "system", INT64_MAX);
Avi Kivity62152b82011-07-26 14:26:14 +03003720 set_system_memory_map(system_memory);
Avi Kivity309cb472011-08-08 16:09:03 +03003721
Anthony Liguori7267c092011-08-20 22:09:37 -05003722 system_io = g_malloc(sizeof(*system_io));
Avi Kivity309cb472011-08-08 16:09:03 +03003723 memory_region_init(system_io, "io", 65536);
3724 set_system_io_map(system_io);
Avi Kivity93632742012-02-08 16:54:16 +02003725
Avi Kivity4855d412012-02-08 21:16:05 +02003726 memory_listener_register(&core_memory_listener, system_memory);
3727 memory_listener_register(&io_memory_listener, system_io);
Avi Kivity62152b82011-07-26 14:26:14 +03003728}
3729
3730MemoryRegion *get_system_memory(void)
3731{
3732 return system_memory;
3733}
3734
Avi Kivity309cb472011-08-08 16:09:03 +03003735MemoryRegion *get_system_io(void)
3736{
3737 return system_io;
3738}
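/* Typical board-level usage (sketch): machine init code maps its RAM into
 * the address space created here:
 *
 *     memory_region_add_subregion(get_system_memory(), 0, machine_ram);
 */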
3739
pbrooke2eef172008-06-08 01:09:01 +00003740#endif /* !defined(CONFIG_USER_ONLY) */
3741
bellard13eb76e2004-01-24 15:23:36 +00003742/* physical memory access (slow version, mainly for debug) */
3743#if defined(CONFIG_USER_ONLY)
Paul Brooka68fe892010-03-01 00:08:59 +00003744int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3745 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003746{
3747 int l, flags;
3748 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00003749 void * p;
bellard13eb76e2004-01-24 15:23:36 +00003750
3751 while (len > 0) {
3752 page = addr & TARGET_PAGE_MASK;
3753 l = (page + TARGET_PAGE_SIZE) - addr;
3754 if (l > len)
3755 l = len;
3756 flags = page_get_flags(page);
3757 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00003758 return -1;
bellard13eb76e2004-01-24 15:23:36 +00003759 if (is_write) {
3760 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00003761 return -1;
bellard579a97f2007-11-11 14:26:47 +00003762 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003763 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00003764 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003765 memcpy(p, buf, l);
3766 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00003767 } else {
3768 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00003769 return -1;
bellard579a97f2007-11-11 14:26:47 +00003770 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003771 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00003772 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003773 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00003774 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00003775 }
3776 len -= l;
3777 buf += l;
3778 addr += l;
3779 }
Paul Brooka68fe892010-03-01 00:08:59 +00003780 return 0;
bellard13eb76e2004-01-24 15:23:36 +00003781}
bellard8df1cd02005-01-28 22:37:22 +00003782
bellard13eb76e2004-01-24 15:23:36 +00003783#else
Anthony Liguoric227f092009-10-01 16:12:16 -05003784void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
bellard13eb76e2004-01-24 15:23:36 +00003785 int len, int is_write)
3786{
3787 int l, io_index;
3788 uint8_t *ptr;
3789 uint32_t val;
Anthony Liguoric227f092009-10-01 16:12:16 -05003790 target_phys_addr_t page;
Anthony PERARD8ca56922011-07-15 04:32:53 +00003791 ram_addr_t pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003792 PhysPageDesc p;
ths3b46e622007-09-17 08:09:54 +00003793
bellard13eb76e2004-01-24 15:23:36 +00003794 while (len > 0) {
3795 page = addr & TARGET_PAGE_MASK;
3796 l = (page + TARGET_PAGE_SIZE) - addr;
3797 if (l > len)
3798 l = len;
bellard92e873b2004-05-21 14:52:29 +00003799 p = phys_page_find(page >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003800 pd = p.phys_offset;
ths3b46e622007-09-17 08:09:54 +00003801
bellard13eb76e2004-01-24 15:23:36 +00003802 if (is_write) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003803 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003804 target_phys_addr_t addr1;
Avi Kivity11c7ef02012-01-02 17:21:07 +02003805 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003806 addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
bellard6a00d602005-11-21 23:25:50 +00003807 /* XXX: could force cpu_single_env to NULL to avoid
3808 potential bugs */
aurel326c2934d2009-02-18 21:37:17 +00003809 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003810 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003811 val = ldl_p(buf);
Avi Kivityacbbec52011-11-21 12:27:03 +02003812 io_mem_write(io_index, addr1, val, 4);
bellard13eb76e2004-01-24 15:23:36 +00003813 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003814 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003815 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003816 val = lduw_p(buf);
Avi Kivityacbbec52011-11-21 12:27:03 +02003817 io_mem_write(io_index, addr1, val, 2);
bellard13eb76e2004-01-24 15:23:36 +00003818 l = 2;
3819 } else {
bellard1c213d12005-09-03 10:49:04 +00003820 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003821 val = ldub_p(buf);
Avi Kivityacbbec52011-11-21 12:27:03 +02003822 io_mem_write(io_index, addr1, val, 1);
bellard13eb76e2004-01-24 15:23:36 +00003823 l = 1;
3824 }
3825 } else {
Anthony PERARD8ca56922011-07-15 04:32:53 +00003826 ram_addr_t addr1;
bellardb448f2f2004-02-25 23:24:04 +00003827 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
bellard13eb76e2004-01-24 15:23:36 +00003828 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003829 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00003830 memcpy(ptr, buf, l);
bellard3a7d9292005-08-21 09:26:42 +00003831 if (!cpu_physical_memory_is_dirty(addr1)) {
3832 /* invalidate code */
3833 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3834 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003835 cpu_physical_memory_set_dirty_flags(
3836 addr1, (0xff & ~CODE_DIRTY_FLAG));
bellard3a7d9292005-08-21 09:26:42 +00003837 }
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003838 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00003839 }
3840 } else {
Avi Kivity1d393fa2012-01-01 21:15:42 +02003841 if (!is_ram_rom_romd(pd)) {
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003842 target_phys_addr_t addr1;
bellard13eb76e2004-01-24 15:23:36 +00003843 /* I/O case */
Avi Kivity11c7ef02012-01-02 17:21:07 +02003844 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003845 addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
aurel326c2934d2009-02-18 21:37:17 +00003846 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003847 /* 32 bit read access */
Avi Kivityacbbec52011-11-21 12:27:03 +02003848 val = io_mem_read(io_index, addr1, 4);
bellardc27004e2005-01-03 23:35:10 +00003849 stl_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003850 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003851 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003852 /* 16 bit read access */
Avi Kivityacbbec52011-11-21 12:27:03 +02003853 val = io_mem_read(io_index, addr1, 2);
bellardc27004e2005-01-03 23:35:10 +00003854 stw_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003855 l = 2;
3856 } else {
bellard1c213d12005-09-03 10:49:04 +00003857 /* 8 bit read access */
Avi Kivityacbbec52011-11-21 12:27:03 +02003858 val = io_mem_read(io_index, addr1, 1);
bellardc27004e2005-01-03 23:35:10 +00003859 stb_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003860 l = 1;
3861 }
3862 } else {
3863 /* RAM case */
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003864 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
3865 memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
3866 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00003867 }
3868 }
3869 len -= l;
3870 buf += l;
3871 addr += l;
3872 }
3873}
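/*
 * Illustrative sketch (editor's addition, kept out of the build): how a
 * device model might use cpu_physical_memory_rw() to DMA a buffer into
 * guest physical memory and read it back.  The guest address "dma_addr"
 * and the fill pattern are hypothetical.
 */
#if 0
static void example_dma_roundtrip(target_phys_addr_t dma_addr)
{
    uint8_t out[64], in[64];

    memset(out, 0xab, sizeof(out));
    /* is_write = 1: host buffer -> guest physical memory */
    cpu_physical_memory_rw(dma_addr, out, sizeof(out), 1);
    /* is_write = 0: guest physical memory -> host buffer */
    cpu_physical_memory_rw(dma_addr, in, sizeof(in), 0);
}
#endif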
bellard8df1cd02005-01-28 22:37:22 +00003874
bellardd0ecd2a2006-04-23 17:14:48 +00003875/* used for ROM loading: can write in RAM and ROM */
Anthony Liguoric227f092009-10-01 16:12:16 -05003876void cpu_physical_memory_write_rom(target_phys_addr_t addr,
bellardd0ecd2a2006-04-23 17:14:48 +00003877 const uint8_t *buf, int len)
3878{
3879 int l;
3880 uint8_t *ptr;
Anthony Liguoric227f092009-10-01 16:12:16 -05003881 target_phys_addr_t page;
bellardd0ecd2a2006-04-23 17:14:48 +00003882 unsigned long pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003883 PhysPageDesc p;
ths3b46e622007-09-17 08:09:54 +00003884
bellardd0ecd2a2006-04-23 17:14:48 +00003885 while (len > 0) {
3886 page = addr & TARGET_PAGE_MASK;
3887 l = (page + TARGET_PAGE_SIZE) - addr;
3888 if (l > len)
3889 l = len;
3890 p = phys_page_find(page >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003891 pd = p.phys_offset;
ths3b46e622007-09-17 08:09:54 +00003892
Avi Kivity1d393fa2012-01-01 21:15:42 +02003893 if (!is_ram_rom_romd(pd)) {
bellardd0ecd2a2006-04-23 17:14:48 +00003894 /* do nothing */
3895 } else {
3896 unsigned long addr1;
3897 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3898 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003899 ptr = qemu_get_ram_ptr(addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00003900 memcpy(ptr, buf, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003901 qemu_put_ram_ptr(ptr);
bellardd0ecd2a2006-04-23 17:14:48 +00003902 }
3903 len -= l;
3904 buf += l;
3905 addr += l;
3906 }
3907}
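/*
 * Illustrative sketch (editor's addition, kept out of the build): a
 * firmware loader writes through the ROM protection that
 * cpu_physical_memory_rw() enforces.  "rom_base" and "blob" are
 * hypothetical.
 */
#if 0
static void example_load_firmware(target_phys_addr_t rom_base,
                                  const uint8_t *blob, int blob_len)
{
    /* unlike cpu_physical_memory_write(), this also patches ROM pages */
    cpu_physical_memory_write_rom(rom_base, blob, blob_len);
}
#endif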
3908
aliguori6d16c2f2009-01-22 16:59:11 +00003909typedef struct {
3910 void *buffer;
Anthony Liguoric227f092009-10-01 16:12:16 -05003911 target_phys_addr_t addr;
3912 target_phys_addr_t len;
aliguori6d16c2f2009-01-22 16:59:11 +00003913} BounceBuffer;
3914
3915static BounceBuffer bounce;
3916
aliguoriba223c22009-01-22 16:59:16 +00003917typedef struct MapClient {
3918 void *opaque;
3919 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00003920 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00003921} MapClient;
3922
Blue Swirl72cf2d42009-09-12 07:36:22 +00003923static QLIST_HEAD(map_client_list, MapClient) map_client_list
3924 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003925
3926void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3927{
Anthony Liguori7267c092011-08-20 22:09:37 -05003928 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00003929
3930 client->opaque = opaque;
3931 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003932 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00003933 return client;
3934}
3935
3936void cpu_unregister_map_client(void *_client)
3937{
3938 MapClient *client = (MapClient *)_client;
3939
Blue Swirl72cf2d42009-09-12 07:36:22 +00003940 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05003941 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00003942}
3943
3944static void cpu_notify_map_clients(void)
3945{
3946 MapClient *client;
3947
Blue Swirl72cf2d42009-09-12 07:36:22 +00003948 while (!QLIST_EMPTY(&map_client_list)) {
3949 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003950 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09003951 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00003952 }
3953}
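/*
 * Illustrative sketch (editor's addition, kept out of the build): a
 * caller whose cpu_physical_memory_map() attempt failed (e.g. the single
 * bounce buffer was busy) registers a callback; cpu_notify_map_clients()
 * fires it once the buffer is released and unregisters it automatically.
 * "MyDevice" and "retry_dma" are hypothetical.
 */
#if 0
static void retry_dma(void *opaque)
{
    struct MyDevice *dev = opaque;   /* hypothetical device state */
    /* re-issue the cpu_physical_memory_map() attempt here */
}

static void example_wait_for_bounce(struct MyDevice *dev)
{
    cpu_register_map_client(dev, retry_dma);
}
#endif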
3954
aliguori6d16c2f2009-01-22 16:59:11 +00003955/* Map a physical memory region into a host virtual address.
3956 * May map a subset of the requested range, given by and returned in *plen.
3957 * May return NULL if resources needed to perform the mapping are exhausted.
3958 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00003959 * Use cpu_register_map_client() to know when retrying the map operation is
3960 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00003961 */
Anthony Liguoric227f092009-10-01 16:12:16 -05003962void *cpu_physical_memory_map(target_phys_addr_t addr,
3963 target_phys_addr_t *plen,
aliguori6d16c2f2009-01-22 16:59:11 +00003964 int is_write)
3965{
Anthony Liguoric227f092009-10-01 16:12:16 -05003966 target_phys_addr_t len = *plen;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003967 target_phys_addr_t todo = 0;
aliguori6d16c2f2009-01-22 16:59:11 +00003968 int l;
Anthony Liguoric227f092009-10-01 16:12:16 -05003969 target_phys_addr_t page;
aliguori6d16c2f2009-01-22 16:59:11 +00003970 unsigned long pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003971 PhysPageDesc p;
Anthony PERARDf15fbc42011-07-20 08:17:42 +00003972 ram_addr_t raddr = RAM_ADDR_MAX;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003973 ram_addr_t rlen;
3974 void *ret;
aliguori6d16c2f2009-01-22 16:59:11 +00003975
3976 while (len > 0) {
3977 page = addr & TARGET_PAGE_MASK;
3978 l = (page + TARGET_PAGE_SIZE) - addr;
3979 if (l > len)
3980 l = len;
3981 p = phys_page_find(page >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003982 pd = p.phys_offset;
aliguori6d16c2f2009-01-22 16:59:11 +00003983
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003984 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003985 if (todo || bounce.buffer) {
aliguori6d16c2f2009-01-22 16:59:11 +00003986 break;
3987 }
3988 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3989 bounce.addr = addr;
3990 bounce.len = l;
3991 if (!is_write) {
Stefan Weil54f7b4a2011-04-10 18:23:39 +02003992 cpu_physical_memory_read(addr, bounce.buffer, l);
aliguori6d16c2f2009-01-22 16:59:11 +00003993 }
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003994
3995 *plen = l;
3996 return bounce.buffer;
aliguori6d16c2f2009-01-22 16:59:11 +00003997 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003998 if (!todo) {
3999 raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4000 }
aliguori6d16c2f2009-01-22 16:59:11 +00004001
4002 len -= l;
4003 addr += l;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01004004 todo += l;
aliguori6d16c2f2009-01-22 16:59:11 +00004005 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01004006 rlen = todo;
4007 ret = qemu_ram_ptr_length(raddr, &rlen);
4008 *plen = rlen;
4009 return ret;
aliguori6d16c2f2009-01-22 16:59:11 +00004010}
4011
4012/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
4013 * Will also mark the memory as dirty if is_write == 1. access_len gives
4014 * the amount of memory that was actually read or written by the caller.
4015 */
Anthony Liguoric227f092009-10-01 16:12:16 -05004016void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
4017 int is_write, target_phys_addr_t access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00004018{
4019 if (buffer != bounce.buffer) {
4020 if (is_write) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03004021 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00004022 while (access_len) {
4023 unsigned l;
4024 l = TARGET_PAGE_SIZE;
4025 if (l > access_len)
4026 l = access_len;
4027 if (!cpu_physical_memory_is_dirty(addr1)) {
4028 /* invalidate code */
4029 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
4030 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09004031 cpu_physical_memory_set_dirty_flags(
4032 addr1, (0xff & ~CODE_DIRTY_FLAG));
aliguori6d16c2f2009-01-22 16:59:11 +00004033 }
4034 addr1 += l;
4035 access_len -= l;
4036 }
4037 }
Jan Kiszka868bb332011-06-21 22:59:09 +02004038 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02004039 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01004040 }
aliguori6d16c2f2009-01-22 16:59:11 +00004041 return;
4042 }
4043 if (is_write) {
4044 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
4045 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00004046 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00004047 bounce.buffer = NULL;
aliguoriba223c22009-01-22 16:59:16 +00004048 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00004049}
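/*
 * Illustrative sketch (editor's addition, kept out of the build): the
 * canonical map/process/unmap loop.  The mapping may come back shorter
 * than requested, so the caller iterates; "process()" is a hypothetical
 * consumer of the mapped bytes.
 */
#if 0
static void example_map_loop(target_phys_addr_t addr, target_phys_addr_t len)
{
    while (len > 0) {
        target_phys_addr_t plen = len;
        void *p = cpu_physical_memory_map(addr, &plen, 0 /* read */);
        if (!p) {
            break;   /* resources exhausted: see cpu_register_map_client() */
        }
        process(p, plen);                          /* hypothetical */
        cpu_physical_memory_unmap(p, plen, 0, plen);
        addr += plen;
        len -= plen;
    }
}
#endif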
bellardd0ecd2a2006-04-23 17:14:48 +00004050
bellard8df1cd02005-01-28 22:37:22 +00004051/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004052static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
4053 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00004054{
4055 int io_index;
4056 uint8_t *ptr;
4057 uint32_t val;
4058 unsigned long pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004059 PhysPageDesc p;
bellard8df1cd02005-01-28 22:37:22 +00004060
4061 p = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004062 pd = p.phys_offset;
ths3b46e622007-09-17 08:09:54 +00004063
Avi Kivity1d393fa2012-01-01 21:15:42 +02004064 if (!is_ram_rom_romd(pd)) {
bellard8df1cd02005-01-28 22:37:22 +00004065 /* I/O case */
Avi Kivity11c7ef02012-01-02 17:21:07 +02004066 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004067 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
Avi Kivityacbbec52011-11-21 12:27:03 +02004068 val = io_mem_read(io_index, addr, 4);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004069#if defined(TARGET_WORDS_BIGENDIAN)
4070 if (endian == DEVICE_LITTLE_ENDIAN) {
4071 val = bswap32(val);
4072 }
4073#else
4074 if (endian == DEVICE_BIG_ENDIAN) {
4075 val = bswap32(val);
4076 }
4077#endif
bellard8df1cd02005-01-28 22:37:22 +00004078 } else {
4079 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00004080 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bellard8df1cd02005-01-28 22:37:22 +00004081 (addr & ~TARGET_PAGE_MASK);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004082 switch (endian) {
4083 case DEVICE_LITTLE_ENDIAN:
4084 val = ldl_le_p(ptr);
4085 break;
4086 case DEVICE_BIG_ENDIAN:
4087 val = ldl_be_p(ptr);
4088 break;
4089 default:
4090 val = ldl_p(ptr);
4091 break;
4092 }
bellard8df1cd02005-01-28 22:37:22 +00004093 }
4094 return val;
4095}
4096
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004097uint32_t ldl_phys(target_phys_addr_t addr)
4098{
4099 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4100}
4101
4102uint32_t ldl_le_phys(target_phys_addr_t addr)
4103{
4104 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4105}
4106
4107uint32_t ldl_be_phys(target_phys_addr_t addr)
4108{
4109 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
4110}
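/*
 * Illustrative sketch (editor's addition, kept out of the build): a
 * device model reads a register through the accessor matching the
 * *device's* endianness, so the same model behaves identically on big-
 * and little-endian targets.  The register offset is hypothetical.
 */
#if 0
static uint32_t example_read_le_register(target_phys_addr_t mmio_base)
{
    return ldl_le_phys(mmio_base + 0x10);   /* 32-bit little-endian register */
}
#endif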
4111
bellard84b7b8e2005-11-28 21:19:04 +00004112/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004113static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
4114 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00004115{
4116 int io_index;
4117 uint8_t *ptr;
4118 uint64_t val;
4119 unsigned long pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004120 PhysPageDesc p;
bellard84b7b8e2005-11-28 21:19:04 +00004121
4122 p = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004123 pd = p.phys_offset;
ths3b46e622007-09-17 08:09:54 +00004124
Avi Kivity1d393fa2012-01-01 21:15:42 +02004125 if (!is_ram_rom_romd(pd)) {
bellard84b7b8e2005-11-28 21:19:04 +00004126 /* I/O case */
Avi Kivity11c7ef02012-01-02 17:21:07 +02004127 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004128 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004129
4130 /* XXX: This is broken when device endian != cpu endian.
4131 Fix by checking the "endian" argument, as the ldl/lduw paths do */
bellard84b7b8e2005-11-28 21:19:04 +00004132#ifdef TARGET_WORDS_BIGENDIAN
Avi Kivityacbbec52011-11-21 12:27:03 +02004133 val = io_mem_read(io_index, addr, 4) << 32;
4134 val |= io_mem_read(io_index, addr + 4, 4);
bellard84b7b8e2005-11-28 21:19:04 +00004135#else
Avi Kivityacbbec52011-11-21 12:27:03 +02004136 val = io_mem_read(io_index, addr, 4);
4137 val |= io_mem_read(io_index, addr + 4, 4) << 32;
bellard84b7b8e2005-11-28 21:19:04 +00004138#endif
4139 } else {
4140 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00004141 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bellard84b7b8e2005-11-28 21:19:04 +00004142 (addr & ~TARGET_PAGE_MASK);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004143 switch (endian) {
4144 case DEVICE_LITTLE_ENDIAN:
4145 val = ldq_le_p(ptr);
4146 break;
4147 case DEVICE_BIG_ENDIAN:
4148 val = ldq_be_p(ptr);
4149 break;
4150 default:
4151 val = ldq_p(ptr);
4152 break;
4153 }
bellard84b7b8e2005-11-28 21:19:04 +00004154 }
4155 return val;
4156}
4157
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004158uint64_t ldq_phys(target_phys_addr_t addr)
4159{
4160 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4161}
4162
4163uint64_t ldq_le_phys(target_phys_addr_t addr)
4164{
4165 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4166}
4167
4168uint64_t ldq_be_phys(target_phys_addr_t addr)
4169{
4170 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
4171}
4172
bellardaab33092005-10-30 20:48:42 +00004173/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05004174uint32_t ldub_phys(target_phys_addr_t addr)
bellardaab33092005-10-30 20:48:42 +00004175{
4176 uint8_t val;
4177 cpu_physical_memory_read(addr, &val, 1);
4178 return val;
4179}
4180
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004181/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004182static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
4183 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00004184{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004185 int io_index;
4186 uint8_t *ptr;
4187 uint64_t val;
4188 unsigned long pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004189 PhysPageDesc p;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004190
4191 p = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004192 pd = p.phys_offset;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004193
Avi Kivity1d393fa2012-01-01 21:15:42 +02004194 if (!is_ram_rom_romd(pd)) {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004195 /* I/O case */
Avi Kivity11c7ef02012-01-02 17:21:07 +02004196 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004197 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
Avi Kivityacbbec52011-11-21 12:27:03 +02004198 val = io_mem_read(io_index, addr, 2);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004199#if defined(TARGET_WORDS_BIGENDIAN)
4200 if (endian == DEVICE_LITTLE_ENDIAN) {
4201 val = bswap16(val);
4202 }
4203#else
4204 if (endian == DEVICE_BIG_ENDIAN) {
4205 val = bswap16(val);
4206 }
4207#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004208 } else {
4209 /* RAM case */
4210 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4211 (addr & ~TARGET_PAGE_MASK);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004212 switch (endian) {
4213 case DEVICE_LITTLE_ENDIAN:
4214 val = lduw_le_p(ptr);
4215 break;
4216 case DEVICE_BIG_ENDIAN:
4217 val = lduw_be_p(ptr);
4218 break;
4219 default:
4220 val = lduw_p(ptr);
4221 break;
4222 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004223 }
4224 return val;
bellardaab33092005-10-30 20:48:42 +00004225}
4226
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004227uint32_t lduw_phys(target_phys_addr_t addr)
4228{
4229 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4230}
4231
4232uint32_t lduw_le_phys(target_phys_addr_t addr)
4233{
4234 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4235}
4236
4237uint32_t lduw_be_phys(target_phys_addr_t addr)
4238{
4239 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
4240}
4241
bellard8df1cd02005-01-28 22:37:22 +00004242/* warning: addr must be aligned. The ram page is not marked as dirty
4243 and the code inside is not invalidated. It is useful if the dirty
4244 bits are used to track modified PTEs */
Anthony Liguoric227f092009-10-01 16:12:16 -05004245void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
bellard8df1cd02005-01-28 22:37:22 +00004246{
4247 int io_index;
4248 uint8_t *ptr;
4249 unsigned long pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004250 PhysPageDesc p;
bellard8df1cd02005-01-28 22:37:22 +00004251
4252 p = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004253 pd = p.phys_offset;
ths3b46e622007-09-17 08:09:54 +00004254
Avi Kivity0e0df1e2012-01-02 00:32:15 +02004255 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
Avi Kivity11c7ef02012-01-02 17:21:07 +02004256 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004257 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
Avi Kivityacbbec52011-11-21 12:27:03 +02004258 io_mem_write(io_index, addr, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00004259 } else {
aliguori74576192008-10-06 14:02:03 +00004260 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
pbrook5579c7f2009-04-11 14:47:08 +00004261 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00004262 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00004263
4264 if (unlikely(in_migration)) {
4265 if (!cpu_physical_memory_is_dirty(addr1)) {
4266 /* invalidate code */
4267 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4268 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09004269 cpu_physical_memory_set_dirty_flags(
4270 addr1, (0xff & ~CODE_DIRTY_FLAG));
aliguori74576192008-10-06 14:02:03 +00004271 }
4272 }
bellard8df1cd02005-01-28 22:37:22 +00004273 }
4274}
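/*
 * Illustrative sketch (editor's addition, kept out of the build): a
 * target's soft-MMU page-table walker sets accessed/dirty flags in a
 * guest PTE with the _notdirty store, so its own bookkeeping write does
 * not trip the dirty tracking described above.  "pte_addr" and the flag
 * value are hypothetical.
 */
#if 0
static void example_mark_pte_accessed(target_phys_addr_t pte_addr, uint32_t pte)
{
    stl_phys_notdirty(pte_addr, pte | 0x20 /* hypothetical ACCESSED bit */);
}
#endif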
4275
Anthony Liguoric227f092009-10-01 16:12:16 -05004276void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
j_mayerbc98a7e2007-04-04 07:55:12 +00004277{
4278 int io_index;
4279 uint8_t *ptr;
4280 unsigned long pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004281 PhysPageDesc p;
j_mayerbc98a7e2007-04-04 07:55:12 +00004282
4283 p = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004284 pd = p.phys_offset;
ths3b46e622007-09-17 08:09:54 +00004285
Avi Kivity0e0df1e2012-01-02 00:32:15 +02004286 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
Avi Kivity11c7ef02012-01-02 17:21:07 +02004287 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004288 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
j_mayerbc98a7e2007-04-04 07:55:12 +00004289#ifdef TARGET_WORDS_BIGENDIAN
Avi Kivityacbbec52011-11-21 12:27:03 +02004290 io_mem_write(io_index, addr, val >> 32, 4);
4291 io_mem_write(io_index, addr + 4, (uint32_t)val, 4);
j_mayerbc98a7e2007-04-04 07:55:12 +00004292#else
Avi Kivityacbbec52011-11-21 12:27:03 +02004293 io_mem_write(io_index, addr, (uint32_t)val, 4);
4294 io_mem_write(io_index, addr + 4, val >> 32, 4);
j_mayerbc98a7e2007-04-04 07:55:12 +00004295#endif
4296 } else {
pbrook5579c7f2009-04-11 14:47:08 +00004297 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
j_mayerbc98a7e2007-04-04 07:55:12 +00004298 (addr & ~TARGET_PAGE_MASK);
4299 stq_p(ptr, val);
4300 }
4301}
4302
bellard8df1cd02005-01-28 22:37:22 +00004303/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004304static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
4305 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00004306{
4307 int io_index;
4308 uint8_t *ptr;
4309 unsigned long pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004310 PhysPageDesc p;
bellard8df1cd02005-01-28 22:37:22 +00004311
4312 p = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004313 pd = p.phys_offset;
ths3b46e622007-09-17 08:09:54 +00004314
Avi Kivity0e0df1e2012-01-02 00:32:15 +02004315 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
Avi Kivity11c7ef02012-01-02 17:21:07 +02004316 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004317 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004318#if defined(TARGET_WORDS_BIGENDIAN)
4319 if (endian == DEVICE_LITTLE_ENDIAN) {
4320 val = bswap32(val);
4321 }
4322#else
4323 if (endian == DEVICE_BIG_ENDIAN) {
4324 val = bswap32(val);
4325 }
4326#endif
Avi Kivityacbbec52011-11-21 12:27:03 +02004327 io_mem_write(io_index, addr, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00004328 } else {
4329 unsigned long addr1;
4330 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4331 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00004332 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004333 switch (endian) {
4334 case DEVICE_LITTLE_ENDIAN:
4335 stl_le_p(ptr, val);
4336 break;
4337 case DEVICE_BIG_ENDIAN:
4338 stl_be_p(ptr, val);
4339 break;
4340 default:
4341 stl_p(ptr, val);
4342 break;
4343 }
bellard3a7d9292005-08-21 09:26:42 +00004344 if (!cpu_physical_memory_is_dirty(addr1)) {
4345 /* invalidate code */
4346 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4347 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09004348 cpu_physical_memory_set_dirty_flags(addr1,
4349 (0xff & ~CODE_DIRTY_FLAG));
bellard3a7d9292005-08-21 09:26:42 +00004350 }
bellard8df1cd02005-01-28 22:37:22 +00004351 }
4352}
4353
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004354void stl_phys(target_phys_addr_t addr, uint32_t val)
4355{
4356 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4357}
4358
4359void stl_le_phys(target_phys_addr_t addr, uint32_t val)
4360{
4361 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4362}
4363
4364void stl_be_phys(target_phys_addr_t addr, uint32_t val)
4365{
4366 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4367}
4368
bellardaab33092005-10-30 20:48:42 +00004369/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05004370void stb_phys(target_phys_addr_t addr, uint32_t val)
bellardaab33092005-10-30 20:48:42 +00004371{
4372 uint8_t v = val;
4373 cpu_physical_memory_write(addr, &v, 1);
4374}
4375
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004376/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004377static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
4378 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00004379{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004380 int io_index;
4381 uint8_t *ptr;
4382 unsigned long pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004383 PhysPageDesc p;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004384
4385 p = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004386 pd = p.phys_offset;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004387
Avi Kivity0e0df1e2012-01-02 00:32:15 +02004388 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
Avi Kivity11c7ef02012-01-02 17:21:07 +02004389 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004390 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004391#if defined(TARGET_WORDS_BIGENDIAN)
4392 if (endian == DEVICE_LITTLE_ENDIAN) {
4393 val = bswap16(val);
4394 }
4395#else
4396 if (endian == DEVICE_BIG_ENDIAN) {
4397 val = bswap16(val);
4398 }
4399#endif
Avi Kivityacbbec52011-11-21 12:27:03 +02004400 io_mem_write(io_index, addr, val, 2);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004401 } else {
4402 unsigned long addr1;
4403 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4404 /* RAM case */
4405 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004406 switch (endian) {
4407 case DEVICE_LITTLE_ENDIAN:
4408 stw_le_p(ptr, val);
4409 break;
4410 case DEVICE_BIG_ENDIAN:
4411 stw_be_p(ptr, val);
4412 break;
4413 default:
4414 stw_p(ptr, val);
4415 break;
4416 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004417 if (!cpu_physical_memory_is_dirty(addr1)) {
4418 /* invalidate code */
4419 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4420 /* set dirty bit */
4421 cpu_physical_memory_set_dirty_flags(addr1,
4422 (0xff & ~CODE_DIRTY_FLAG));
4423 }
4424 }
bellardaab33092005-10-30 20:48:42 +00004425}
4426
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004427void stw_phys(target_phys_addr_t addr, uint32_t val)
4428{
4429 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4430}
4431
4432void stw_le_phys(target_phys_addr_t addr, uint32_t val)
4433{
4434 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4435}
4436
4437void stw_be_phys(target_phys_addr_t addr, uint32_t val)
4438{
4439 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4440}
4441
bellardaab33092005-10-30 20:48:42 +00004442/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05004443void stq_phys(target_phys_addr_t addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00004444{
4445 val = tswap64(val);
Stefan Weil71d2b722011-03-26 21:06:56 +01004446 cpu_physical_memory_write(addr, &val, 8);
bellardaab33092005-10-30 20:48:42 +00004447}
4448
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004449void stq_le_phys(target_phys_addr_t addr, uint64_t val)
4450{
4451 val = cpu_to_le64(val);
4452 cpu_physical_memory_write(addr, &val, 8);
4453}
4454
4455void stq_be_phys(target_phys_addr_t addr, uint64_t val)
4456{
4457 val = cpu_to_be64(val);
4458 cpu_physical_memory_write(addr, &val, 8);
4459}
4460
aliguori5e2972f2009-03-28 17:51:36 +00004461/* virtual memory access for debug (includes writing to ROM) */
ths5fafdf22007-09-16 21:08:06 +00004462int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00004463 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00004464{
4465 int l;
Anthony Liguoric227f092009-10-01 16:12:16 -05004466 target_phys_addr_t phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00004467 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00004468
4469 while (len > 0) {
4470 page = addr & TARGET_PAGE_MASK;
4471 phys_addr = cpu_get_phys_page_debug(env, page);
4472 /* if no physical page mapped, return an error */
4473 if (phys_addr == -1)
4474 return -1;
4475 l = (page + TARGET_PAGE_SIZE) - addr;
4476 if (l > len)
4477 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00004478 phys_addr += (addr & ~TARGET_PAGE_MASK);
aliguori5e2972f2009-03-28 17:51:36 +00004479 if (is_write)
4480 cpu_physical_memory_write_rom(phys_addr, buf, l);
4481 else
aliguori5e2972f2009-03-28 17:51:36 +00004482 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
bellard13eb76e2004-01-24 15:23:36 +00004483 len -= l;
4484 buf += l;
4485 addr += l;
4486 }
4487 return 0;
4488}
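/*
 * Illustrative sketch (editor's addition, kept out of the build): this
 * debug path is what a debugger stub would use, since it takes virtual
 * addresses and may legitimately patch ROM.  The breakpoint byte is the
 * x86 int3 opcode, used here purely as an example.
 */
#if 0
static int example_insert_sw_breakpoint(CPUState *env, target_ulong vaddr)
{
    uint8_t bp_insn[1] = { 0xcc };   /* x86 "int3", for illustration */
    return cpu_memory_rw_debug(env, vaddr, bp_insn, sizeof(bp_insn), 1);
}
#endif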
Paul Brooka68fe892010-03-01 00:08:59 +00004489#endif
bellard13eb76e2004-01-24 15:23:36 +00004490
pbrook2e70f6e2008-06-29 01:03:05 +00004491/* In deterministic execution (icount) mode, an instruction that performs
 4492 device I/O must be the last one in its TB */
4493void cpu_io_recompile(CPUState *env, void *retaddr)
4494{
4495 TranslationBlock *tb;
4496 uint32_t n, cflags;
4497 target_ulong pc, cs_base;
4498 uint64_t flags;
4499
4500 tb = tb_find_pc((unsigned long)retaddr);
4501 if (!tb) {
4502 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
4503 retaddr);
4504 }
4505 n = env->icount_decr.u16.low + tb->icount;
Stefan Weil618ba8e2011-04-18 06:39:53 +00004506 cpu_restore_state(tb, env, (unsigned long)retaddr);
pbrook2e70f6e2008-06-29 01:03:05 +00004507 /* Calculate how many instructions had been executed before the fault
thsbf20dc02008-06-30 17:22:19 +00004508 occurred. */
pbrook2e70f6e2008-06-29 01:03:05 +00004509 n = n - env->icount_decr.u16.low;
4510 /* Generate a new TB ending on the I/O insn. */
4511 n++;
4512 /* On MIPS and SH, delay slot instructions can only be restarted if
4513 they were already the first instruction in the TB. If this is not
thsbf20dc02008-06-30 17:22:19 +00004514 the first instruction in a TB then re-execute the preceding
pbrook2e70f6e2008-06-29 01:03:05 +00004515 branch. */
4516#if defined(TARGET_MIPS)
4517 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4518 env->active_tc.PC -= 4;
4519 env->icount_decr.u16.low++;
4520 env->hflags &= ~MIPS_HFLAG_BMASK;
4521 }
4522#elif defined(TARGET_SH4)
4523 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4524 && n > 1) {
4525 env->pc -= 2;
4526 env->icount_decr.u16.low++;
4527 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4528 }
4529#endif
4530 /* This should never happen. */
4531 if (n > CF_COUNT_MASK)
4532 cpu_abort(env, "TB too big during recompile");
4533
4534 cflags = n | CF_LAST_IO;
4535 pc = tb->pc;
4536 cs_base = tb->cs_base;
4537 flags = tb->flags;
4538 tb_phys_invalidate(tb, -1);
4539 /* FIXME: In theory this could raise an exception. In practice
4540 we have already translated the block once so it's probably ok. */
4541 tb_gen_code(env, pc, cs_base, flags, cflags);
thsbf20dc02008-06-30 17:22:19 +00004542 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
pbrook2e70f6e2008-06-29 01:03:05 +00004543 the first in the TB) then we end up generating a whole new TB and
4544 repeating the fault, which is horribly inefficient.
4545 Better would be to execute just this insn uncached, or generate a
4546 second new TB. */
4547 cpu_resume_from_signal(env, NULL);
4548}
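/*
 * Illustrative sketch (editor's addition, kept out of the build): the
 * rough shape of the guard in the soft-MMU I/O helpers that triggers the
 * recompilation above when an I/O access is decoded mid-TB in icount
 * mode; "can_do_io(env)" stands in for the real condition.
 */
#if 0
if (!can_do_io(env)) {               /* hypothetical guard */
    cpu_io_recompile(env, retaddr);  /* re-ends the TB with CF_LAST_IO */
}
#endif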
4549
Paul Brookb3755a92010-03-12 16:54:58 +00004550#if !defined(CONFIG_USER_ONLY)
4551
Stefan Weil055403b2010-10-22 23:03:32 +02004552void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
bellarde3db7222005-01-26 22:00:47 +00004553{
4554 int i, target_code_size, max_target_code_size;
4555 int direct_jmp_count, direct_jmp2_count, cross_page;
4556 TranslationBlock *tb;
ths3b46e622007-09-17 08:09:54 +00004557
bellarde3db7222005-01-26 22:00:47 +00004558 target_code_size = 0;
4559 max_target_code_size = 0;
4560 cross_page = 0;
4561 direct_jmp_count = 0;
4562 direct_jmp2_count = 0;
4563 for(i = 0; i < nb_tbs; i++) {
4564 tb = &tbs[i];
4565 target_code_size += tb->size;
4566 if (tb->size > max_target_code_size)
4567 max_target_code_size = tb->size;
4568 if (tb->page_addr[1] != -1)
4569 cross_page++;
4570 if (tb->tb_next_offset[0] != 0xffff) {
4571 direct_jmp_count++;
4572 if (tb->tb_next_offset[1] != 0xffff) {
4573 direct_jmp2_count++;
4574 }
4575 }
4576 }
4577    /* XXX: avoid using doubles? */
bellard57fec1f2008-02-01 10:50:11 +00004578 cpu_fprintf(f, "Translation buffer state:\n");
Stefan Weil055403b2010-10-22 23:03:32 +02004579 cpu_fprintf(f, "gen code size %td/%ld\n",
bellard26a5f132008-05-28 12:30:31 +00004580 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4581 cpu_fprintf(f, "TB count %d/%d\n",
4582 nb_tbs, code_gen_max_blocks);
ths5fafdf22007-09-16 21:08:06 +00004583 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
bellarde3db7222005-01-26 22:00:47 +00004584 nb_tbs ? target_code_size / nb_tbs : 0,
4585 max_target_code_size);
Stefan Weil055403b2010-10-22 23:03:32 +02004586 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
bellarde3db7222005-01-26 22:00:47 +00004587 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4588 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
ths5fafdf22007-09-16 21:08:06 +00004589 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4590 cross_page,
bellarde3db7222005-01-26 22:00:47 +00004591 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4592 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
ths5fafdf22007-09-16 21:08:06 +00004593 direct_jmp_count,
bellarde3db7222005-01-26 22:00:47 +00004594 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4595 direct_jmp2_count,
4596 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
bellard57fec1f2008-02-01 10:50:11 +00004597 cpu_fprintf(f, "\nStatistics:\n");
bellarde3db7222005-01-26 22:00:47 +00004598 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4599 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4600 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
bellardb67d9a52008-05-23 09:57:34 +00004601 tcg_dump_info(f, cpu_fprintf);
bellarde3db7222005-01-26 22:00:47 +00004602}
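/*
 * Editor's note: this routine backs the monitor's "info jit" command; a
 * plain invocation might look like the sketch below (fprintf is
 * signature-compatible with fprintf_function).
 */
#if 0
dump_exec_info(stdout, fprintf);
#endif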
4603
Avi Kivityd39e8222012-01-01 23:35:10 +02004604/* NOTE: this function can trigger an exception */
4605/* NOTE2: the returned address is not exactly the physical address: it
4606 is the offset relative to phys_ram_base */
4607tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong addr)
4608{
4609 int mmu_idx, page_index, pd;
4610 void *p;
4611
4612 page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
4613 mmu_idx = cpu_mmu_index(env1);
4614 if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
4615 (addr & TARGET_PAGE_MASK))) {
4616 ldub_code(addr);
4617 }
4618 pd = env1->tlb_table[mmu_idx][page_index].addr_code & ~TARGET_PAGE_MASK;
Avi Kivity0e0df1e2012-01-02 00:32:15 +02004619 if (pd != io_mem_ram.ram_addr && pd != io_mem_rom.ram_addr
Avi Kivity75c578d2012-01-02 15:40:52 +02004620 && !is_romd(pd)) {
Avi Kivityd39e8222012-01-01 23:35:10 +02004621#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC)
4622 cpu_unassigned_access(env1, addr, 0, 1, 0, 4);
4623#else
4624 cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
4625#endif
4626 }
4627 p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
4628 return qemu_ram_addr_from_host_nofail(p);
4629}
4630
Benjamin Herrenschmidt82afa582012-01-10 01:35:11 +00004631/*
4632 * A helper function for the _utterly broken_ virtio device model to find out if
4633 * it's running on a big-endian machine. Don't do this at home, kids!
4634 */
4635bool virtio_is_big_endian(void);
4636bool virtio_is_big_endian(void)
4637{
4638#if defined(TARGET_WORDS_BIGENDIAN)
4639 return true;
4640#else
4641 return false;
4642#endif
4643}
4644
bellard61382a52003-10-27 21:22:23 +00004645#define MMUSUFFIX _cmmu
Blue Swirl39171492011-09-21 18:13:16 +00004646#undef GETPC
bellard61382a52003-10-27 21:22:23 +00004647#define GETPC() NULL
4648#define env cpu_single_env
bellardb769d8f2004-10-03 15:07:13 +00004649#define SOFTMMU_CODE_ACCESS
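/*
 * Instantiate the code-access soft-MMU load helpers (the _cmmu variants)
 * for 1-, 2-, 4- and 8-byte accesses; softmmu_template.h derives the
 * access size as 1 << SHIFT.
 */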
bellard61382a52003-10-27 21:22:23 +00004650
4651#define SHIFT 0
4652#include "softmmu_template.h"
4653
4654#define SHIFT 1
4655#include "softmmu_template.h"
4656
4657#define SHIFT 2
4658#include "softmmu_template.h"
4659
4660#define SHIFT 3
4661#include "softmmu_template.h"
4662
4663#undef env
4664
4665#endif