/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

#define P_L2_LEVELS \
    (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)

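/* Worked example (illustrative values, not from the build): with
   TARGET_PHYS_ADDR_SPACE_BITS = 36 and TARGET_PAGE_BITS = 12, 24 bits of
   page index remain, so P_L2_LEVELS = ((36 - 12 - 1) / 10) + 1 = 3 levels
   of L2_BITS bits each. */
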
/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)

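/* Worked example (illustrative values): with L1_MAP_ADDR_SPACE_BITS = 32
   and TARGET_PAGE_BITS = 12, 20 bits of page index remain.  Then
   V_L1_BITS_REM = 20 % 10 = 0, which is below 4, so V_L1_BITS = 10,
   V_L1_SIZE = 1024 and V_L1_SHIFT = 32 - 12 - 10 = 10: a 1024-entry top
   level followed by a single 10-bit bottom level. */
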
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

typedef struct PhysPageEntry PhysPageEntry;

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;

struct PhysPageEntry {
    union {
        uint16_t leaf; /* index into phys_sections */
        uint16_t node; /* index into phys_map_nodes */
    } u;
};

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL ((uint16_t)~0)

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to PhysPageDesc.  */
static PhysPageEntry phys_map = { .u.node = PHYS_MAP_NODE_NIL };

static void io_mem_init(void);
static void memory_map_init(void);

/* io memory support */
MemoryRegion *io_mem_region[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static MemoryRegion io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

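/* Worked example (illustrative numbers): with 4 KiB host pages,
   map_exec((void *)0x12345, 0x100) rounds the range out to the
   page-aligned span [0x12000, 0x13000) before changing protection. */
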
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

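#if 0
/* Illustrative usage sketch (not compiled): example_page_desc_lookup is a
   hypothetical helper showing both lookup flavours.  page_find() never
   allocates; page_find_alloc(..., 1) creates any missing intermediate
   tables on the way down. */
static void example_page_desc_lookup(tb_page_addr_t addr)
{
    PageDesc *p;

    p = page_find(addr >> TARGET_PAGE_BITS);    /* NULL if never touched */
    if (p == NULL) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
    }
    p->first_tb = NULL;                         /* now safe to dereference */
}
#endif
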
#if !defined(CONFIG_USER_ONLY)

static PhysPageEntry *phys_map_node_alloc(uint16_t *ptr)
{
    unsigned i;
    uint16_t ret;

    /* Assign early to avoid the pointer being invalidated by g_renew() */
    *ptr = ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    if (ret == phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].u.node = PHYS_MAP_NODE_NIL;
    }
    return phys_map_nodes[ret];
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}

static uint16_t *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageEntry *lp, *p;
    int i, j;

    lp = &phys_map;

    /* Level 1..N. */
    for (i = P_L2_LEVELS - 1; i >= 0; i--) {
        if (lp->u.node == PHYS_MAP_NODE_NIL) {
            if (!alloc) {
                return NULL;
            }
            p = phys_map_node_alloc(&lp->u.node);
            if (i == 0) {
                for (j = 0; j < L2_SIZE; j++) {
                    p[j].u.leaf = phys_section_unassigned;
                }
            }
        } else {
            p = phys_map_nodes[lp->u.node];
        }
        lp = &p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    return &lp->u.leaf;
}

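#if 0
/* Illustrative sketch (not compiled): example_phys_map_update is a
   hypothetical helper that stores and re-reads a section index for one
   guest-physical page; 'section_index' is assumed to be a valid index
   into the phys_sections table. */
static void example_phys_map_update(target_phys_addr_t page_index,
                                    uint16_t section_index)
{
    uint16_t *leaf = phys_page_find_alloc(page_index, 1);

    *leaf = section_index;
    assert(*phys_page_find_alloc(page_index, 0) == section_index);
}
#endif
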
static inline PhysPageDesc phys_page_find(target_phys_addr_t index)
{
    uint16_t *p = phys_page_find_alloc(index, 0);
    uint16_t s_index = phys_section_unassigned;
    MemoryRegionSection *section;
    PhysPageDesc pd;

    if (p) {
        s_index = *p;
    }
    section = &phys_sections[s_index];
    index <<= TARGET_PAGE_BITS;
    assert(section->offset_within_address_space <= index
           && index <= section->offset_within_address_space + section->size-1);
    pd.phys_offset = section->mr->ram_addr;
    pd.region_offset = (index - section->offset_within_address_space)
        + section->offset_within_region;
    if (memory_region_is_ram(section->mr)) {
        pd.phys_offset += pd.region_offset;
        pd.region_offset = 0;
    } else if (section->mr->rom_device) {
        pd.phys_offset += pd.region_offset;
    }
    if (section->readonly) {
        pd.phys_offset |= io_mem_rom.ram_addr;
    }
    return pd;
}

static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode; this will change when a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Keep the buffer no bigger than 16MB to branch between blocks */
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

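/* Note: code_gen_buffer_max_size keeps TCG_MAX_OP_SIZE * OPC_BUF_SIZE bytes
   of headroom below the real buffer end, presumably so that a TB which
   passes the tb_alloc() check cannot overrun the buffer even at the
   maximum generated-code size. */
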
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now. */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

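#if 0
/* Illustrative sketch (not compiled): example_tb_cycle is a hypothetical
   helper showing the allocate/flush/retry pattern used by tb_gen_code(),
   plus tb_free()'s best-effort rollback, which only reclaims space when
   the TB is the most recently allocated one. */
static void example_tb_cycle(CPUState *env, target_ulong pc)
{
    TranslationBlock *tb = tb_alloc(pc);

    if (!tb) {
        tb_flush(env);      /* drop all TBs to make room */
        tb = tb_alloc(pc);  /* cannot fail right after a flush */
    }
    /* ... translate and run the block once ... */
    tb_free(tb);            /* rolls code_gen_ptr back if tb was the last */
}
#endif
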
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

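#if 0
/* Illustrative sketch (not compiled): example_decode_jmp_entry is a
   hypothetical helper decoding one entry of the circular jump list.  The
   low two bits of a list pointer encode which jump slot (0 or 1) of the
   referencing TB points here; the value 2 marks the list head stored in
   jmp_first. */
static void example_decode_jmp_entry(TranslationBlock *tagged)
{
    unsigned int n = (long)tagged & 3;
    TranslationBlock *tb = (TranslationBlock *)((long)tagged & ~3);

    if (n == 2) {
        /* back at the owning TB: end of the circular list */
    } else {
        /* 'tb' is chained to the owner through its jump slot 'n' */
        (void)tb->jmp_next[n];
    }
}
#endif
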
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the per-CPU jump cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

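/* Worked example: set_bits(tab, 5, 7) marks bits 5..11, OR-ing tab[0]
   with 0xe0 (bits 5-7) and tab[1] with 0x0f (bits 8-11). */
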
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size +
                             CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

/* invalidate all TBs which intersect with the target physical page
   starting in range [start, end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

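/* Worked example (illustrative): a 4-byte write at page offset 0x123
   tests code_bitmap[0x24] >> 3 against the mask 0xf; if any of those four
   bits is set, translated code overlaps the write and the range is
   invalidated. */
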
bellard9fa3e852004-01-04 18:06:42 +00001205#if !defined(CONFIG_SOFTMMU)
Paul Brook41c1b1c2010-03-12 16:54:58 +00001206static void tb_invalidate_phys_page(tb_page_addr_t addr,
bellardd720b932004-04-25 17:57:43 +00001207 unsigned long pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00001208{
aliguori6b917542008-11-18 19:46:41 +00001209 TranslationBlock *tb;
bellard9fa3e852004-01-04 18:06:42 +00001210 PageDesc *p;
aliguori6b917542008-11-18 19:46:41 +00001211 int n;
bellardd720b932004-04-25 17:57:43 +00001212#ifdef TARGET_HAS_PRECISE_SMC
aliguori6b917542008-11-18 19:46:41 +00001213 TranslationBlock *current_tb = NULL;
bellardd720b932004-04-25 17:57:43 +00001214 CPUState *env = cpu_single_env;
aliguori6b917542008-11-18 19:46:41 +00001215 int current_tb_modified = 0;
1216 target_ulong current_pc = 0;
1217 target_ulong current_cs_base = 0;
1218 int current_flags = 0;
bellardd720b932004-04-25 17:57:43 +00001219#endif
bellard9fa3e852004-01-04 18:06:42 +00001220
1221 addr &= TARGET_PAGE_MASK;
1222 p = page_find(addr >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001223 if (!p)
bellardfd6ce8f2003-05-14 19:00:11 +00001224 return;
1225 tb = p->first_tb;
bellardd720b932004-04-25 17:57:43 +00001226#ifdef TARGET_HAS_PRECISE_SMC
1227 if (tb && pc != 0) {
1228 current_tb = tb_find_pc(pc);
1229 }
1230#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001231 while (tb != NULL) {
bellard9fa3e852004-01-04 18:06:42 +00001232 n = (long)tb & 3;
1233 tb = (TranslationBlock *)((long)tb & ~3);
bellardd720b932004-04-25 17:57:43 +00001234#ifdef TARGET_HAS_PRECISE_SMC
1235 if (current_tb == tb &&
pbrook2e70f6e2008-06-29 01:03:05 +00001236 (current_tb->cflags & CF_COUNT_MASK) != 1) {
bellardd720b932004-04-25 17:57:43 +00001237 /* If we are modifying the current TB, we must stop
1238 its execution. We could be more precise by checking
1239 that the modification is after the current PC, but it
1240 would require a specialized function to partially
1241 restore the CPU state */
ths3b46e622007-09-17 08:09:54 +00001242
bellardd720b932004-04-25 17:57:43 +00001243 current_tb_modified = 1;
Stefan Weil618ba8e2011-04-18 06:39:53 +00001244 cpu_restore_state(current_tb, env, pc);
aliguori6b917542008-11-18 19:46:41 +00001245 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1246 &current_flags);
bellardd720b932004-04-25 17:57:43 +00001247 }
1248#endif /* TARGET_HAS_PRECISE_SMC */
bellard9fa3e852004-01-04 18:06:42 +00001249 tb_phys_invalidate(tb, addr);
1250 tb = tb->page_next[n];
bellardfd6ce8f2003-05-14 19:00:11 +00001251 }
1252 p->first_tb = NULL;
bellardd720b932004-04-25 17:57:43 +00001253#ifdef TARGET_HAS_PRECISE_SMC
1254 if (current_tb_modified) {
1255 /* we generate a block containing just the instruction
1256 modifying the memory. It will ensure that it cannot modify
1257 itself */
bellardea1c1802004-06-14 18:56:36 +00001258 env->current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001259 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
bellardd720b932004-04-25 17:57:43 +00001260 cpu_resume_from_signal(env, puc);
1261 }
1262#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001263}
bellard9fa3e852004-01-04 18:06:42 +00001264#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001265
1266/* add the tb in the target page and protect it if necessary */
ths5fafdf22007-09-16 21:08:06 +00001267static inline void tb_alloc_page(TranslationBlock *tb,
Paul Brook41c1b1c2010-03-12 16:54:58 +00001268 unsigned int n, tb_page_addr_t page_addr)
bellardfd6ce8f2003-05-14 19:00:11 +00001269{
1270 PageDesc *p;
Juan Quintela4429ab42011-06-02 01:53:44 +00001271#ifndef CONFIG_USER_ONLY
1272 bool page_already_protected;
1273#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001274
bellard9fa3e852004-01-04 18:06:42 +00001275 tb->page_addr[n] = page_addr;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001276 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
bellard9fa3e852004-01-04 18:06:42 +00001277 tb->page_next[n] = p->first_tb;
Juan Quintela4429ab42011-06-02 01:53:44 +00001278#ifndef CONFIG_USER_ONLY
1279 page_already_protected = p->first_tb != NULL;
1280#endif
bellard9fa3e852004-01-04 18:06:42 +00001281 p->first_tb = (TranslationBlock *)((long)tb | n);
1282 invalidate_page_bitmap(p);
1283
bellard107db442004-06-22 18:48:46 +00001284#if defined(TARGET_HAS_SMC) || 1
bellardd720b932004-04-25 17:57:43 +00001285
bellard9fa3e852004-01-04 18:06:42 +00001286#if defined(CONFIG_USER_ONLY)
bellardfd6ce8f2003-05-14 19:00:11 +00001287 if (p->flags & PAGE_WRITE) {
pbrook53a59602006-03-25 19:31:22 +00001288 target_ulong addr;
1289 PageDesc *p2;
bellard9fa3e852004-01-04 18:06:42 +00001290 int prot;
1291
bellardfd6ce8f2003-05-14 19:00:11 +00001292 /* force the host page to be non-writable (writes will have a
1293 page fault + mprotect overhead) */
pbrook53a59602006-03-25 19:31:22 +00001294 page_addr &= qemu_host_page_mask;
bellardfd6ce8f2003-05-14 19:00:11 +00001295 prot = 0;
pbrook53a59602006-03-25 19:31:22 +00001296 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1297 addr += TARGET_PAGE_SIZE) {
1298
1299 p2 = page_find (addr >> TARGET_PAGE_BITS);
1300 if (!p2)
1301 continue;
1302 prot |= p2->flags;
1303 p2->flags &= ~PAGE_WRITE;
pbrook53a59602006-03-25 19:31:22 +00001304 }
ths5fafdf22007-09-16 21:08:06 +00001305 mprotect(g2h(page_addr), qemu_host_page_size,
bellardfd6ce8f2003-05-14 19:00:11 +00001306 (prot & PAGE_BITS) & ~PAGE_WRITE);
1307#ifdef DEBUG_TB_INVALIDATE
blueswir1ab3d1722007-11-04 07:31:40 +00001308 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
pbrook53a59602006-03-25 19:31:22 +00001309 page_addr);
bellardfd6ce8f2003-05-14 19:00:11 +00001310#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001311 }
bellard9fa3e852004-01-04 18:06:42 +00001312#else
1313 /* if some code is already present, then the pages are already
1314 protected. So we handle the case where only the first TB is
1315 allocated in a physical page */
Juan Quintela4429ab42011-06-02 01:53:44 +00001316 if (!page_already_protected) {
bellard6a00d602005-11-21 23:25:50 +00001317 tlb_protect_code(page_addr);
bellard9fa3e852004-01-04 18:06:42 +00001318 }
1319#endif
bellardd720b932004-04-25 17:57:43 +00001320
1321#endif /* TARGET_HAS_SMC */
bellardfd6ce8f2003-05-14 19:00:11 +00001322}
1323
bellard9fa3e852004-01-04 18:06:42 +00001324/* add a new TB and link it to the physical page tables. phys_page2 is
1325 (-1) to indicate that only one page contains the TB. */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001326void tb_link_page(TranslationBlock *tb,
1327 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
bellardd4e81642003-05-25 16:46:15 +00001328{
bellard9fa3e852004-01-04 18:06:42 +00001329 unsigned int h;
1330 TranslationBlock **ptb;
1331
pbrookc8a706f2008-06-02 16:16:42 +00001332 /* Grab the mmap lock to stop another thread invalidating this TB
1333 before we are done. */
1334 mmap_lock();
bellard9fa3e852004-01-04 18:06:42 +00001335 /* add in the physical hash table */
1336 h = tb_phys_hash_func(phys_pc);
1337 ptb = &tb_phys_hash[h];
1338 tb->phys_hash_next = *ptb;
1339 *ptb = tb;
bellardfd6ce8f2003-05-14 19:00:11 +00001340
1341 /* add in the page list */
bellard9fa3e852004-01-04 18:06:42 +00001342 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1343 if (phys_page2 != -1)
1344 tb_alloc_page(tb, 1, phys_page2);
1345 else
1346 tb->page_addr[1] = -1;
bellard9fa3e852004-01-04 18:06:42 +00001347
bellardd4e81642003-05-25 16:46:15 +00001348 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1349 tb->jmp_next[0] = NULL;
1350 tb->jmp_next[1] = NULL;
1351
1352 /* init original jump addresses */
1353 if (tb->tb_next_offset[0] != 0xffff)
1354 tb_reset_jump(tb, 0);
1355 if (tb->tb_next_offset[1] != 0xffff)
1356 tb_reset_jump(tb, 1);
bellard8a40a182005-11-20 10:35:40 +00001357
1358#ifdef DEBUG_TB_CHECK
1359 tb_page_check();
1360#endif
pbrookc8a706f2008-06-02 16:16:42 +00001361 mmap_unlock();
bellardfd6ce8f2003-05-14 19:00:11 +00001362}
1363
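/* A disabled usage sketch (not part of the original file): how a caller
   such as tb_gen_code() might derive phys_page2 before linking.  Only
   when the translated code straddles a target page boundary does the
   second slot get a real page; otherwise -1 is passed, as the comment
   below describes.  The helper name and code_size parameter are
   illustrative assumptions. */
#if 0
static void example_link_tb(TranslationBlock *tb, tb_page_addr_t phys_pc,
                            int code_size)
{
    tb_page_addr_t phys_page2 = -1;   /* single-page TB by default */

    if ((phys_pc & TARGET_PAGE_MASK) !=
        ((phys_pc + code_size - 1) & TARGET_PAGE_MASK)) {
        /* the code crosses into a second page: link that one too */
        phys_page2 = (phys_pc + code_size - 1) & TARGET_PAGE_MASK;
    }
    tb_link_page(tb, phys_pc, phys_page2);
}
#endif
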
bellarda513fe12003-05-27 23:29:48 +00001364/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1365 tb[1].tc_ptr. Return NULL if not found */
1366TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1367{
1368 int m_min, m_max, m;
1369 unsigned long v;
1370 TranslationBlock *tb;
1371
1372 if (nb_tbs <= 0)
1373 return NULL;
1374 if (tc_ptr < (unsigned long)code_gen_buffer ||
1375 tc_ptr >= (unsigned long)code_gen_ptr)
1376 return NULL;
1377 /* binary search (cf Knuth) */
1378 m_min = 0;
1379 m_max = nb_tbs - 1;
1380 while (m_min <= m_max) {
1381 m = (m_min + m_max) >> 1;
1382 tb = &tbs[m];
1383 v = (unsigned long)tb->tc_ptr;
1384 if (v == tc_ptr)
1385 return tb;
1386 else if (tc_ptr < v) {
1387 m_max = m - 1;
1388 } else {
1389 m_min = m + 1;
1390 }
ths5fafdf22007-09-16 21:08:06 +00001391 }
bellarda513fe12003-05-27 23:29:48 +00001392 return &tbs[m_max];
1393}
bellard75012672003-06-21 13:11:07 +00001394
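/* A disabled sketch of the intended use of tb_find_pc(): map a host PC
   (e.g. taken from a SIGSEGV handler) back to the TB containing it and
   let cpu_restore_state() rebuild the guest CPU state.  The wrapper
   itself is an illustrative assumption. */
#if 0
static target_ulong example_guest_pc_for_host_pc(CPUState *env,
                                                 unsigned long host_pc)
{
    TranslationBlock *tb = tb_find_pc(host_pc);

    if (!tb) {
        return 0;     /* host_pc does not point into generated code */
    }
    cpu_restore_state(tb, env, host_pc);
    return tb->pc;    /* guest PC at which this TB starts */
}
#endif
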
bellardea041c02003-06-25 16:16:50 +00001395static void tb_reset_jump_recursive(TranslationBlock *tb);
1396
1397static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1398{
1399 TranslationBlock *tb1, *tb_next, **ptb;
1400 unsigned int n1;
1401
1402 tb1 = tb->jmp_next[n];
1403 if (tb1 != NULL) {
1404 /* find head of list */
1405 for(;;) {
1406 n1 = (long)tb1 & 3;
1407 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1408 if (n1 == 2)
1409 break;
1410 tb1 = tb1->jmp_next[n1];
1411 }
1412 /* we are now sure that tb jumps to tb1 */
1413 tb_next = tb1;
1414
1415 /* remove tb from the jmp_first list */
1416 ptb = &tb_next->jmp_first;
1417 for(;;) {
1418 tb1 = *ptb;
1419 n1 = (long)tb1 & 3;
1420 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1421 if (n1 == n && tb1 == tb)
1422 break;
1423 ptb = &tb1->jmp_next[n1];
1424 }
1425 *ptb = tb->jmp_next[n];
1426 tb->jmp_next[n] = NULL;
ths3b46e622007-09-17 08:09:54 +00001427
bellardea041c02003-06-25 16:16:50 +00001428 /* suppress the jump to next tb in generated code */
1429 tb_reset_jump(tb, n);
1430
bellard01243112004-01-04 15:48:17 +00001431 /* suppress jumps in the tb on which we could have jumped */
bellardea041c02003-06-25 16:16:50 +00001432 tb_reset_jump_recursive(tb_next);
1433 }
1434}
1435
1436static void tb_reset_jump_recursive(TranslationBlock *tb)
1437{
1438 tb_reset_jump_recursive2(tb, 0);
1439 tb_reset_jump_recursive2(tb, 1);
1440}
1441
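/* The jmp_first/jmp_next chains walked above tag the low two bits of
   each TranslationBlock pointer: value 0 or 1 names the jump slot the
   link belongs to, and 2 marks the list head.  A disabled decode
   sketch, relying (as the code above already does) on long being
   pointer-sized: */
#if 0
static inline TranslationBlock *example_decode_jmp_link(TranslationBlock *link,
                                                        unsigned int *slot)
{
    *slot = (long)link & 3;                          /* 0, 1 or 2 */
    return (TranslationBlock *)((long)link & ~3);    /* strip the tag */
}
#endif
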
bellard1fddef42005-04-17 19:16:13 +00001442#if defined(TARGET_HAS_ICE)
Paul Brook94df27f2010-02-28 23:47:45 +00001443#if defined(CONFIG_USER_ONLY)
1444static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1445{
1446 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1447}
1448#else
bellardd720b932004-04-25 17:57:43 +00001449static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1450{
Anthony Liguoric227f092009-10-01 16:12:16 -05001451 target_phys_addr_t addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00001452 target_ulong pd;
Anthony Liguoric227f092009-10-01 16:12:16 -05001453 ram_addr_t ram_addr;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02001454 PhysPageDesc p;
bellardd720b932004-04-25 17:57:43 +00001455
pbrookc2f07f82006-04-08 17:14:56 +00001456 addr = cpu_get_phys_page_debug(env, pc);
1457 p = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02001458 pd = p.phys_offset;
pbrookc2f07f82006-04-08 17:14:56 +00001459 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
pbrook706cd4b2006-04-08 17:36:21 +00001460 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
bellardd720b932004-04-25 17:57:43 +00001461}
bellardc27004e2005-01-03 23:35:10 +00001462#endif
Paul Brook94df27f2010-02-28 23:47:45 +00001463#endif /* TARGET_HAS_ICE */
bellardd720b932004-04-25 17:57:43 +00001464
Paul Brookc527ee82010-03-01 03:31:14 +00001465#if defined(CONFIG_USER_ONLY)
1466void cpu_watchpoint_remove_all(CPUState *env, int mask)
1468{
1469}
1470
1471int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1472 int flags, CPUWatchpoint **watchpoint)
1473{
1474 return -ENOSYS;
1475}
1476#else
pbrook6658ffb2007-03-16 23:58:11 +00001477/* Add a watchpoint. */
aliguoria1d1bb32008-11-18 20:07:32 +00001478int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1479 int flags, CPUWatchpoint **watchpoint)
pbrook6658ffb2007-03-16 23:58:11 +00001480{
aliguorib4051332008-11-18 20:14:20 +00001481 target_ulong len_mask = ~(len - 1);
aliguoric0ce9982008-11-25 22:13:57 +00001482 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001483
aliguorib4051332008-11-18 20:14:20 +00001484 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1485 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1486 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1487 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1488 return -EINVAL;
1489 }
Anthony Liguori7267c092011-08-20 22:09:37 -05001490 wp = g_malloc(sizeof(*wp));
pbrook6658ffb2007-03-16 23:58:11 +00001491
aliguoria1d1bb32008-11-18 20:07:32 +00001492 wp->vaddr = addr;
aliguorib4051332008-11-18 20:14:20 +00001493 wp->len_mask = len_mask;
aliguoria1d1bb32008-11-18 20:07:32 +00001494 wp->flags = flags;
1495
aliguori2dc9f412008-11-18 20:56:59 +00001496 /* keep all GDB-injected watchpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001497 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001498 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001499 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001500 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001501
pbrook6658ffb2007-03-16 23:58:11 +00001502 tlb_flush_page(env, addr);
aliguoria1d1bb32008-11-18 20:07:32 +00001503
1504 if (watchpoint)
1505 *watchpoint = wp;
1506 return 0;
pbrook6658ffb2007-03-16 23:58:11 +00001507}
1508
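/* A disabled usage sketch: the gdbstub inserts watchpoints this way.
   Address and length are placeholders; as checked above, len must be a
   power of two and addr must be aligned to it, or -EINVAL is returned. */
#if 0
static void example_set_write_watchpoint(CPUState *env)
{
    CPUWatchpoint *wp;

    if (cpu_watchpoint_insert(env, 0x10000, 4,
                              BP_MEM_WRITE | BP_GDB, &wp) < 0) {
        fprintf(stderr, "watchpoint rejected\n");
    }
}
#endif
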
aliguoria1d1bb32008-11-18 20:07:32 +00001509/* Remove a specific watchpoint. */
1510int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1511 int flags)
pbrook6658ffb2007-03-16 23:58:11 +00001512{
aliguorib4051332008-11-18 20:14:20 +00001513 target_ulong len_mask = ~(len - 1);
aliguoria1d1bb32008-11-18 20:07:32 +00001514 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001515
Blue Swirl72cf2d42009-09-12 07:36:22 +00001516 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001517 if (addr == wp->vaddr && len_mask == wp->len_mask
aliguori6e140f22008-11-18 20:37:55 +00001518 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
aliguoria1d1bb32008-11-18 20:07:32 +00001519 cpu_watchpoint_remove_by_ref(env, wp);
pbrook6658ffb2007-03-16 23:58:11 +00001520 return 0;
1521 }
1522 }
aliguoria1d1bb32008-11-18 20:07:32 +00001523 return -ENOENT;
pbrook6658ffb2007-03-16 23:58:11 +00001524}
1525
aliguoria1d1bb32008-11-18 20:07:32 +00001526/* Remove a specific watchpoint by reference. */
1527void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1528{
Blue Swirl72cf2d42009-09-12 07:36:22 +00001529 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
edgar_igl7d03f822008-05-17 18:58:29 +00001530
aliguoria1d1bb32008-11-18 20:07:32 +00001531 tlb_flush_page(env, watchpoint->vaddr);
1532
Anthony Liguori7267c092011-08-20 22:09:37 -05001533 g_free(watchpoint);
edgar_igl7d03f822008-05-17 18:58:29 +00001534}
1535
aliguoria1d1bb32008-11-18 20:07:32 +00001536/* Remove all matching watchpoints. */
1537void cpu_watchpoint_remove_all(CPUState *env, int mask)
1538{
aliguoric0ce9982008-11-25 22:13:57 +00001539 CPUWatchpoint *wp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001540
Blue Swirl72cf2d42009-09-12 07:36:22 +00001541 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001542 if (wp->flags & mask)
1543 cpu_watchpoint_remove_by_ref(env, wp);
aliguoric0ce9982008-11-25 22:13:57 +00001544 }
aliguoria1d1bb32008-11-18 20:07:32 +00001545}
Paul Brookc527ee82010-03-01 03:31:14 +00001546#endif
aliguoria1d1bb32008-11-18 20:07:32 +00001547
1548/* Add a breakpoint. */
1549int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1550 CPUBreakpoint **breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001551{
bellard1fddef42005-04-17 19:16:13 +00001552#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001553 CPUBreakpoint *bp;
ths3b46e622007-09-17 08:09:54 +00001554
Anthony Liguori7267c092011-08-20 22:09:37 -05001555 bp = g_malloc(sizeof(*bp));
aliguoria1d1bb32008-11-18 20:07:32 +00001556
1557 bp->pc = pc;
1558 bp->flags = flags;
1559
aliguori2dc9f412008-11-18 20:56:59 +00001560 /* keep all GDB-injected breakpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001561 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001562 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001563 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001564 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001565
1566 breakpoint_invalidate(env, pc);
1567
1568 if (breakpoint)
1569 *breakpoint = bp;
1570 return 0;
1571#else
1572 return -ENOSYS;
1573#endif
1574}
1575
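/* A disabled sketch of the insert/remove pairing used by debugger front
   ends; the same flags must be passed to remove a breakpoint that were
   used to insert it.  The wrapper is an illustrative assumption. */
#if 0
static void example_toggle_breakpoint(CPUState *env, target_ulong pc, int set)
{
    if (set) {
        cpu_breakpoint_insert(env, pc, BP_GDB, NULL);
    } else {
        cpu_breakpoint_remove(env, pc, BP_GDB);
    }
}
#endif
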
1576/* Remove a specific breakpoint. */
1577int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1578{
1579#if defined(TARGET_HAS_ICE)
1580 CPUBreakpoint *bp;
1581
Blue Swirl72cf2d42009-09-12 07:36:22 +00001582 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00001583 if (bp->pc == pc && bp->flags == flags) {
1584 cpu_breakpoint_remove_by_ref(env, bp);
bellard4c3a88a2003-07-26 12:06:08 +00001585 return 0;
aliguoria1d1bb32008-11-18 20:07:32 +00001586 }
bellard4c3a88a2003-07-26 12:06:08 +00001587 }
aliguoria1d1bb32008-11-18 20:07:32 +00001588 return -ENOENT;
bellard4c3a88a2003-07-26 12:06:08 +00001589#else
aliguoria1d1bb32008-11-18 20:07:32 +00001590 return -ENOSYS;
bellard4c3a88a2003-07-26 12:06:08 +00001591#endif
1592}
1593
aliguoria1d1bb32008-11-18 20:07:32 +00001594/* Remove a specific breakpoint by reference. */
1595void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001596{
bellard1fddef42005-04-17 19:16:13 +00001597#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001598 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
bellardd720b932004-04-25 17:57:43 +00001599
aliguoria1d1bb32008-11-18 20:07:32 +00001600 breakpoint_invalidate(env, breakpoint->pc);
1601
Anthony Liguori7267c092011-08-20 22:09:37 -05001602 g_free(breakpoint);
aliguoria1d1bb32008-11-18 20:07:32 +00001603#endif
1604}
1605
1606/* Remove all matching breakpoints. */
1607void cpu_breakpoint_remove_all(CPUState *env, int mask)
1608{
1609#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001610 CPUBreakpoint *bp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001611
Blue Swirl72cf2d42009-09-12 07:36:22 +00001612 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001613 if (bp->flags & mask)
1614 cpu_breakpoint_remove_by_ref(env, bp);
aliguoric0ce9982008-11-25 22:13:57 +00001615 }
bellard4c3a88a2003-07-26 12:06:08 +00001616#endif
1617}
1618
bellardc33a3462003-07-29 20:50:33 +00001619/* enable or disable single-step mode. EXCP_DEBUG is returned by the
1620 CPU loop after each instruction */
1621void cpu_single_step(CPUState *env, int enabled)
1622{
bellard1fddef42005-04-17 19:16:13 +00001623#if defined(TARGET_HAS_ICE)
bellardc33a3462003-07-29 20:50:33 +00001624 if (env->singlestep_enabled != enabled) {
1625 env->singlestep_enabled = enabled;
aliguorie22a25c2009-03-12 20:12:48 +00001626 if (kvm_enabled())
1627 kvm_update_guest_debug(env, 0);
1628 else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01001629 /* must flush all the translated code to avoid inconsistencies */
aliguorie22a25c2009-03-12 20:12:48 +00001630 /* XXX: only flush what is necessary */
1631 tb_flush(env);
1632 }
bellardc33a3462003-07-29 20:50:33 +00001633 }
1634#endif
1635}
1636
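/* A disabled sketch of the usual calling pattern: a debugger enables
   single-stepping, resumes the guest, and receives EXCP_DEBUG from the
   execution loop after one instruction; the loop itself lives outside
   this file. */
#if 0
    cpu_single_step(env, 1);   /* next cpu_exec(env) stops after one insn */
    /* ... run the CPU, handle EXCP_DEBUG ... */
    cpu_single_step(env, 0);   /* back to normal execution */
#endif
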
bellard34865132003-10-05 14:28:56 +00001637/* enable or disable low-level logging */
1638void cpu_set_log(int log_flags)
1639{
1640 loglevel = log_flags;
1641 if (loglevel && !logfile) {
pbrook11fcfab2007-07-01 18:21:11 +00001642 logfile = fopen(logfilename, log_append ? "a" : "w");
bellard34865132003-10-05 14:28:56 +00001643 if (!logfile) {
1644 perror(logfilename);
1645 _exit(1);
1646 }
bellard9fa3e852004-01-04 18:06:42 +00001647#if !defined(CONFIG_SOFTMMU)
1648 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1649 {
blueswir1b55266b2008-09-20 08:07:15 +00001650 static char logfile_buf[4096];
bellard9fa3e852004-01-04 18:06:42 +00001651 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1652 }
Stefan Weildaf767b2011-12-03 22:32:37 +01001653#elif defined(_WIN32)
1654 /* Win32 doesn't support line-buffering, so use unbuffered output. */
1655 setvbuf(logfile, NULL, _IONBF, 0);
1656#else
bellard34865132003-10-05 14:28:56 +00001657 setvbuf(logfile, NULL, _IOLBF, 0);
bellard9fa3e852004-01-04 18:06:42 +00001658#endif
pbrooke735b912007-06-30 13:53:24 +00001659 log_append = 1;
1660 }
1661 if (!loglevel && logfile) {
1662 fclose(logfile);
1663 logfile = NULL;
bellard34865132003-10-05 14:28:56 +00001664 }
1665}
1666
1667void cpu_set_log_filename(const char *filename)
1668{
1669 logfilename = strdup(filename);
pbrooke735b912007-06-30 13:53:24 +00001670 if (logfile) {
1671 fclose(logfile);
1672 logfile = NULL;
1673 }
1674 cpu_set_log(loglevel);
bellard34865132003-10-05 14:28:56 +00001675}
bellardc33a3462003-07-29 20:50:33 +00001676
aurel323098dba2009-03-07 21:28:24 +00001677static void cpu_unlink_tb(CPUState *env)
bellardea041c02003-06-25 16:16:50 +00001678{
pbrookd5975362008-06-07 20:50:51 +00001679 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1680 problem and hope the cpu will stop of its own accord. For userspace
1681 emulation this often isn't actually as bad as it sounds. Often
1682 signals are used primarily to interrupt blocking syscalls. */
aurel323098dba2009-03-07 21:28:24 +00001683 TranslationBlock *tb;
Anthony Liguoric227f092009-10-01 16:12:16 -05001684 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
aurel323098dba2009-03-07 21:28:24 +00001685
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001686 spin_lock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001687 tb = env->current_tb;
1688 /* if the cpu is currently executing code, we must unlink it and
1689 all the potentially executing TBs */
Riku Voipiof76cfe52009-12-04 15:16:30 +02001690 if (tb) {
aurel323098dba2009-03-07 21:28:24 +00001691 env->current_tb = NULL;
1692 tb_reset_jump_recursive(tb);
aurel323098dba2009-03-07 21:28:24 +00001693 }
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001694 spin_unlock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001695}
1696
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001697#ifndef CONFIG_USER_ONLY
aurel323098dba2009-03-07 21:28:24 +00001698/* mask must never be zero, except for A20 change call */
Jan Kiszkaec6959d2011-04-13 01:32:56 +02001699static void tcg_handle_interrupt(CPUState *env, int mask)
aurel323098dba2009-03-07 21:28:24 +00001700{
1701 int old_mask;
1702
1703 old_mask = env->interrupt_request;
1704 env->interrupt_request |= mask;
1705
aliguori8edac962009-04-24 18:03:45 +00001706 /*
1707 * If called from iothread context, wake the target cpu in
1708 * case it's halted.
1709 */
Jan Kiszkab7680cb2011-03-12 17:43:51 +01001710 if (!qemu_cpu_is_self(env)) {
aliguori8edac962009-04-24 18:03:45 +00001711 qemu_cpu_kick(env);
1712 return;
1713 }
aliguori8edac962009-04-24 18:03:45 +00001714
pbrook2e70f6e2008-06-29 01:03:05 +00001715 if (use_icount) {
pbrook266910c2008-07-09 15:31:50 +00001716 env->icount_decr.u16.high = 0xffff;
pbrook2e70f6e2008-06-29 01:03:05 +00001717 if (!can_do_io(env)
aurel32be214e62009-03-06 21:48:00 +00001718 && (mask & ~old_mask) != 0) {
pbrook2e70f6e2008-06-29 01:03:05 +00001719 cpu_abort(env, "Raised interrupt while not in I/O function");
1720 }
pbrook2e70f6e2008-06-29 01:03:05 +00001721 } else {
aurel323098dba2009-03-07 21:28:24 +00001722 cpu_unlink_tb(env);
bellardea041c02003-06-25 16:16:50 +00001723 }
1724}
1725
Jan Kiszkaec6959d2011-04-13 01:32:56 +02001726CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1727
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001728#else /* CONFIG_USER_ONLY */
1729
1730void cpu_interrupt(CPUState *env, int mask)
1731{
1732 env->interrupt_request |= mask;
1733 cpu_unlink_tb(env);
1734}
1735#endif /* CONFIG_USER_ONLY */
1736
bellardb54ad042004-05-20 13:42:52 +00001737void cpu_reset_interrupt(CPUState *env, int mask)
1738{
1739 env->interrupt_request &= ~mask;
1740}
1741
aurel323098dba2009-03-07 21:28:24 +00001742void cpu_exit(CPUState *env)
1743{
1744 env->exit_request = 1;
1745 cpu_unlink_tb(env);
1746}
1747
blueswir1c7cd6a32008-10-02 18:27:46 +00001748const CPULogItem cpu_log_items[] = {
ths5fafdf22007-09-16 21:08:06 +00001749 { CPU_LOG_TB_OUT_ASM, "out_asm",
bellardf193c792004-03-21 17:06:25 +00001750 "show generated host assembly code for each compiled TB" },
1751 { CPU_LOG_TB_IN_ASM, "in_asm",
1752 "show target assembly code for each compiled TB" },
ths5fafdf22007-09-16 21:08:06 +00001753 { CPU_LOG_TB_OP, "op",
bellard57fec1f2008-02-01 10:50:11 +00001754 "show micro ops for each compiled TB" },
bellardf193c792004-03-21 17:06:25 +00001755 { CPU_LOG_TB_OP_OPT, "op_opt",
blueswir1e01a1152008-03-14 17:37:11 +00001756 "show micro ops "
1757#ifdef TARGET_I386
1758 "before eflags optimization and "
bellardf193c792004-03-21 17:06:25 +00001759#endif
blueswir1e01a1152008-03-14 17:37:11 +00001760 "after liveness analysis" },
bellardf193c792004-03-21 17:06:25 +00001761 { CPU_LOG_INT, "int",
1762 "show interrupts/exceptions in short format" },
1763 { CPU_LOG_EXEC, "exec",
1764 "show trace before each executed TB (lots of logs)" },
bellard9fddaa02004-05-21 12:59:32 +00001765 { CPU_LOG_TB_CPU, "cpu",
thse91c8a72007-06-03 13:35:16 +00001766 "show CPU state before block translation" },
bellardf193c792004-03-21 17:06:25 +00001767#ifdef TARGET_I386
1768 { CPU_LOG_PCALL, "pcall",
1769 "show protected mode far calls/returns/exceptions" },
aliguorieca1bdf2009-01-26 19:54:31 +00001770 { CPU_LOG_RESET, "cpu_reset",
1771 "show CPU state before CPU resets" },
bellardf193c792004-03-21 17:06:25 +00001772#endif
bellard8e3a9fd2004-10-09 17:32:58 +00001773#ifdef DEBUG_IOPORT
bellardfd872592004-05-12 19:11:15 +00001774 { CPU_LOG_IOPORT, "ioport",
1775 "show all i/o ports accesses" },
bellard8e3a9fd2004-10-09 17:32:58 +00001776#endif
bellardf193c792004-03-21 17:06:25 +00001777 { 0, NULL, NULL },
1778};
1779
1780static int cmp1(const char *s1, int n, const char *s2)
1781{
1782 if (strlen(s2) != n)
1783 return 0;
1784 return memcmp(s1, s2, n) == 0;
1785}
ths3b46e622007-09-17 08:09:54 +00001786
bellardf193c792004-03-21 17:06:25 +00001787/* takes a comma-separated list of log masks. Returns 0 on error. */
1788int cpu_str_to_log_mask(const char *str)
1789{
blueswir1c7cd6a32008-10-02 18:27:46 +00001790 const CPULogItem *item;
bellardf193c792004-03-21 17:06:25 +00001791 int mask;
1792 const char *p, *p1;
1793
1794 p = str;
1795 mask = 0;
1796 for(;;) {
1797 p1 = strchr(p, ',');
1798 if (!p1)
1799 p1 = p + strlen(p);
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001800 if(cmp1(p,p1-p,"all")) {
1801 for(item = cpu_log_items; item->mask != 0; item++) {
1802 mask |= item->mask;
1803 }
1804 } else {
1805 for(item = cpu_log_items; item->mask != 0; item++) {
1806 if (cmp1(p, p1 - p, item->name))
1807 goto found;
1808 }
1809 return 0;
bellardf193c792004-03-21 17:06:25 +00001810 }
bellardf193c792004-03-21 17:06:25 +00001811 found:
1812 mask |= item->mask;
1813 if (*p1 != ',')
1814 break;
1815 p = p1 + 1;
1816 }
1817 return mask;
1818}
bellardea041c02003-06-25 16:16:50 +00001819
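/* A disabled sketch of how a -d style option can be wired up: parse the
   comma-separated item list, reject unknown items, then install the
   mask.  The wrapper name is an illustrative assumption. */
#if 0
static void example_enable_logging(const char *arg)
{
    int mask = cpu_str_to_log_mask(arg);   /* e.g. "in_asm,cpu" */

    if (!mask) {
        fprintf(stderr, "unknown log item in '%s'\n", arg);
        return;
    }
    cpu_set_log(mask);
}
#endif
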
bellard75012672003-06-21 13:11:07 +00001820void cpu_abort(CPUState *env, const char *fmt, ...)
1821{
1822 va_list ap;
pbrook493ae1f2007-11-23 16:53:59 +00001823 va_list ap2;
bellard75012672003-06-21 13:11:07 +00001824
1825 va_start(ap, fmt);
pbrook493ae1f2007-11-23 16:53:59 +00001826 va_copy(ap2, ap);
bellard75012672003-06-21 13:11:07 +00001827 fprintf(stderr, "qemu: fatal: ");
1828 vfprintf(stderr, fmt, ap);
1829 fprintf(stderr, "\n");
1830#ifdef TARGET_I386
bellard7fe48482004-10-09 18:08:01 +00001831 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1832#else
1833 cpu_dump_state(env, stderr, fprintf, 0);
bellard75012672003-06-21 13:11:07 +00001834#endif
aliguori93fcfe32009-01-15 22:34:14 +00001835 if (qemu_log_enabled()) {
1836 qemu_log("qemu: fatal: ");
1837 qemu_log_vprintf(fmt, ap2);
1838 qemu_log("\n");
j_mayerf9373292007-09-29 12:18:20 +00001839#ifdef TARGET_I386
aliguori93fcfe32009-01-15 22:34:14 +00001840 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
j_mayerf9373292007-09-29 12:18:20 +00001841#else
aliguori93fcfe32009-01-15 22:34:14 +00001842 log_cpu_state(env, 0);
j_mayerf9373292007-09-29 12:18:20 +00001843#endif
aliguori31b1a7b2009-01-15 22:35:09 +00001844 qemu_log_flush();
aliguori93fcfe32009-01-15 22:34:14 +00001845 qemu_log_close();
balrog924edca2007-06-10 14:07:13 +00001846 }
pbrook493ae1f2007-11-23 16:53:59 +00001847 va_end(ap2);
j_mayerf9373292007-09-29 12:18:20 +00001848 va_end(ap);
Riku Voipiofd052bf2010-01-25 14:30:49 +02001849#if defined(CONFIG_USER_ONLY)
1850 {
1851 struct sigaction act;
1852 sigfillset(&act.sa_mask);
1853 act.sa_handler = SIG_DFL;
1854 sigaction(SIGABRT, &act, NULL);
1855 }
1856#endif
bellard75012672003-06-21 13:11:07 +00001857 abort();
1858}
1859
thsc5be9f02007-02-28 20:20:53 +00001860CPUState *cpu_copy(CPUState *env)
1861{
ths01ba9812007-12-09 02:22:57 +00001862 CPUState *new_env = cpu_init(env->cpu_model_str);
thsc5be9f02007-02-28 20:20:53 +00001863 CPUState *next_cpu = new_env->next_cpu;
1864 int cpu_index = new_env->cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001865#if defined(TARGET_HAS_ICE)
1866 CPUBreakpoint *bp;
1867 CPUWatchpoint *wp;
1868#endif
1869
thsc5be9f02007-02-28 20:20:53 +00001870 memcpy(new_env, env, sizeof(CPUState));
aliguori5a38f082009-01-15 20:16:51 +00001871
1872 /* Preserve chaining and index. */
thsc5be9f02007-02-28 20:20:53 +00001873 new_env->next_cpu = next_cpu;
1874 new_env->cpu_index = cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001875
1876 /* Clone all break/watchpoints.
1877 Note: Once we support ptrace with hw-debug register access, make sure
1878 BP_CPU break/watchpoints are handled correctly on clone. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00001879 QTAILQ_INIT(&new_env->breakpoints); /* reset heads duplicated by the memcpy */
1880 QTAILQ_INIT(&new_env->watchpoints);
aliguori5a38f082009-01-15 20:16:51 +00001881#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001882 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001883 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1884 }
Blue Swirl72cf2d42009-09-12 07:36:22 +00001885 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001886 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1887 wp->flags, NULL);
1888 }
1889#endif
1890
thsc5be9f02007-02-28 20:20:53 +00001891 return new_env;
1892}
1893
bellard01243112004-01-04 15:48:17 +00001894#if !defined(CONFIG_USER_ONLY)
1895
edgar_igl5c751e92008-05-06 08:44:21 +00001896static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1897{
1898 unsigned int i;
1899
1900 /* Discard jump cache entries for any tb which might
1901 overlap the flushed page. */
1902 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1903 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001904 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001905
1906 i = tb_jmp_cache_hash_page(addr);
1907 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001908 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001909}
1910
Igor Kovalenko08738982009-07-12 02:15:40 +04001911static CPUTLBEntry s_cputlb_empty_entry = {
1912 .addr_read = -1,
1913 .addr_write = -1,
1914 .addr_code = -1,
1915 .addend = -1,
1916};
1917
Peter Maydell771124e2012-01-17 13:23:13 +00001918/* NOTE:
1919 * If flush_global is true (the usual case), flush all tlb entries.
1920 * If flush_global is false, flush (at least) all tlb entries not
1921 * marked global.
1922 *
1923 * Since QEMU doesn't currently implement a global/not-global flag
1924 * for tlb entries, at the moment tlb_flush() will also flush all
1925 * tlb entries in the flush_global == false case. This is OK because
1926 * CPU architectures generally permit an implementation to drop
1927 * entries from the TLB at any time, so flushing more entries than
1928 * required is only an efficiency issue, not a correctness issue.
1929 */
bellardee8b7022004-02-03 23:35:10 +00001930void tlb_flush(CPUState *env, int flush_global)
bellard33417e72003-08-10 21:47:01 +00001931{
bellard33417e72003-08-10 21:47:01 +00001932 int i;
bellard01243112004-01-04 15:48:17 +00001933
bellard9fa3e852004-01-04 18:06:42 +00001934#if defined(DEBUG_TLB)
1935 printf("tlb_flush:\n");
1936#endif
bellard01243112004-01-04 15:48:17 +00001937 /* must reset current TB so that interrupts cannot modify the
1938 links while we are modifying them */
1939 env->current_tb = NULL;
1940
bellard33417e72003-08-10 21:47:01 +00001941 for(i = 0; i < CPU_TLB_SIZE; i++) {
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001942 int mmu_idx;
1943 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
Igor Kovalenko08738982009-07-12 02:15:40 +04001944 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001945 }
bellard33417e72003-08-10 21:47:01 +00001946 }
bellard9fa3e852004-01-04 18:06:42 +00001947
bellard8a40a182005-11-20 10:35:40 +00001948 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
bellard9fa3e852004-01-04 18:06:42 +00001949
Paul Brookd4c430a2010-03-17 02:14:28 +00001950 env->tlb_flush_addr = -1;
1951 env->tlb_flush_mask = 0;
bellarde3db7222005-01-26 22:00:47 +00001952 tlb_flush_count++;
bellard33417e72003-08-10 21:47:01 +00001953}
1954
bellard274da6b2004-05-20 21:56:27 +00001955static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
bellard61382a52003-10-27 21:22:23 +00001956{
ths5fafdf22007-09-16 21:08:06 +00001957 if (addr == (tlb_entry->addr_read &
bellard84b7b8e2005-11-28 21:19:04 +00001958 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00001959 addr == (tlb_entry->addr_write &
bellard84b7b8e2005-11-28 21:19:04 +00001960 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00001961 addr == (tlb_entry->addr_code &
bellard84b7b8e2005-11-28 21:19:04 +00001962 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
Igor Kovalenko08738982009-07-12 02:15:40 +04001963 *tlb_entry = s_cputlb_empty_entry;
bellard84b7b8e2005-11-28 21:19:04 +00001964 }
bellard61382a52003-10-27 21:22:23 +00001965}
1966
bellard2e126692004-04-25 21:28:44 +00001967void tlb_flush_page(CPUState *env, target_ulong addr)
bellard33417e72003-08-10 21:47:01 +00001968{
bellard8a40a182005-11-20 10:35:40 +00001969 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001970 int mmu_idx;
bellard01243112004-01-04 15:48:17 +00001971
bellard9fa3e852004-01-04 18:06:42 +00001972#if defined(DEBUG_TLB)
bellard108c49b2005-07-24 12:55:09 +00001973 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
bellard9fa3e852004-01-04 18:06:42 +00001974#endif
Paul Brookd4c430a2010-03-17 02:14:28 +00001975 /* Check if we need to flush due to large pages. */
1976 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
1977#if defined(DEBUG_TLB)
1978 printf("tlb_flush_page: forced full flush ("
1979 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
1980 env->tlb_flush_addr, env->tlb_flush_mask);
1981#endif
1982 tlb_flush(env, 1);
1983 return;
1984 }
bellard01243112004-01-04 15:48:17 +00001985 /* must reset current TB so that interrupts cannot modify the
1986 links while we are modifying them */
1987 env->current_tb = NULL;
bellard33417e72003-08-10 21:47:01 +00001988
bellard61382a52003-10-27 21:22:23 +00001989 addr &= TARGET_PAGE_MASK;
bellard33417e72003-08-10 21:47:01 +00001990 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001991 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1992 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
bellard01243112004-01-04 15:48:17 +00001993
edgar_igl5c751e92008-05-06 08:44:21 +00001994 tlb_flush_jmp_cache(env, addr);
bellard9fa3e852004-01-04 18:06:42 +00001995}
1996
bellard9fa3e852004-01-04 18:06:42 +00001997/* update the TLBs so that writes to code in the virtual page 'addr'
1998 can be detected */
Anthony Liguoric227f092009-10-01 16:12:16 -05001999static void tlb_protect_code(ram_addr_t ram_addr)
bellard61382a52003-10-27 21:22:23 +00002000{
ths5fafdf22007-09-16 21:08:06 +00002001 cpu_physical_memory_reset_dirty(ram_addr,
bellard6a00d602005-11-21 23:25:50 +00002002 ram_addr + TARGET_PAGE_SIZE,
2003 CODE_DIRTY_FLAG);
bellard9fa3e852004-01-04 18:06:42 +00002004}
2005
bellard9fa3e852004-01-04 18:06:42 +00002006/* update the TLB so that writes in physical page 'ram_addr' are no longer
bellard3a7d9292005-08-21 09:26:42 +00002007 tested for self-modifying code */
Anthony Liguoric227f092009-10-01 16:12:16 -05002008static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
bellard3a7d9292005-08-21 09:26:42 +00002009 target_ulong vaddr)
bellard9fa3e852004-01-04 18:06:42 +00002010{
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002011 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
bellard1ccde1c2004-02-06 19:46:14 +00002012}
2013
ths5fafdf22007-09-16 21:08:06 +00002014static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
bellard1ccde1c2004-02-06 19:46:14 +00002015 unsigned long start, unsigned long length)
2016{
2017 unsigned long addr;
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002018 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
bellard84b7b8e2005-11-28 21:19:04 +00002019 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
bellard1ccde1c2004-02-06 19:46:14 +00002020 if ((addr - start) < length) {
pbrook0f459d12008-06-09 00:20:13 +00002021 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
bellard1ccde1c2004-02-06 19:46:14 +00002022 }
2023 }
2024}
2025
pbrook5579c7f2009-04-11 14:47:08 +00002026/* Note: start and end must be within the same ram block. */
Anthony Liguoric227f092009-10-01 16:12:16 -05002027void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
bellard0a962c02005-02-10 22:00:27 +00002028 int dirty_flags)
bellard1ccde1c2004-02-06 19:46:14 +00002029{
2030 CPUState *env;
bellard4f2ac232004-04-26 19:44:02 +00002031 unsigned long length, start1;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002032 int i;
bellard1ccde1c2004-02-06 19:46:14 +00002033
2034 start &= TARGET_PAGE_MASK;
2035 end = TARGET_PAGE_ALIGN(end);
2036
2037 length = end - start;
2038 if (length == 0)
2039 return;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002040 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00002041
bellard1ccde1c2004-02-06 19:46:14 +00002042 /* we modify the TLB cache so that the dirty bit will be set again
2043 when accessing the range */
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02002044 start1 = (unsigned long)qemu_safe_ram_ptr(start);
Stefan Weila57d23e2011-04-30 22:49:26 +02002045 /* Check that we don't span multiple blocks - this breaks the
pbrook5579c7f2009-04-11 14:47:08 +00002046 address comparisons below. */
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02002047 if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
pbrook5579c7f2009-04-11 14:47:08 +00002048 != (end - 1) - start) {
2049 abort();
2050 }
2051
bellard6a00d602005-11-21 23:25:50 +00002052 for(env = first_cpu; env != NULL; env = env->next_cpu) {
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002053 int mmu_idx;
2054 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2055 for(i = 0; i < CPU_TLB_SIZE; i++)
2056 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2057 start1, length);
2058 }
bellard6a00d602005-11-21 23:25:50 +00002059 }
bellard1ccde1c2004-02-06 19:46:14 +00002060}
2061
aliguori74576192008-10-06 14:02:03 +00002062int cpu_physical_memory_set_dirty_tracking(int enable)
2063{
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002064 int ret = 0;
aliguori74576192008-10-06 14:02:03 +00002065 in_migration = enable;
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002066 return ret;
aliguori74576192008-10-06 14:02:03 +00002067}
2068
bellard3a7d9292005-08-21 09:26:42 +00002069static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2070{
Anthony Liguoric227f092009-10-01 16:12:16 -05002071 ram_addr_t ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00002072 void *p;
bellard3a7d9292005-08-21 09:26:42 +00002073
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002074 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
pbrook5579c7f2009-04-11 14:47:08 +00002075 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2076 + tlb_entry->addend);
Marcelo Tosattie8902612010-10-11 15:31:19 -03002077 ram_addr = qemu_ram_addr_from_host_nofail(p);
bellard3a7d9292005-08-21 09:26:42 +00002078 if (!cpu_physical_memory_is_dirty(ram_addr)) {
pbrook0f459d12008-06-09 00:20:13 +00002079 tlb_entry->addr_write |= TLB_NOTDIRTY;
bellard3a7d9292005-08-21 09:26:42 +00002080 }
2081 }
2082}
2083
2084/* update the TLB according to the current state of the dirty bits */
2085void cpu_tlb_update_dirty(CPUState *env)
2086{
2087 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002088 int mmu_idx;
2089 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2090 for(i = 0; i < CPU_TLB_SIZE; i++)
2091 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2092 }
bellard3a7d9292005-08-21 09:26:42 +00002093}
2094
pbrook0f459d12008-06-09 00:20:13 +00002095static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002096{
pbrook0f459d12008-06-09 00:20:13 +00002097 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2098 tlb_entry->addr_write = vaddr;
bellard1ccde1c2004-02-06 19:46:14 +00002099}
2100
pbrook0f459d12008-06-09 00:20:13 +00002101/* update the TLB corresponding to virtual page vaddr
2102 so that it is no longer dirty */
2103static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002104{
bellard1ccde1c2004-02-06 19:46:14 +00002105 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002106 int mmu_idx;
bellard1ccde1c2004-02-06 19:46:14 +00002107
pbrook0f459d12008-06-09 00:20:13 +00002108 vaddr &= TARGET_PAGE_MASK;
bellard1ccde1c2004-02-06 19:46:14 +00002109 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002110 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2111 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
bellard9fa3e852004-01-04 18:06:42 +00002112}
2113
Paul Brookd4c430a2010-03-17 02:14:28 +00002114/* Our TLB does not support large pages, so remember the area covered by
2115 large pages and trigger a full TLB flush if these are invalidated. */
2116static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2117 target_ulong size)
2118{
2119 target_ulong mask = ~(size - 1);
2120
2121 if (env->tlb_flush_addr == (target_ulong)-1) {
2122 env->tlb_flush_addr = vaddr & mask;
2123 env->tlb_flush_mask = mask;
2124 return;
2125 }
2126 /* Extend the existing region to include the new page.
2127 This is a compromise between unnecessary flushes and the cost
2128 of maintaining a full variable size TLB. */
2129 mask &= env->tlb_flush_mask;
2130 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2131 mask <<= 1;
2132 }
2133 env->tlb_flush_addr &= mask;
2134 env->tlb_flush_mask = mask;
2135}
2136
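/* Worked example for the widening loop above (values illustrative):
   suppose a 64KB page is already recorded as
       tlb_flush_addr = 0x12340000, tlb_flush_mask = 0xffff0000
   and a second 64KB page at 0x12390000 is added.  The differing bits
   are 0x000d0000, so the loop widens the mask
       0xffff0000 -> 0xfffe0000 -> 0xfffc0000 -> 0xfff80000 -> 0xfff00000
   and the tracked region becomes 0x12300000/0xfff00000; any
   tlb_flush_page() inside it now triggers a full flush. */
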
Avi Kivity1d393fa2012-01-01 21:15:42 +02002137static bool is_ram_rom(ram_addr_t pd)
2138{
2139 pd &= ~TARGET_PAGE_MASK;
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002140 return pd == io_mem_ram.ram_addr || pd == io_mem_rom.ram_addr;
Avi Kivity1d393fa2012-01-01 21:15:42 +02002141}
2142
Avi Kivity75c578d2012-01-02 15:40:52 +02002143static bool is_romd(ram_addr_t pd)
2144{
2145 MemoryRegion *mr;
2146
2147 pd &= ~TARGET_PAGE_MASK;
Avi Kivity11c7ef02012-01-02 17:21:07 +02002148 mr = io_mem_region[pd];
Avi Kivity75c578d2012-01-02 15:40:52 +02002149 return mr->rom_device && mr->readable;
2150}
2151
Avi Kivity1d393fa2012-01-01 21:15:42 +02002152static bool is_ram_rom_romd(ram_addr_t pd)
2153{
Avi Kivity75c578d2012-01-02 15:40:52 +02002154 return is_ram_rom(pd) || is_romd(pd);
Avi Kivity1d393fa2012-01-01 21:15:42 +02002155}
2156
Paul Brookd4c430a2010-03-17 02:14:28 +00002157/* Add a new TLB entry. At most one entry for a given virtual address
2158 is permitted. Only a single TARGET_PAGE_SIZE region is mapped; the
2159 supplied size is only used by tlb_flush_page. */
2160void tlb_set_page(CPUState *env, target_ulong vaddr,
2161 target_phys_addr_t paddr, int prot,
2162 int mmu_idx, target_ulong size)
bellard9fa3e852004-01-04 18:06:42 +00002163{
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02002164 PhysPageDesc p;
bellard4f2ac232004-04-26 19:44:02 +00002165 unsigned long pd;
bellard9fa3e852004-01-04 18:06:42 +00002166 unsigned int index;
bellard4f2ac232004-04-26 19:44:02 +00002167 target_ulong address;
pbrook0f459d12008-06-09 00:20:13 +00002168 target_ulong code_address;
Paul Brook355b1942010-04-05 00:28:53 +01002169 unsigned long addend;
bellard84b7b8e2005-11-28 21:19:04 +00002170 CPUTLBEntry *te;
aliguoria1d1bb32008-11-18 20:07:32 +00002171 CPUWatchpoint *wp;
Anthony Liguoric227f092009-10-01 16:12:16 -05002172 target_phys_addr_t iotlb;
bellard9fa3e852004-01-04 18:06:42 +00002173
Paul Brookd4c430a2010-03-17 02:14:28 +00002174 assert(size >= TARGET_PAGE_SIZE);
2175 if (size != TARGET_PAGE_SIZE) {
2176 tlb_add_large_page(env, vaddr, size);
2177 }
bellard92e873b2004-05-21 14:52:29 +00002178 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02002179 pd = p.phys_offset;
bellard9fa3e852004-01-04 18:06:42 +00002180#if defined(DEBUG_TLB)
Stefan Weil7fd3f492010-09-30 22:39:51 +02002181 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
2182 " prot=%x idx=%d pd=0x%08lx\n",
2183 vaddr, paddr, prot, mmu_idx, pd);
bellard9fa3e852004-01-04 18:06:42 +00002184#endif
2185
pbrook0f459d12008-06-09 00:20:13 +00002186 address = vaddr;
Avi Kivity1d393fa2012-01-01 21:15:42 +02002187 if (!is_ram_rom_romd(pd)) {
pbrook0f459d12008-06-09 00:20:13 +00002188 /* IO memory case (romd handled later) */
2189 address |= TLB_MMIO;
2190 }
pbrook5579c7f2009-04-11 14:47:08 +00002191 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
Avi Kivity1d393fa2012-01-01 21:15:42 +02002192 if (is_ram_rom(pd)) {
pbrook0f459d12008-06-09 00:20:13 +00002193 /* Normal RAM. */
2194 iotlb = pd & TARGET_PAGE_MASK;
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002195 if ((pd & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr)
2196 iotlb |= io_mem_notdirty.ram_addr;
pbrook0f459d12008-06-09 00:20:13 +00002197 else
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002198 iotlb |= io_mem_rom.ram_addr;
pbrook0f459d12008-06-09 00:20:13 +00002199 } else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002200 /* IO handlers are currently passed a physical address.
pbrook0f459d12008-06-09 00:20:13 +00002201 It would be nice to pass an offset from the base address
2202 of that region. This would avoid having to special case RAM,
2203 and avoid full address decoding in every device.
2204 We can't use the high bits of pd for this because
2205 IO_MEM_ROMD uses these as a ram address. */
pbrook8da3ff12008-12-01 18:59:50 +00002206 iotlb = (pd & ~TARGET_PAGE_MASK);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02002207 iotlb += p.region_offset;
pbrook0f459d12008-06-09 00:20:13 +00002208 }
pbrook6658ffb2007-03-16 23:58:11 +00002209
pbrook0f459d12008-06-09 00:20:13 +00002210 code_address = address;
2211 /* Make accesses to pages with watchpoints go via the
2212 watchpoint trap routines. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00002213 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00002214 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
Jun Koibf298f82010-05-06 14:36:59 +09002215 /* Avoid trapping reads of pages with a write breakpoint. */
2216 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
Avi Kivity1ec9b902012-01-02 12:47:48 +02002217 iotlb = io_mem_watch.ram_addr + paddr;
Jun Koibf298f82010-05-06 14:36:59 +09002218 address |= TLB_MMIO;
2219 break;
2220 }
pbrook6658ffb2007-03-16 23:58:11 +00002221 }
pbrook0f459d12008-06-09 00:20:13 +00002222 }
balrogd79acba2007-06-26 20:01:13 +00002223
pbrook0f459d12008-06-09 00:20:13 +00002224 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2225 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2226 te = &env->tlb_table[mmu_idx][index];
2227 te->addend = addend - vaddr;
2228 if (prot & PAGE_READ) {
2229 te->addr_read = address;
2230 } else {
2231 te->addr_read = -1;
2232 }
edgar_igl5c751e92008-05-06 08:44:21 +00002233
pbrook0f459d12008-06-09 00:20:13 +00002234 if (prot & PAGE_EXEC) {
2235 te->addr_code = code_address;
2236 } else {
2237 te->addr_code = -1;
2238 }
2239 if (prot & PAGE_WRITE) {
Avi Kivity75c578d2012-01-02 15:40:52 +02002240 if ((pd & ~TARGET_PAGE_MASK) == io_mem_rom.ram_addr || is_romd(pd)) {
pbrook0f459d12008-06-09 00:20:13 +00002241 /* Write access calls the I/O callback. */
2242 te->addr_write = address | TLB_MMIO;
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002243 } else if ((pd & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr &&
pbrook0f459d12008-06-09 00:20:13 +00002244 !cpu_physical_memory_is_dirty(pd)) {
2245 te->addr_write = address | TLB_NOTDIRTY;
bellard84b7b8e2005-11-28 21:19:04 +00002246 } else {
pbrook0f459d12008-06-09 00:20:13 +00002247 te->addr_write = address;
bellard9fa3e852004-01-04 18:06:42 +00002248 }
pbrook0f459d12008-06-09 00:20:13 +00002249 } else {
2250 te->addr_write = -1;
bellard9fa3e852004-01-04 18:06:42 +00002251 }
bellard9fa3e852004-01-04 18:06:42 +00002252}
2253
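/* A disabled caller sketch: a target's tlb_fill() handler typically
   ends up here after a successful page-table walk.  The flag
   combination and the use of cpu_mmu_index() are assumptions standing
   in for per-target code that lives outside this file. */
#if 0
static void example_map_one_page(CPUState *env, target_ulong vaddr,
                                 target_phys_addr_t paddr)
{
    tlb_set_page(env, vaddr & TARGET_PAGE_MASK, paddr & TARGET_PAGE_MASK,
                 PAGE_READ | PAGE_WRITE | PAGE_EXEC,
                 cpu_mmu_index(env), TARGET_PAGE_SIZE);
}
#endif
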
bellard01243112004-01-04 15:48:17 +00002254#else
2255
bellardee8b7022004-02-03 23:35:10 +00002256void tlb_flush(CPUState *env, int flush_global)
bellard01243112004-01-04 15:48:17 +00002257{
2258}
2259
bellard2e126692004-04-25 21:28:44 +00002260void tlb_flush_page(CPUState *env, target_ulong addr)
bellard01243112004-01-04 15:48:17 +00002261{
2262}
2263
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002264/*
2265 * Walks guest process memory "regions" one by one
2266 * and calls callback function 'fn' for each region.
2267 */
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002268
2269struct walk_memory_regions_data
bellard9fa3e852004-01-04 18:06:42 +00002270{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002271 walk_memory_regions_fn fn;
2272 void *priv;
2273 unsigned long start;
2274 int prot;
2275};
bellard9fa3e852004-01-04 18:06:42 +00002276
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002277static int walk_memory_regions_end(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00002278 abi_ulong end, int new_prot)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002279{
2280 if (data->start != -1ul) {
2281 int rc = data->fn(data->priv, data->start, end, data->prot);
2282 if (rc != 0) {
2283 return rc;
bellard9fa3e852004-01-04 18:06:42 +00002284 }
bellard33417e72003-08-10 21:47:01 +00002285 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002286
2287 data->start = (new_prot ? end : -1ul);
2288 data->prot = new_prot;
2289
2290 return 0;
2291}
2292
2293static int walk_memory_regions_1(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00002294 abi_ulong base, int level, void **lp)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002295{
Paul Brookb480d9b2010-03-12 23:23:29 +00002296 abi_ulong pa;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002297 int i, rc;
2298
2299 if (*lp == NULL) {
2300 return walk_memory_regions_end(data, base, 0);
2301 }
2302
2303 if (level == 0) {
2304 PageDesc *pd = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00002305 for (i = 0; i < L2_SIZE; ++i) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002306 int prot = pd[i].flags;
2307
2308 pa = base | (i << TARGET_PAGE_BITS);
2309 if (prot != data->prot) {
2310 rc = walk_memory_regions_end(data, pa, prot);
2311 if (rc != 0) {
2312 return rc;
2313 }
2314 }
2315 }
2316 } else {
2317 void **pp = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00002318 for (i = 0; i < L2_SIZE; ++i) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002319 pa = base | ((abi_ulong)i <<
2320 (TARGET_PAGE_BITS + L2_BITS * level));
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002321 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2322 if (rc != 0) {
2323 return rc;
2324 }
2325 }
2326 }
2327
2328 return 0;
2329}
2330
2331int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2332{
2333 struct walk_memory_regions_data data;
2334 unsigned long i;
2335
2336 data.fn = fn;
2337 data.priv = priv;
2338 data.start = -1ul;
2339 data.prot = 0;
2340
2341 for (i = 0; i < V_L1_SIZE; i++) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002342 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002343 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2344 if (rc != 0) {
2345 return rc;
2346 }
2347 }
2348
2349 return walk_memory_regions_end(&data, 0, 0);
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002350}
2351
Paul Brookb480d9b2010-03-12 23:23:29 +00002352static int dump_region(void *priv, abi_ulong start,
2353 abi_ulong end, unsigned long prot)
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002354{
2355 FILE *f = (FILE *)priv;
2356
Paul Brookb480d9b2010-03-12 23:23:29 +00002357 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2358 " "TARGET_ABI_FMT_lx" %c%c%c\n",
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002359 start, end, end - start,
2360 ((prot & PAGE_READ) ? 'r' : '-'),
2361 ((prot & PAGE_WRITE) ? 'w' : '-'),
2362 ((prot & PAGE_EXEC) ? 'x' : '-'));
2363
2364 return (0);
2365}
2366
2367/* dump memory mappings */
2368void page_dump(FILE *f)
2369{
2370 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2371 "start", "end", "size", "prot");
2372 walk_memory_regions(f, dump_region);
bellard33417e72003-08-10 21:47:01 +00002373}
2374
pbrook53a59602006-03-25 19:31:22 +00002375int page_get_flags(target_ulong address)
bellard33417e72003-08-10 21:47:01 +00002376{
bellard9fa3e852004-01-04 18:06:42 +00002377 PageDesc *p;
2378
2379 p = page_find(address >> TARGET_PAGE_BITS);
bellard33417e72003-08-10 21:47:01 +00002380 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00002381 return 0;
2382 return p->flags;
bellard33417e72003-08-10 21:47:01 +00002383}
2384
Richard Henderson376a7902010-03-10 15:57:04 -08002385/* Modify the flags of a page and invalidate the code if necessary.
2386 The flag PAGE_WRITE_ORG is set automatically depending
2387 on PAGE_WRITE. The mmap_lock should already be held. */
pbrook53a59602006-03-25 19:31:22 +00002388void page_set_flags(target_ulong start, target_ulong end, int flags)
bellard9fa3e852004-01-04 18:06:42 +00002389{
Richard Henderson376a7902010-03-10 15:57:04 -08002390 target_ulong addr, len;
bellard9fa3e852004-01-04 18:06:42 +00002391
Richard Henderson376a7902010-03-10 15:57:04 -08002392 /* This function should never be called with addresses outside the
2393 guest address space. If this assert fires, it probably indicates
2394 a missing call to h2g_valid. */
Paul Brookb480d9b2010-03-12 23:23:29 +00002395#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2396 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002397#endif
2398 assert(start < end);
2399
bellard9fa3e852004-01-04 18:06:42 +00002400 start = start & TARGET_PAGE_MASK;
2401 end = TARGET_PAGE_ALIGN(end);
Richard Henderson376a7902010-03-10 15:57:04 -08002402
2403 if (flags & PAGE_WRITE) {
bellard9fa3e852004-01-04 18:06:42 +00002404 flags |= PAGE_WRITE_ORG;
Richard Henderson376a7902010-03-10 15:57:04 -08002405 }
2406
2407 for (addr = start, len = end - start;
2408 len != 0;
2409 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2410 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2411
2412 /* If the write protection bit is set, then we invalidate
2413 the code inside. */
ths5fafdf22007-09-16 21:08:06 +00002414 if (!(p->flags & PAGE_WRITE) &&
bellard9fa3e852004-01-04 18:06:42 +00002415 (flags & PAGE_WRITE) &&
2416 p->first_tb) {
bellardd720b932004-04-25 17:57:43 +00002417 tb_invalidate_phys_page(addr, 0, NULL);
bellard9fa3e852004-01-04 18:06:42 +00002418 }
2419 p->flags = flags;
2420 }
bellard9fa3e852004-01-04 18:06:42 +00002421}
2422
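/* A disabled sketch of the typical caller in user-mode emulation:
   after a successful target mmap(), mark the freshly mapped range.
   The flag choice is an assumption. */
#if 0
static void example_note_anonymous_mapping(target_ulong start,
                                           target_ulong len)
{
    page_set_flags(start, start + len,
                   PAGE_VALID | PAGE_READ | PAGE_WRITE);
}
#endif
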
ths3d97b402007-11-02 19:02:07 +00002423int page_check_range(target_ulong start, target_ulong len, int flags)
2424{
2425 PageDesc *p;
2426 target_ulong end;
2427 target_ulong addr;
2428
Richard Henderson376a7902010-03-10 15:57:04 -08002429 /* This function should never be called with addresses outside the
2430 guest address space. If this assert fires, it probably indicates
2431 a missing call to h2g_valid. */
Blue Swirl338e9e62010-03-13 09:48:08 +00002432#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2433 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002434#endif
2435
Richard Henderson3e0650a2010-03-29 10:54:42 -07002436 if (len == 0) {
2437 return 0;
2438 }
Richard Henderson376a7902010-03-10 15:57:04 -08002439 if (start + len - 1 < start) {
2440 /* We've wrapped around. */
balrog55f280c2008-10-28 10:24:11 +00002441 return -1;
Richard Henderson376a7902010-03-10 15:57:04 -08002442 }
balrog55f280c2008-10-28 10:24:11 +00002443
ths3d97b402007-11-02 19:02:07 +00002444 end = TARGET_PAGE_ALIGN(start+len); /* must do this before we lose bits in the next step */
2445 start = start & TARGET_PAGE_MASK;
2446
Richard Henderson376a7902010-03-10 15:57:04 -08002447 for (addr = start, len = end - start;
2448 len != 0;
2449 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
ths3d97b402007-11-02 19:02:07 +00002450 p = page_find(addr >> TARGET_PAGE_BITS);
2451 if (!p)
2452 return -1;
2453 if (!(p->flags & PAGE_VALID))
2454 return -1;
2455
bellarddae32702007-11-14 10:51:00 +00002456 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
ths3d97b402007-11-02 19:02:07 +00002457 return -1;
bellarddae32702007-11-14 10:51:00 +00002458 if (flags & PAGE_WRITE) {
2459 if (!(p->flags & PAGE_WRITE_ORG))
2460 return -1;
2461 /* unprotect the page if it was made read-only because it
2462 contains translated code */
2463 if (!(p->flags & PAGE_WRITE)) {
2464 if (!page_unprotect(addr, 0, NULL))
2465 return -1;
2466 }
2467 return 0;
2468 }
ths3d97b402007-11-02 19:02:07 +00002469 }
2470 return 0;
2471}
2472
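/* A disabled sketch: user-mode emulation validates guest buffers with
   this pattern before copying data in or out; the wrapper name mirrors,
   but is not, the linux-user access_ok() helper. */
#if 0
static int example_guest_range_writable(target_ulong guest_addr,
                                        target_ulong size)
{
    return page_check_range(guest_addr, size, PAGE_WRITE) == 0;
}
#endif
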
bellard9fa3e852004-01-04 18:06:42 +00002473/* called from signal handler: invalidate the code and unprotect the
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002474 page. Return TRUE if the fault was successfully handled. */
pbrook53a59602006-03-25 19:31:22 +00002475int page_unprotect(target_ulong address, unsigned long pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00002476{
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002477 unsigned int prot;
2478 PageDesc *p;
pbrook53a59602006-03-25 19:31:22 +00002479 target_ulong host_start, host_end, addr;
bellard9fa3e852004-01-04 18:06:42 +00002480
pbrookc8a706f2008-06-02 16:16:42 +00002481 /* Technically this isn't safe inside a signal handler. However we
2482 know this only ever happens in a synchronous SEGV handler, so in
2483 practice it seems to be ok. */
2484 mmap_lock();
2485
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002486 p = page_find(address >> TARGET_PAGE_BITS);
2487 if (!p) {
pbrookc8a706f2008-06-02 16:16:42 +00002488 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002489 return 0;
pbrookc8a706f2008-06-02 16:16:42 +00002490 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002491
bellard9fa3e852004-01-04 18:06:42 +00002492 /* if the page was really writable, then we change its
2493 protection back to writable */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002494 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2495 host_start = address & qemu_host_page_mask;
2496 host_end = host_start + qemu_host_page_size;
2497
2498 prot = 0;
2499 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2500 p = page_find(addr >> TARGET_PAGE_BITS);
2501 p->flags |= PAGE_WRITE;
2502 prot |= p->flags;
2503
bellard9fa3e852004-01-04 18:06:42 +00002504 /* and since the content will be modified, we must invalidate
2505 the corresponding translated code. */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002506 tb_invalidate_phys_page(addr, pc, puc);
bellard9fa3e852004-01-04 18:06:42 +00002507#ifdef DEBUG_TB_CHECK
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002508 tb_invalidate_check(addr);
bellard9fa3e852004-01-04 18:06:42 +00002509#endif
bellard9fa3e852004-01-04 18:06:42 +00002510 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002511 mprotect((void *)g2h(host_start), qemu_host_page_size,
2512 prot & PAGE_BITS);
2513
2514 mmap_unlock();
2515 return 1;
bellard9fa3e852004-01-04 18:06:42 +00002516 }
pbrookc8a706f2008-06-02 16:16:42 +00002517 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002518 return 0;
2519}
2520
bellard6a00d602005-11-21 23:25:50 +00002521static inline void tlb_set_dirty(CPUState *env,
2522 unsigned long addr, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002523{
2524}
bellard9fa3e852004-01-04 18:06:42 +00002525#endif /* defined(CONFIG_USER_ONLY) */
2526
pbrooke2eef172008-06-08 01:09:01 +00002527#if !defined(CONFIG_USER_ONLY)
pbrook8da3ff12008-12-01 18:59:50 +00002528
Paul Brookc04b2b72010-03-01 03:31:14 +00002529#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2530typedef struct subpage_t {
Avi Kivity70c68e42012-01-02 12:32:48 +02002531 MemoryRegion iomem;
Paul Brookc04b2b72010-03-01 03:31:14 +00002532 target_phys_addr_t base;
Avi Kivity5312bd82012-02-12 18:32:55 +02002533 uint16_t sub_section[TARGET_PAGE_SIZE];
Paul Brookc04b2b72010-03-01 03:31:14 +00002534} subpage_t;
2535
Anthony Liguoric227f092009-10-01 16:12:16 -05002536static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002537 uint16_t section);
2538static subpage_t *subpage_init (target_phys_addr_t base, uint16_t *section,
2539 uint16_t orig_section);
blueswir1db7b5422007-05-26 17:36:03 +00002540#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2541 need_subpage) \
2542 do { \
2543 if (addr > start_addr) \
2544 start_addr2 = 0; \
2545 else { \
2546 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2547 if (start_addr2 > 0) \
2548 need_subpage = 1; \
2549 } \
2550 \
blueswir149e9fba2007-05-30 17:25:06 +00002551 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
blueswir1db7b5422007-05-26 17:36:03 +00002552 end_addr2 = TARGET_PAGE_SIZE - 1; \
2553 else { \
2554 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2555 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2556 need_subpage = 1; \
2557 } \
2558 } while (0)
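
/* Editor's worked example (assumes TARGET_PAGE_SIZE == 4096): for the
   page at addr 0x1000 and a registration with start_addr 0x1080 and
   orig_size 0x100, addr > start_addr is false, so start_addr2 =
   0x1080 & ~TARGET_PAGE_MASK = 0x080 and need_subpage becomes 1;
   (start_addr + orig_size) - addr = 0x180 < TARGET_PAGE_SIZE, so
   end_addr2 = 0x117f & ~TARGET_PAGE_MASK = 0x17f.  Only bytes
   0x080..0x17f of the page are covered, hence the subpage. */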
2559
Avi Kivity5312bd82012-02-12 18:32:55 +02002560static void destroy_page_desc(uint16_t section_index)
Avi Kivity54688b12012-02-09 17:34:32 +02002561{
Avi Kivity5312bd82012-02-12 18:32:55 +02002562 MemoryRegionSection *section = &phys_sections[section_index];
2563 MemoryRegion *mr = section->mr;
Avi Kivity54688b12012-02-09 17:34:32 +02002564
2565 if (mr->subpage) {
2566 subpage_t *subpage = container_of(mr, subpage_t, iomem);
2567 memory_region_destroy(&subpage->iomem);
2568 g_free(subpage);
2569 }
2570}
2571
Avi Kivity4346ae32012-02-10 17:00:01 +02002572static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
Avi Kivity54688b12012-02-09 17:34:32 +02002573{
2574 unsigned i;
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002575 PhysPageEntry *p;
Avi Kivity54688b12012-02-09 17:34:32 +02002576
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002577 if (lp->u.node == PHYS_MAP_NODE_NIL) {
Avi Kivity54688b12012-02-09 17:34:32 +02002578 return;
2579 }
2580
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002581 p = phys_map_nodes[lp->u.node];
Avi Kivity4346ae32012-02-10 17:00:01 +02002582 for (i = 0; i < L2_SIZE; ++i) {
2583 if (level > 0) {
Avi Kivity54688b12012-02-09 17:34:32 +02002584 destroy_l2_mapping(&p[i], level - 1);
Avi Kivity4346ae32012-02-10 17:00:01 +02002585 } else {
2586 destroy_page_desc(p[i].u.leaf);
Avi Kivity54688b12012-02-09 17:34:32 +02002587 }
Avi Kivity54688b12012-02-09 17:34:32 +02002588 }
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002589 lp->u.node = PHYS_MAP_NODE_NIL;
Avi Kivity54688b12012-02-09 17:34:32 +02002590}
2591
2592static void destroy_all_mappings(void)
2593{
Avi Kivity3eef53d2012-02-10 14:57:31 +02002594 destroy_l2_mapping(&phys_map, P_L2_LEVELS - 1);
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002595 phys_map_nodes_reset();
Avi Kivity54688b12012-02-09 17:34:32 +02002596}
2597
Avi Kivity5312bd82012-02-12 18:32:55 +02002598static uint16_t phys_section_add(MemoryRegionSection *section)
2599{
2600 if (phys_sections_nb == phys_sections_nb_alloc) {
2601 phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
2602 phys_sections = g_renew(MemoryRegionSection, phys_sections,
2603 phys_sections_nb_alloc);
2604 }
2605 phys_sections[phys_sections_nb] = *section;
2606 return phys_sections_nb++;
2607}
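
/* Editor's sketch: a section index returned by phys_section_add() is
   what the physical page table leaves store; it stays valid until the
   next phys_sections_clear().  Resolving one back to a section is a
   plain array lookup (illustrative helper, not in the original): */
#if 0
static MemoryRegionSection *example_section_lookup(uint16_t section_index)
{
    assert(section_index < phys_sections_nb);
    return &phys_sections[section_index];
}
#endif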
2608
2609static void phys_sections_clear(void)
2610{
2611 phys_sections_nb = 0;
2612}
2613
Michael S. Tsirkin8f2498f2009-09-29 18:53:16 +02002614/* register physical memory.
 2615 For RAM, the section size must be a multiple of the target page size.
 2616 For IO, the address used when calling the IO access function is
pbrook8da3ff12008-12-01 18:59:50 +00002617 the offset from the start of the region, plus the section's
 2618 offset_within_region. Both offset_within_address_space and
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002619 offset_within_region are rounded down to a page boundary
pbrook8da3ff12008-12-01 18:59:50 +00002620 before calculating this offset. This should not be a problem unless
 2621 their low bits differ. */
Avi Kivitydd811242012-01-02 12:17:03 +02002622void cpu_register_physical_memory_log(MemoryRegionSection *section,
Avi Kivityd7ec83e2012-02-08 17:07:26 +02002623 bool readonly)
bellard33417e72003-08-10 21:47:01 +00002624{
Avi Kivitydd811242012-01-02 12:17:03 +02002625 target_phys_addr_t start_addr = section->offset_within_address_space;
2626 ram_addr_t size = section->size;
Anthony Liguoric227f092009-10-01 16:12:16 -05002627 target_phys_addr_t addr, end_addr;
Avi Kivity5312bd82012-02-12 18:32:55 +02002628 uint16_t *p;
bellard9d420372006-06-25 22:25:22 +00002629 CPUState *env;
Anthony Liguoric227f092009-10-01 16:12:16 -05002630 ram_addr_t orig_size = size;
Richard Hendersonf6405242010-04-22 16:47:31 -07002631 subpage_t *subpage;
Avi Kivity5312bd82012-02-12 18:32:55 +02002632 uint16_t section_index = phys_section_add(section);
Avi Kivitydd811242012-01-02 12:17:03 +02002633
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002634 assert(size);
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002635
bellard5fd386f2004-05-23 21:11:22 +00002636 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
Anthony Liguoric227f092009-10-01 16:12:16 -05002637 end_addr = start_addr + (target_phys_addr_t)size;
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002638
2639 addr = start_addr;
2640 do {
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02002641 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 0);
Avi Kivity5312bd82012-02-12 18:32:55 +02002642 if (p && *p != phys_section_unassigned) {
 2643 uint16_t orig_memory = *p;
Anthony Liguoric227f092009-10-01 16:12:16 -05002644 target_phys_addr_t start_addr2, end_addr2;
blueswir1db7b5422007-05-26 17:36:03 +00002645 int need_subpage = 0;
Avi Kivity5312bd82012-02-12 18:32:55 +02002646 MemoryRegion *mr = phys_sections[orig_memory].mr;
blueswir1db7b5422007-05-26 17:36:03 +00002647
2648 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2649 need_subpage);
Richard Hendersonf6405242010-04-22 16:47:31 -07002650 if (need_subpage) {
Avi Kivityb3b00c72012-01-02 13:20:11 +02002651 if (!(mr->subpage)) {
blueswir1db7b5422007-05-26 17:36:03 +00002652 subpage = subpage_init((addr & TARGET_PAGE_MASK),
Avi Kivity5312bd82012-02-12 18:32:55 +02002653 p, orig_memory);
blueswir1db7b5422007-05-26 17:36:03 +00002654 } else {
Avi Kivitya621f382012-01-02 13:12:08 +02002655 subpage = container_of(mr, subpage_t, iomem);
blueswir1db7b5422007-05-26 17:36:03 +00002656 }
Avi Kivity5312bd82012-02-12 18:32:55 +02002657 subpage_register(subpage, start_addr2, end_addr2,
2658 section_index);
blueswir1db7b5422007-05-26 17:36:03 +00002659 } else {
Avi Kivity5312bd82012-02-12 18:32:55 +02002660 *p = section_index;
blueswir1db7b5422007-05-26 17:36:03 +00002661 }
2662 } else {
Avi Kivity8636b922012-02-12 21:10:50 +02002663 target_phys_addr_t start_addr2, end_addr2;
2664 int need_subpage = 0;
2665
blueswir1db7b5422007-05-26 17:36:03 +00002666 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
Avi Kivity5312bd82012-02-12 18:32:55 +02002667 *p = section_index;
blueswir1db7b5422007-05-26 17:36:03 +00002668
Avi Kivity8636b922012-02-12 21:10:50 +02002669 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2670 end_addr2, need_subpage);
blueswir1db7b5422007-05-26 17:36:03 +00002671
Avi Kivity8636b922012-02-12 21:10:50 +02002672 if (need_subpage) {
2673 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2674 p, phys_section_unassigned);
2675 subpage_register(subpage, start_addr2, end_addr2,
2676 section_index);
blueswir1db7b5422007-05-26 17:36:03 +00002677 }
2678 }
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002679 addr += TARGET_PAGE_SIZE;
2680 } while (addr != end_addr);
ths3b46e622007-09-17 08:09:54 +00002681
bellard9d420372006-06-25 22:25:22 +00002682 /* since each CPU stores ram addresses in its TLB cache, we must
2683 reset the modified entries */
2684 /* XXX: slow ! */
2685 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2686 tlb_flush(env, 1);
2687 }
bellard33417e72003-08-10 21:47:01 +00002688}
2689
Anthony Liguoric227f092009-10-01 16:12:16 -05002690void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002691{
2692 if (kvm_enabled())
2693 kvm_coalesce_mmio_region(addr, size);
2694}
2695
Anthony Liguoric227f092009-10-01 16:12:16 -05002696void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002697{
2698 if (kvm_enabled())
2699 kvm_uncoalesce_mmio_region(addr, size);
2700}
2701
Sheng Yang62a27442010-01-26 19:21:16 +08002702void qemu_flush_coalesced_mmio_buffer(void)
2703{
2704 if (kvm_enabled())
2705 kvm_flush_coalesced_mmio_buffer();
2706}
2707
Marcelo Tosattic9027602010-03-01 20:25:08 -03002708#if defined(__linux__) && !defined(TARGET_S390X)
2709
2710#include <sys/vfs.h>
2711
2712#define HUGETLBFS_MAGIC 0x958458f6
2713
2714static long gethugepagesize(const char *path)
2715{
2716 struct statfs fs;
2717 int ret;
2718
2719 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002720 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002721 } while (ret != 0 && errno == EINTR);
2722
2723 if (ret != 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002724 perror(path);
2725 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002726 }
2727
2728 if (fs.f_type != HUGETLBFS_MAGIC)
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002729 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002730
2731 return fs.f_bsize;
2732}
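
/* Editor's sketch: callers must round sizes up to the value returned
   by gethugepagesize(), as file_ram_alloc() does below.  Illustrative
   helper under that assumption: */
#if 0
static ram_addr_t example_hugepage_align(ram_addr_t size, const char *path)
{
    long hpagesize = gethugepagesize(path);

    if (hpagesize <= 0) {
        return size; /* not a hugetlbfs mount: leave the size alone */
    }
    return (size + hpagesize - 1) & ~((ram_addr_t)hpagesize - 1);
}
#endif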
2733
Alex Williamson04b16652010-07-02 11:13:17 -06002734static void *file_ram_alloc(RAMBlock *block,
2735 ram_addr_t memory,
2736 const char *path)
Marcelo Tosattic9027602010-03-01 20:25:08 -03002737{
2738 char *filename;
2739 void *area;
2740 int fd;
2741#ifdef MAP_POPULATE
2742 int flags;
2743#endif
2744 unsigned long hpagesize;
2745
2746 hpagesize = gethugepagesize(path);
2747 if (!hpagesize) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002748 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002749 }
2750
2751 if (memory < hpagesize) {
2752 return NULL;
2753 }
2754
2755 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2756 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2757 return NULL;
2758 }
2759
2760 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002761 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002762 }
2763
2764 fd = mkstemp(filename);
2765 if (fd < 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002766 perror("unable to create backing store for hugepages");
2767 free(filename);
2768 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002769 }
2770 unlink(filename);
2771 free(filename);
2772
 2773 memory = (memory + hpagesize - 1) & ~(hpagesize - 1);
2774
2775 /*
2776 * ftruncate is not supported by hugetlbfs in older
2777 * hosts, so don't bother bailing out on errors.
2778 * If anything goes wrong with it under other filesystems,
2779 * mmap will fail.
2780 */
2781 if (ftruncate(fd, memory))
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002782 perror("ftruncate");
Marcelo Tosattic9027602010-03-01 20:25:08 -03002783
2784#ifdef MAP_POPULATE
2785 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2786 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2787 * to sidestep this quirk.
2788 */
2789 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2790 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2791#else
2792 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2793#endif
2794 if (area == MAP_FAILED) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002795 perror("file_ram_alloc: can't mmap RAM pages");
2796 close(fd);
2797 return (NULL);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002798 }
Alex Williamson04b16652010-07-02 11:13:17 -06002799 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002800 return area;
2801}
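
/* Editor's note: this allocator is reached when the user passes
   "-mem-path /dev/hugepages" (and optionally "-mem-prealloc", which
   turns on the MAP_POPULATE path above) so that guest RAM is backed
   by hugetlbfs. */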
2802#endif
2803
Alex Williamsond17b5282010-06-25 11:08:38 -06002804static ram_addr_t find_ram_offset(ram_addr_t size)
2805{
Alex Williamson04b16652010-07-02 11:13:17 -06002806 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06002807 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002808
2809 if (QLIST_EMPTY(&ram_list.blocks))
2810 return 0;
2811
2812 QLIST_FOREACH(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002813 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002814
2815 end = block->offset + block->length;
2816
2817 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2818 if (next_block->offset >= end) {
2819 next = MIN(next, next_block->offset);
2820 }
2821 }
2822 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06002823 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06002824 mingap = next - end;
2825 }
2826 }
Alex Williamson3e837b22011-10-31 08:54:09 -06002827
2828 if (offset == RAM_ADDR_MAX) {
2829 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2830 (uint64_t)size);
2831 abort();
2832 }
2833
Alex Williamson04b16652010-07-02 11:13:17 -06002834 return offset;
2835}
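
/* Editor's worked example: with blocks at [0x0, 0x1000) and
   [0x2000, 0x3000) and a third at [0x5000, ...), the candidate gaps
   are [0x1000, 0x2000) of size 0x1000 and [0x3000, 0x5000) of size
   0x2000.  A request for 0x800 bytes returns offset 0x1000: both gaps
   fit, but best-fit keeps mingap smallest to limit fragmentation. */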
2836
2837static ram_addr_t last_ram_offset(void)
2838{
Alex Williamsond17b5282010-06-25 11:08:38 -06002839 RAMBlock *block;
2840 ram_addr_t last = 0;
2841
2842 QLIST_FOREACH(block, &ram_list.blocks, next)
2843 last = MAX(last, block->offset + block->length);
2844
2845 return last;
2846}
2847
Avi Kivityc5705a72011-12-20 15:59:12 +02002848void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
Cam Macdonell84b89d72010-07-26 18:10:57 -06002849{
2850 RAMBlock *new_block, *block;
2851
Avi Kivityc5705a72011-12-20 15:59:12 +02002852 new_block = NULL;
2853 QLIST_FOREACH(block, &ram_list.blocks, next) {
2854 if (block->offset == addr) {
2855 new_block = block;
2856 break;
2857 }
2858 }
2859 assert(new_block);
2860 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002861
2862 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2863 char *id = dev->parent_bus->info->get_dev_path(dev);
2864 if (id) {
2865 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05002866 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002867 }
2868 }
2869 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2870
2871 QLIST_FOREACH(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02002872 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06002873 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2874 new_block->idstr);
2875 abort();
2876 }
2877 }
Avi Kivityc5705a72011-12-20 15:59:12 +02002878}
2879
2880ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2881 MemoryRegion *mr)
2882{
2883 RAMBlock *new_block;
2884
2885 size = TARGET_PAGE_ALIGN(size);
2886 new_block = g_malloc0(sizeof(*new_block));
Cam Macdonell84b89d72010-07-26 18:10:57 -06002887
Avi Kivity7c637362011-12-21 13:09:49 +02002888 new_block->mr = mr;
Jun Nakajima432d2682010-08-31 16:41:25 +01002889 new_block->offset = find_ram_offset(size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002890 if (host) {
2891 new_block->host = host;
Huang Yingcd19cfa2011-03-02 08:56:19 +01002892 new_block->flags |= RAM_PREALLOC_MASK;
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002893 } else {
2894 if (mem_path) {
2895#if defined (__linux__) && !defined(TARGET_S390X)
2896 new_block->host = file_ram_alloc(new_block, size, mem_path);
2897 if (!new_block->host) {
2898 new_block->host = qemu_vmalloc(size);
Andreas Färbere78815a2010-09-25 11:26:05 +00002899 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002900 }
2901#else
2902 fprintf(stderr, "-mem-path option unsupported\n");
2903 exit(1);
2904#endif
2905 } else {
2906#if defined(TARGET_S390X) && defined(CONFIG_KVM)
Christian Borntraegerff836782011-05-10 14:49:10 +02002907 /* S390 KVM requires the topmost vma of the RAM to be smaller than
 2908 a system-defined value, which is at least 256GB. Larger systems
2909 have larger values. We put the guest between the end of data
2910 segment (system break) and this value. We use 32GB as a base to
2911 have enough room for the system break to grow. */
2912 new_block->host = mmap((void*)0x800000000, size,
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002913 PROT_EXEC|PROT_READ|PROT_WRITE,
Christian Borntraegerff836782011-05-10 14:49:10 +02002914 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
Alexander Graffb8b2732011-05-20 17:33:28 +02002915 if (new_block->host == MAP_FAILED) {
2916 fprintf(stderr, "Allocating RAM failed\n");
2917 abort();
2918 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002919#else
Jan Kiszka868bb332011-06-21 22:59:09 +02002920 if (xen_enabled()) {
Avi Kivityfce537d2011-12-18 15:48:55 +02002921 xen_ram_alloc(new_block->offset, size, mr);
Jun Nakajima432d2682010-08-31 16:41:25 +01002922 } else {
2923 new_block->host = qemu_vmalloc(size);
2924 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002925#endif
Andreas Färbere78815a2010-09-25 11:26:05 +00002926 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002927 }
2928 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06002929 new_block->length = size;
2930
2931 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2932
Anthony Liguori7267c092011-08-20 22:09:37 -05002933 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
Cam Macdonell84b89d72010-07-26 18:10:57 -06002934 last_ram_offset() >> TARGET_PAGE_BITS);
2935 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2936 0xff, size >> TARGET_PAGE_BITS);
2937
2938 if (kvm_enabled())
2939 kvm_setup_guest_memory(new_block->host, size);
2940
2941 return new_block->offset;
2942}
2943
Avi Kivityc5705a72011-12-20 15:59:12 +02002944ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
pbrook94a6b542009-04-11 17:15:54 +00002945{
Avi Kivityc5705a72011-12-20 15:59:12 +02002946 return qemu_ram_alloc_from_ptr(size, NULL, mr);
pbrook94a6b542009-04-11 17:15:54 +00002947}
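
/* Editor's sketch: a typical device-side call sequence.  The region
   name "vga.vram" and the size are illustrative, and in practice this
   is normally driven through memory_region_init_ram(). */
#if 0
static void example_alloc_vram(void)
{
    MemoryRegion *mr = g_malloc0(sizeof(*mr));
    ram_addr_t offset = qemu_ram_alloc(8 * 1024 * 1024, mr);

    qemu_ram_set_idstr(offset, "vga.vram", NULL);
    /* host pointer to the freshly allocated block */
    memset(qemu_get_ram_ptr(offset), 0, 8 * 1024 * 1024);
}
#endif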
bellarde9a1ab12007-02-08 23:08:38 +00002948
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002949void qemu_ram_free_from_ptr(ram_addr_t addr)
2950{
2951 RAMBlock *block;
2952
2953 QLIST_FOREACH(block, &ram_list.blocks, next) {
2954 if (addr == block->offset) {
2955 QLIST_REMOVE(block, next);
Anthony Liguori7267c092011-08-20 22:09:37 -05002956 g_free(block);
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002957 return;
2958 }
2959 }
2960}
2961
Anthony Liguoric227f092009-10-01 16:12:16 -05002962void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00002963{
Alex Williamson04b16652010-07-02 11:13:17 -06002964 RAMBlock *block;
2965
2966 QLIST_FOREACH(block, &ram_list.blocks, next) {
2967 if (addr == block->offset) {
2968 QLIST_REMOVE(block, next);
Huang Yingcd19cfa2011-03-02 08:56:19 +01002969 if (block->flags & RAM_PREALLOC_MASK) {
2970 ;
2971 } else if (mem_path) {
Alex Williamson04b16652010-07-02 11:13:17 -06002972#if defined (__linux__) && !defined(TARGET_S390X)
2973 if (block->fd) {
2974 munmap(block->host, block->length);
2975 close(block->fd);
2976 } else {
2977 qemu_vfree(block->host);
2978 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01002979#else
2980 abort();
Alex Williamson04b16652010-07-02 11:13:17 -06002981#endif
2982 } else {
2983#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2984 munmap(block->host, block->length);
2985#else
Jan Kiszka868bb332011-06-21 22:59:09 +02002986 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002987 xen_invalidate_map_cache_entry(block->host);
Jun Nakajima432d2682010-08-31 16:41:25 +01002988 } else {
2989 qemu_vfree(block->host);
2990 }
Alex Williamson04b16652010-07-02 11:13:17 -06002991#endif
2992 }
Anthony Liguori7267c092011-08-20 22:09:37 -05002993 g_free(block);
Alex Williamson04b16652010-07-02 11:13:17 -06002994 return;
2995 }
2996 }
2997
bellarde9a1ab12007-02-08 23:08:38 +00002998}
2999
Huang Yingcd19cfa2011-03-02 08:56:19 +01003000#ifndef _WIN32
3001void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
3002{
3003 RAMBlock *block;
3004 ram_addr_t offset;
3005 int flags;
3006 void *area, *vaddr;
3007
3008 QLIST_FOREACH(block, &ram_list.blocks, next) {
3009 offset = addr - block->offset;
3010 if (offset < block->length) {
3011 vaddr = block->host + offset;
3012 if (block->flags & RAM_PREALLOC_MASK) {
3013 ;
3014 } else {
3015 flags = MAP_FIXED;
3016 munmap(vaddr, length);
3017 if (mem_path) {
3018#if defined(__linux__) && !defined(TARGET_S390X)
3019 if (block->fd) {
3020#ifdef MAP_POPULATE
3021 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
3022 MAP_PRIVATE;
3023#else
3024 flags |= MAP_PRIVATE;
3025#endif
3026 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3027 flags, block->fd, offset);
3028 } else {
3029 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3030 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3031 flags, -1, 0);
3032 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01003033#else
3034 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01003035#endif
3036 } else {
3037#if defined(TARGET_S390X) && defined(CONFIG_KVM)
3038 flags |= MAP_SHARED | MAP_ANONYMOUS;
3039 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
3040 flags, -1, 0);
3041#else
3042 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3043 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3044 flags, -1, 0);
3045#endif
3046 }
3047 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00003048 fprintf(stderr, "Could not remap addr: "
3049 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01003050 length, addr);
3051 exit(1);
3052 }
3053 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
3054 }
3055 return;
3056 }
3057 }
3058}
3059#endif /* !_WIN32 */
3060
pbrookdc828ca2009-04-09 22:21:07 +00003061/* Return a host pointer to ram allocated with qemu_ram_alloc.
pbrook5579c7f2009-04-11 14:47:08 +00003062 With the exception of the softmmu code in this file, this should
3063 only be used for local memory (e.g. video ram) that the device owns,
3064 and knows it isn't going to access beyond the end of the block.
3065
3066 It should not be used for general purpose DMA.
3067 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
3068 */
Anthony Liguoric227f092009-10-01 16:12:16 -05003069void *qemu_get_ram_ptr(ram_addr_t addr)
pbrookdc828ca2009-04-09 22:21:07 +00003070{
pbrook94a6b542009-04-11 17:15:54 +00003071 RAMBlock *block;
3072
Alex Williamsonf471a172010-06-11 11:11:42 -06003073 QLIST_FOREACH(block, &ram_list.blocks, next) {
3074 if (addr - block->offset < block->length) {
Vincent Palatin7d82af32011-03-10 15:47:46 -05003075 /* Move this entry to the start of the list. */
3076 if (block != QLIST_FIRST(&ram_list.blocks)) {
3077 QLIST_REMOVE(block, next);
3078 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
3079 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003080 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003081 /* We need to check if the requested address is in the RAM
3082 * because we don't want to map the entire memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003083 * In that case just map until the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01003084 */
3085 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003086 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01003087 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003088 block->host =
3089 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01003090 }
3091 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003092 return block->host + (addr - block->offset);
3093 }
pbrook94a6b542009-04-11 17:15:54 +00003094 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003095
3096 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3097 abort();
3098
3099 return NULL;
pbrookdc828ca2009-04-09 22:21:07 +00003100}
3101
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02003102/* Return a host pointer to ram allocated with qemu_ram_alloc.
3103 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3104 */
3105void *qemu_safe_ram_ptr(ram_addr_t addr)
3106{
3107 RAMBlock *block;
3108
3109 QLIST_FOREACH(block, &ram_list.blocks, next) {
3110 if (addr - block->offset < block->length) {
Jan Kiszka868bb332011-06-21 22:59:09 +02003111 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003112 /* We need to check if the requested address is in the RAM
3113 * because we don't want to map the entire memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003114 * In that case just map until the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01003115 */
3116 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003117 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01003118 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003119 block->host =
3120 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01003121 }
3122 }
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02003123 return block->host + (addr - block->offset);
3124 }
3125 }
3126
3127 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3128 abort();
3129
3130 return NULL;
3131}
3132
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003133/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
3134 * but takes a size argument */
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003135void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003136{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003137 if (*size == 0) {
3138 return NULL;
3139 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003140 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003141 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02003142 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003143 RAMBlock *block;
3144
3145 QLIST_FOREACH(block, &ram_list.blocks, next) {
3146 if (addr - block->offset < block->length) {
3147 if (addr - block->offset + *size > block->length)
3148 *size = block->length - addr + block->offset;
3149 return block->host + (addr - block->offset);
3150 }
3151 }
3152
3153 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3154 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003155 }
3156}
3157
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003158void qemu_put_ram_ptr(void *addr)
3159{
3160 trace_qemu_put_ram_ptr(addr);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003161}
3162
Marcelo Tosattie8902612010-10-11 15:31:19 -03003163int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00003164{
pbrook94a6b542009-04-11 17:15:54 +00003165 RAMBlock *block;
3166 uint8_t *host = ptr;
3167
Jan Kiszka868bb332011-06-21 22:59:09 +02003168 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003169 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003170 return 0;
3171 }
3172
Alex Williamsonf471a172010-06-11 11:11:42 -06003173 QLIST_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003174 /* This case happens when the block is not mapped. */
3175 if (block->host == NULL) {
3176 continue;
3177 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003178 if (host - block->host < block->length) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03003179 *ram_addr = block->offset + (host - block->host);
3180 return 0;
Alex Williamsonf471a172010-06-11 11:11:42 -06003181 }
pbrook94a6b542009-04-11 17:15:54 +00003182 }
Jun Nakajima432d2682010-08-31 16:41:25 +01003183
Marcelo Tosattie8902612010-10-11 15:31:19 -03003184 return -1;
3185}
Alex Williamsonf471a172010-06-11 11:11:42 -06003186
Marcelo Tosattie8902612010-10-11 15:31:19 -03003187/* Some of the softmmu routines need to translate from a host pointer
3188 (typically a TLB entry) back to a ram offset. */
3189ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3190{
3191 ram_addr_t ram_addr;
Alex Williamsonf471a172010-06-11 11:11:42 -06003192
Marcelo Tosattie8902612010-10-11 15:31:19 -03003193 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3194 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3195 abort();
3196 }
3197 return ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00003198}
3199
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003200static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
3201 unsigned size)
bellard33417e72003-08-10 21:47:01 +00003202{
pbrook67d3b952006-12-18 05:03:52 +00003203#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00003204 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
pbrook67d3b952006-12-18 05:03:52 +00003205#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003206#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003207 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00003208#endif
3209 return 0;
3210}
3211
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003212static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
3213 uint64_t val, unsigned size)
blueswir1e18231a2008-10-06 18:46:28 +00003214{
3215#ifdef DEBUG_UNASSIGNED
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003216 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
blueswir1e18231a2008-10-06 18:46:28 +00003217#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003218#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003219 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00003220#endif
3221}
3222
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003223static const MemoryRegionOps unassigned_mem_ops = {
3224 .read = unassigned_mem_read,
3225 .write = unassigned_mem_write,
3226 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00003227};
3228
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003229static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
3230 unsigned size)
3231{
3232 abort();
3233}
3234
3235static void error_mem_write(void *opaque, target_phys_addr_t addr,
3236 uint64_t value, unsigned size)
3237{
3238 abort();
3239}
3240
3241static const MemoryRegionOps error_mem_ops = {
3242 .read = error_mem_read,
3243 .write = error_mem_write,
3244 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00003245};
3246
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003247static const MemoryRegionOps rom_mem_ops = {
3248 .read = error_mem_read,
3249 .write = unassigned_mem_write,
3250 .endianness = DEVICE_NATIVE_ENDIAN,
3251};
3252
3253static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
3254 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00003255{
bellard3a7d9292005-08-21 09:26:42 +00003256 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003257 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003258 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3259#if !defined(CONFIG_USER_ONLY)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003260 tb_invalidate_phys_page_fast(ram_addr, size);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003261 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003262#endif
3263 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003264 switch (size) {
3265 case 1:
3266 stb_p(qemu_get_ram_ptr(ram_addr), val);
3267 break;
3268 case 2:
3269 stw_p(qemu_get_ram_ptr(ram_addr), val);
3270 break;
3271 case 4:
3272 stl_p(qemu_get_ram_ptr(ram_addr), val);
3273 break;
3274 default:
3275 abort();
3276 }
bellardf23db162005-08-21 19:12:28 +00003277 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003278 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00003279 /* we remove the notdirty callback only if the code has been
3280 flushed */
3281 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00003282 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00003283}
3284
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003285static const MemoryRegionOps notdirty_mem_ops = {
3286 .read = error_mem_read,
3287 .write = notdirty_mem_write,
3288 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00003289};
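
/* Editor's note on the flow above: a store to a clean RAM page is
   routed through io_mem_notdirty rather than written directly.  The
   handler invalidates any TBs on the page, performs the store, sets
   the dirty bits, and once all of them (including CODE_DIRTY_FLAG)
   are set, tlb_set_dirty() re-enables direct writes so subsequent
   stores bypass this slow path. */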
3290
pbrook0f459d12008-06-09 00:20:13 +00003291/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00003292static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00003293{
3294 CPUState *env = cpu_single_env;
aliguori06d55cc2008-11-18 20:24:06 +00003295 target_ulong pc, cs_base;
3296 TranslationBlock *tb;
pbrook0f459d12008-06-09 00:20:13 +00003297 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00003298 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00003299 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00003300
aliguori06d55cc2008-11-18 20:24:06 +00003301 if (env->watchpoint_hit) {
3302 /* We re-entered the check after replacing the TB. Now raise
 3303 * the debug interrupt so that it will trigger after the
3304 * current instruction. */
3305 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3306 return;
3307 }
pbrook2e70f6e2008-06-29 01:03:05 +00003308 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003309 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00003310 if ((vaddr == (wp->vaddr & len_mask) ||
3311 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00003312 wp->flags |= BP_WATCHPOINT_HIT;
3313 if (!env->watchpoint_hit) {
3314 env->watchpoint_hit = wp;
3315 tb = tb_find_pc(env->mem_io_pc);
3316 if (!tb) {
3317 cpu_abort(env, "check_watchpoint: could not find TB for "
3318 "pc=%p", (void *)env->mem_io_pc);
3319 }
Stefan Weil618ba8e2011-04-18 06:39:53 +00003320 cpu_restore_state(tb, env, env->mem_io_pc);
aliguori6e140f22008-11-18 20:37:55 +00003321 tb_phys_invalidate(tb, -1);
3322 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3323 env->exception_index = EXCP_DEBUG;
3324 } else {
3325 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3326 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3327 }
3328 cpu_resume_from_signal(env, NULL);
aliguori06d55cc2008-11-18 20:24:06 +00003329 }
aliguori6e140f22008-11-18 20:37:55 +00003330 } else {
3331 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00003332 }
3333 }
3334}
3335
pbrook6658ffb2007-03-16 23:58:11 +00003336/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3337 so these check for a hit then pass through to the normal out-of-line
3338 phys routines. */
Avi Kivity1ec9b902012-01-02 12:47:48 +02003339static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
3340 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00003341{
Avi Kivity1ec9b902012-01-02 12:47:48 +02003342 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
3343 switch (size) {
3344 case 1: return ldub_phys(addr);
3345 case 2: return lduw_phys(addr);
3346 case 4: return ldl_phys(addr);
3347 default: abort();
3348 }
pbrook6658ffb2007-03-16 23:58:11 +00003349}
3350
Avi Kivity1ec9b902012-01-02 12:47:48 +02003351static void watch_mem_write(void *opaque, target_phys_addr_t addr,
3352 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00003353{
Avi Kivity1ec9b902012-01-02 12:47:48 +02003354 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
3355 switch (size) {
 3356 case 1: stb_phys(addr, val); break;
 3357 case 2: stw_phys(addr, val); break;
 3358 case 4: stl_phys(addr, val); break;
3359 default: abort();
3360 }
pbrook6658ffb2007-03-16 23:58:11 +00003361}
3362
Avi Kivity1ec9b902012-01-02 12:47:48 +02003363static const MemoryRegionOps watch_mem_ops = {
3364 .read = watch_mem_read,
3365 .write = watch_mem_write,
3366 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00003367};
pbrook6658ffb2007-03-16 23:58:11 +00003368
Avi Kivity70c68e42012-01-02 12:32:48 +02003369static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
3370 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003371{
Avi Kivity70c68e42012-01-02 12:32:48 +02003372 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003373 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02003374 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00003375#if defined(DEBUG_SUBPAGE)
3376 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3377 mmio, len, addr, idx);
3378#endif
blueswir1db7b5422007-05-26 17:36:03 +00003379
Avi Kivity5312bd82012-02-12 18:32:55 +02003380 section = &phys_sections[mmio->sub_section[idx]];
3381 addr += mmio->base;
3382 addr -= section->offset_within_address_space;
3383 addr += section->offset_within_region;
3384 return io_mem_read(section->mr->ram_addr, addr, len);
blueswir1db7b5422007-05-26 17:36:03 +00003385}
3386
Avi Kivity70c68e42012-01-02 12:32:48 +02003387static void subpage_write(void *opaque, target_phys_addr_t addr,
3388 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003389{
Avi Kivity70c68e42012-01-02 12:32:48 +02003390 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003391 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02003392 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00003393#if defined(DEBUG_SUBPAGE)
Avi Kivity70c68e42012-01-02 12:32:48 +02003394 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
3395 " idx %d value %"PRIx64"\n",
Richard Hendersonf6405242010-04-22 16:47:31 -07003396 __func__, mmio, len, addr, idx, value);
blueswir1db7b5422007-05-26 17:36:03 +00003397#endif
Richard Hendersonf6405242010-04-22 16:47:31 -07003398
Avi Kivity5312bd82012-02-12 18:32:55 +02003399 section = &phys_sections[mmio->sub_section[idx]];
3400 addr += mmio->base;
3401 addr -= section->offset_within_address_space;
3402 addr += section->offset_within_region;
3403 io_mem_write(section->mr->ram_addr, addr, value, len);
blueswir1db7b5422007-05-26 17:36:03 +00003404}
3405
Avi Kivity70c68e42012-01-02 12:32:48 +02003406static const MemoryRegionOps subpage_ops = {
3407 .read = subpage_read,
3408 .write = subpage_write,
3409 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00003410};
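
/* Editor's worked example for the translation above: take a subpage
   whose base is 0x10000 and a section registered for guest addresses
   0x10040.. with offset_within_region 0x200.  An access at subpage
   offset 0x80 computes 0x80 + 0x10000 - 0x10040 + 0x200 = 0x240, i.e.
   it lands 0x240 bytes into the target MemoryRegion. */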
3411
Avi Kivityde712f92012-01-02 12:41:07 +02003412static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
3413 unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01003414{
3415 ram_addr_t raddr = addr;
3416 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02003417 switch (size) {
3418 case 1: return ldub_p(ptr);
3419 case 2: return lduw_p(ptr);
3420 case 4: return ldl_p(ptr);
3421 default: abort();
3422 }
Andreas Färber56384e82011-11-30 16:26:21 +01003423}
3424
Avi Kivityde712f92012-01-02 12:41:07 +02003425static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
3426 uint64_t value, unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01003427{
3428 ram_addr_t raddr = addr;
3429 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02003430 switch (size) {
3431 case 1: return stb_p(ptr, value);
3432 case 2: return stw_p(ptr, value);
3433 case 4: return stl_p(ptr, value);
3434 default: abort();
3435 }
Andreas Färber56384e82011-11-30 16:26:21 +01003436}
3437
Avi Kivityde712f92012-01-02 12:41:07 +02003438static const MemoryRegionOps subpage_ram_ops = {
3439 .read = subpage_ram_read,
3440 .write = subpage_ram_write,
3441 .endianness = DEVICE_NATIVE_ENDIAN,
Andreas Färber56384e82011-11-30 16:26:21 +01003442};
3443
Anthony Liguoric227f092009-10-01 16:12:16 -05003444static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02003445 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00003446{
3447 int idx, eidx;
3448
3449 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3450 return -1;
3451 idx = SUBPAGE_IDX(start);
3452 eidx = SUBPAGE_IDX(end);
3453#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00003454 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", __func__,
blueswir1db7b5422007-05-26 17:36:03 +00003455 mmio, start, end, idx, eidx, section);
3456#endif
Avi Kivity5312bd82012-02-12 18:32:55 +02003457 if (memory_region_is_ram(phys_sections[section].mr)) {
3458 MemoryRegionSection new_section = phys_sections[section];
3459 new_section.mr = &io_mem_subpage_ram;
3460 section = phys_section_add(&new_section);
Andreas Färber56384e82011-11-30 16:26:21 +01003461 }
blueswir1db7b5422007-05-26 17:36:03 +00003462 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02003463 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00003464 }
3465
3466 return 0;
3467}
3468
Avi Kivity5312bd82012-02-12 18:32:55 +02003469static subpage_t *subpage_init (target_phys_addr_t base, uint16_t *section_ind,
3470 uint16_t orig_section)
blueswir1db7b5422007-05-26 17:36:03 +00003471{
Anthony Liguoric227f092009-10-01 16:12:16 -05003472 subpage_t *mmio;
Avi Kivity5312bd82012-02-12 18:32:55 +02003473 MemoryRegionSection section = {
3474 .offset_within_address_space = base,
3475 .size = TARGET_PAGE_SIZE,
3476 };
blueswir1db7b5422007-05-26 17:36:03 +00003477
Anthony Liguori7267c092011-08-20 22:09:37 -05003478 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00003479
3480 mmio->base = base;
Avi Kivity70c68e42012-01-02 12:32:48 +02003481 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
3482 "subpage", TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02003483 mmio->iomem.subpage = true;
Avi Kivity5312bd82012-02-12 18:32:55 +02003484 section.mr = &mmio->iomem;
blueswir1db7b5422007-05-26 17:36:03 +00003485#if defined(DEBUG_SUBPAGE)
aliguori1eec6142009-02-05 22:06:18 +00003486 printf("%s: %p base " TARGET_FMT_plx " len %08x section %d\n", __func__,
 3487 mmio, base, TARGET_PAGE_SIZE, orig_section);
blueswir1db7b5422007-05-26 17:36:03 +00003488#endif
Avi Kivity5312bd82012-02-12 18:32:55 +02003489 *section_ind = phys_section_add(&section);
3490 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_section);
blueswir1db7b5422007-05-26 17:36:03 +00003491
3492 return mmio;
3493}
3494
aliguori88715652009-02-11 15:20:58 +00003495static int get_free_io_mem_idx(void)
3496{
3497 int i;
3498
3499 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3500 if (!io_mem_used[i]) {
3501 io_mem_used[i] = 1;
3502 return i;
3503 }
Riku Voipioc6703b42009-12-03 15:56:05 +02003504 fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
aliguori88715652009-02-11 15:20:58 +00003505 return -1;
3506}
3507
bellard33417e72003-08-10 21:47:01 +00003508/* Register a MemoryRegion for io memory access and return its io
 3509 memory index, which selects the region in io_mem_region for the
Paul Brook0b4e6e32009-04-30 18:37:55 +01003510 softmmu slow path.
blueswir13ee89922008-01-02 19:45:26 +00003511 If io_index is non zero, the corresponding io zone is
blueswir14254fab2008-01-01 16:57:19 +00003512 modified. If it is zero, a new io zone is allocated. The return
 3513 value can be used with cpu_register_physical_memory(). (-1) is
 3514 returned on error. */
Avi Kivitya621f382012-01-02 13:12:08 +02003515static int cpu_register_io_memory_fixed(int io_index, MemoryRegion *mr)
bellard33417e72003-08-10 21:47:01 +00003516{
bellard33417e72003-08-10 21:47:01 +00003517 if (io_index <= 0) {
aliguori88715652009-02-11 15:20:58 +00003518 io_index = get_free_io_mem_idx();
3519 if (io_index == -1)
3520 return io_index;
bellard33417e72003-08-10 21:47:01 +00003521 } else {
3522 if (io_index >= IO_MEM_NB_ENTRIES)
3523 return -1;
3524 }
bellardb5ff1b32005-11-26 10:38:39 +00003525
Avi Kivitya621f382012-01-02 13:12:08 +02003526 io_mem_region[io_index] = mr;
Richard Hendersonf6405242010-04-22 16:47:31 -07003527
Avi Kivity11c7ef02012-01-02 17:21:07 +02003528 return io_index;
bellard33417e72003-08-10 21:47:01 +00003529}
bellard61382a52003-10-27 21:22:23 +00003530
Avi Kivitya621f382012-01-02 13:12:08 +02003531int cpu_register_io_memory(MemoryRegion *mr)
Avi Kivity1eed09c2009-06-14 11:38:51 +03003532{
Avi Kivitya621f382012-01-02 13:12:08 +02003533 return cpu_register_io_memory_fixed(0, mr);
Avi Kivity1eed09c2009-06-14 11:38:51 +03003534}
3535
Avi Kivity11c7ef02012-01-02 17:21:07 +02003536void cpu_unregister_io_memory(int io_index)
aliguori88715652009-02-11 15:20:58 +00003537{
Avi Kivitya621f382012-01-02 13:12:08 +02003538 io_mem_region[io_index] = NULL;
aliguori88715652009-02-11 15:20:58 +00003539 io_mem_used[io_index] = 0;
3540}
3541
Avi Kivity5312bd82012-02-12 18:32:55 +02003542static uint16_t dummy_section(MemoryRegion *mr)
3543{
3544 MemoryRegionSection section = {
3545 .mr = mr,
3546 .offset_within_address_space = 0,
3547 .offset_within_region = 0,
3548 .size = UINT64_MAX,
3549 };
3550
3551 return phys_section_add(&section);
3552}
3553
Avi Kivitye9179ce2009-06-14 11:38:52 +03003554static void io_mem_init(void)
3555{
3556 int i;
3557
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003558 /* Must be first: */
3559 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
3560 assert(io_mem_ram.ram_addr == 0);
3561 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
3562 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
3563 "unassigned", UINT64_MAX);
3564 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
3565 "notdirty", UINT64_MAX);
Avi Kivityde712f92012-01-02 12:41:07 +02003566 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
3567 "subpage-ram", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003568 for (i=0; i<5; i++)
3569 io_mem_used[i] = 1;
3570
Avi Kivity1ec9b902012-01-02 12:47:48 +02003571 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
3572 "watch", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003573}
3574
Avi Kivity50c1e142012-02-08 21:36:02 +02003575static void core_begin(MemoryListener *listener)
3576{
Avi Kivity54688b12012-02-09 17:34:32 +02003577 destroy_all_mappings();
Avi Kivity5312bd82012-02-12 18:32:55 +02003578 phys_sections_clear();
Avi Kivityd6f2ea22012-02-12 20:12:49 +02003579 phys_map.u.node = PHYS_MAP_NODE_NIL;
Avi Kivity5312bd82012-02-12 18:32:55 +02003580 phys_section_unassigned = dummy_section(&io_mem_unassigned);
Avi Kivity50c1e142012-02-08 21:36:02 +02003581}
3582
3583static void core_commit(MemoryListener *listener)
3584{
3585}
3586
Avi Kivity93632742012-02-08 16:54:16 +02003587static void core_region_add(MemoryListener *listener,
3588 MemoryRegionSection *section)
3589{
Avi Kivity4855d412012-02-08 21:16:05 +02003590 cpu_register_physical_memory_log(section, section->readonly);
Avi Kivity93632742012-02-08 16:54:16 +02003591}
3592
3593static void core_region_del(MemoryListener *listener,
3594 MemoryRegionSection *section)
3595{
Avi Kivity93632742012-02-08 16:54:16 +02003596}
3597
Avi Kivity50c1e142012-02-08 21:36:02 +02003598static void core_region_nop(MemoryListener *listener,
3599 MemoryRegionSection *section)
3600{
Avi Kivity54688b12012-02-09 17:34:32 +02003601 cpu_register_physical_memory_log(section, section->readonly);
Avi Kivity50c1e142012-02-08 21:36:02 +02003602}
3603
Avi Kivity93632742012-02-08 16:54:16 +02003604static void core_log_start(MemoryListener *listener,
3605 MemoryRegionSection *section)
3606{
3607}
3608
3609static void core_log_stop(MemoryListener *listener,
3610 MemoryRegionSection *section)
3611{
3612}
3613
3614static void core_log_sync(MemoryListener *listener,
3615 MemoryRegionSection *section)
3616{
3617}
3618
3619static void core_log_global_start(MemoryListener *listener)
3620{
3621 cpu_physical_memory_set_dirty_tracking(1);
3622}
3623
3624static void core_log_global_stop(MemoryListener *listener)
3625{
3626 cpu_physical_memory_set_dirty_tracking(0);
3627}
3628
3629static void core_eventfd_add(MemoryListener *listener,
3630 MemoryRegionSection *section,
3631 bool match_data, uint64_t data, int fd)
3632{
3633}
3634
3635static void core_eventfd_del(MemoryListener *listener,
3636 MemoryRegionSection *section,
3637 bool match_data, uint64_t data, int fd)
3638{
3639}
3640
Avi Kivity50c1e142012-02-08 21:36:02 +02003641static void io_begin(MemoryListener *listener)
3642{
3643}
3644
3645static void io_commit(MemoryListener *listener)
3646{
3647}
3648
Avi Kivity4855d412012-02-08 21:16:05 +02003649static void io_region_add(MemoryListener *listener,
3650 MemoryRegionSection *section)
3651{
3652 iorange_init(&section->mr->iorange, &memory_region_iorange_ops,
3653 section->offset_within_address_space, section->size);
3654 ioport_register(&section->mr->iorange);
3655}
3656
3657static void io_region_del(MemoryListener *listener,
3658 MemoryRegionSection *section)
3659{
3660 isa_unassign_ioport(section->offset_within_address_space, section->size);
3661}
3662
Avi Kivity50c1e142012-02-08 21:36:02 +02003663static void io_region_nop(MemoryListener *listener,
3664 MemoryRegionSection *section)
3665{
3666}
3667
Avi Kivity4855d412012-02-08 21:16:05 +02003668static void io_log_start(MemoryListener *listener,
3669 MemoryRegionSection *section)
3670{
3671}
3672
3673static void io_log_stop(MemoryListener *listener,
3674 MemoryRegionSection *section)
3675{
3676}
3677
3678static void io_log_sync(MemoryListener *listener,
3679 MemoryRegionSection *section)
3680{
3681}
3682
3683static void io_log_global_start(MemoryListener *listener)
3684{
3685}
3686
3687static void io_log_global_stop(MemoryListener *listener)
3688{
3689}
3690
3691static void io_eventfd_add(MemoryListener *listener,
3692 MemoryRegionSection *section,
3693 bool match_data, uint64_t data, int fd)
3694{
3695}
3696
3697static void io_eventfd_del(MemoryListener *listener,
3698 MemoryRegionSection *section,
3699 bool match_data, uint64_t data, int fd)
3700{
3701}
3702
Avi Kivity93632742012-02-08 16:54:16 +02003703static MemoryListener core_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02003704 .begin = core_begin,
3705 .commit = core_commit,
Avi Kivity93632742012-02-08 16:54:16 +02003706 .region_add = core_region_add,
3707 .region_del = core_region_del,
Avi Kivity50c1e142012-02-08 21:36:02 +02003708 .region_nop = core_region_nop,
Avi Kivity93632742012-02-08 16:54:16 +02003709 .log_start = core_log_start,
3710 .log_stop = core_log_stop,
3711 .log_sync = core_log_sync,
3712 .log_global_start = core_log_global_start,
3713 .log_global_stop = core_log_global_stop,
3714 .eventfd_add = core_eventfd_add,
3715 .eventfd_del = core_eventfd_del,
3716 .priority = 0,
3717};
3718
Avi Kivity4855d412012-02-08 21:16:05 +02003719static MemoryListener io_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02003720 .begin = io_begin,
3721 .commit = io_commit,
Avi Kivity4855d412012-02-08 21:16:05 +02003722 .region_add = io_region_add,
3723 .region_del = io_region_del,
Avi Kivity50c1e142012-02-08 21:36:02 +02003724 .region_nop = io_region_nop,
Avi Kivity4855d412012-02-08 21:16:05 +02003725 .log_start = io_log_start,
3726 .log_stop = io_log_stop,
3727 .log_sync = io_log_sync,
3728 .log_global_start = io_log_global_start,
3729 .log_global_stop = io_log_global_stop,
3730 .eventfd_add = io_eventfd_add,
3731 .eventfd_del = io_eventfd_del,
3732 .priority = 0,
3733};
3734
Avi Kivity62152b82011-07-26 14:26:14 +03003735static void memory_map_init(void)
3736{
Anthony Liguori7267c092011-08-20 22:09:37 -05003737 system_memory = g_malloc(sizeof(*system_memory));
Avi Kivity8417ceb2011-08-03 11:56:14 +03003738 memory_region_init(system_memory, "system", INT64_MAX);
Avi Kivity62152b82011-07-26 14:26:14 +03003739 set_system_memory_map(system_memory);
Avi Kivity309cb472011-08-08 16:09:03 +03003740
Anthony Liguori7267c092011-08-20 22:09:37 -05003741 system_io = g_malloc(sizeof(*system_io));
Avi Kivity309cb472011-08-08 16:09:03 +03003742 memory_region_init(system_io, "io", 65536);
3743 set_system_io_map(system_io);
Avi Kivity93632742012-02-08 16:54:16 +02003744
Avi Kivity4855d412012-02-08 21:16:05 +02003745 memory_listener_register(&core_memory_listener, system_memory);
3746 memory_listener_register(&io_memory_listener, system_io);
Avi Kivity62152b82011-07-26 14:26:14 +03003747}
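
/* Editor's sketch: once memory_map_init() has run, boards attach RAM
   and device regions to the root returned by get_system_memory();
   the name and size below are illustrative. */
#if 0
static void example_board_init_ram(void)
{
    MemoryRegion *ram = g_malloc(sizeof(*ram));

    memory_region_init_ram(ram, "board.ram", 128 * 1024 * 1024);
    memory_region_add_subregion(get_system_memory(), 0, ram);
}
#endif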
3748
3749MemoryRegion *get_system_memory(void)
3750{
3751 return system_memory;
3752}
3753
Avi Kivity309cb472011-08-08 16:09:03 +03003754MemoryRegion *get_system_io(void)
3755{
3756 return system_io;
3757}
3758
pbrooke2eef172008-06-08 01:09:01 +00003759#endif /* !defined(CONFIG_USER_ONLY) */
3760
bellard13eb76e2004-01-24 15:23:36 +00003761/* physical memory access (slow version, mainly for debug) */
3762#if defined(CONFIG_USER_ONLY)
Paul Brooka68fe892010-03-01 00:08:59 +00003763int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3764 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003765{
3766 int l, flags;
3767 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00003768 void * p;
bellard13eb76e2004-01-24 15:23:36 +00003769
3770 while (len > 0) {
3771 page = addr & TARGET_PAGE_MASK;
3772 l = (page + TARGET_PAGE_SIZE) - addr;
3773 if (l > len)
3774 l = len;
3775 flags = page_get_flags(page);
3776 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00003777 return -1;
bellard13eb76e2004-01-24 15:23:36 +00003778 if (is_write) {
3779 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00003780 return -1;
bellard579a97f2007-11-11 14:26:47 +00003781 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003782 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00003783 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003784 memcpy(p, buf, l);
3785 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00003786 } else {
3787 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00003788 return -1;
bellard579a97f2007-11-11 14:26:47 +00003789 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003790 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00003791 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003792 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00003793 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00003794 }
3795 len -= l;
3796 buf += l;
3797 addr += l;
3798 }
Paul Brooka68fe892010-03-01 00:08:59 +00003799 return 0;
bellard13eb76e2004-01-24 15:23:36 +00003800}
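
/* Editor's sketch: this is the accessor the gdb stub uses to peek and
   poke guest memory; e.g. fetching a word at an arbitrary guest
   address (illustrative, assumes a 4-byte fetch is wanted): */
#if 0
static uint32_t example_peek_word(CPUState *env, target_ulong addr)
{
    uint32_t word = 0;

    /* last argument 0 = read; non-zero would write the buffer back */
    if (cpu_memory_rw_debug(env, addr, (uint8_t *)&word, sizeof(word), 0)) {
        /* address not mapped or not readable */
    }
    return word;
}
#endif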
bellard8df1cd02005-01-28 22:37:22 +00003801
bellard13eb76e2004-01-24 15:23:36 +00003802#else
Anthony Liguoric227f092009-10-01 16:12:16 -05003803void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
bellard13eb76e2004-01-24 15:23:36 +00003804 int len, int is_write)
3805{
3806 int l, io_index;
3807 uint8_t *ptr;
3808 uint32_t val;
Anthony Liguoric227f092009-10-01 16:12:16 -05003809 target_phys_addr_t page;
Anthony PERARD8ca56922011-07-15 04:32:53 +00003810 ram_addr_t pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003811 PhysPageDesc p;
ths3b46e622007-09-17 08:09:54 +00003812
bellard13eb76e2004-01-24 15:23:36 +00003813 while (len > 0) {
3814 page = addr & TARGET_PAGE_MASK;
3815 l = (page + TARGET_PAGE_SIZE) - addr;
3816 if (l > len)
3817 l = len;
bellard92e873b2004-05-21 14:52:29 +00003818 p = phys_page_find(page >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003819 pd = p.phys_offset;
ths3b46e622007-09-17 08:09:54 +00003820
bellard13eb76e2004-01-24 15:23:36 +00003821 if (is_write) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003822 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003823 target_phys_addr_t addr1;
Avi Kivity11c7ef02012-01-02 17:21:07 +02003824 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003825 addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
bellard6a00d602005-11-21 23:25:50 +00003826 /* XXX: could force cpu_single_env to NULL to avoid
3827 potential bugs */
aurel326c2934d2009-02-18 21:37:17 +00003828 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003829 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003830 val = ldl_p(buf);
Avi Kivityacbbec52011-11-21 12:27:03 +02003831 io_mem_write(io_index, addr1, val, 4);
bellard13eb76e2004-01-24 15:23:36 +00003832 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003833 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003834 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003835 val = lduw_p(buf);
Avi Kivityacbbec52011-11-21 12:27:03 +02003836 io_mem_write(io_index, addr1, val, 2);
bellard13eb76e2004-01-24 15:23:36 +00003837 l = 2;
3838 } else {
bellard1c213d12005-09-03 10:49:04 +00003839 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003840 val = ldub_p(buf);
Avi Kivityacbbec52011-11-21 12:27:03 +02003841 io_mem_write(io_index, addr1, val, 1);
bellard13eb76e2004-01-24 15:23:36 +00003842 l = 1;
3843 }
3844 } else {
Anthony PERARD8ca56922011-07-15 04:32:53 +00003845 ram_addr_t addr1;
bellardb448f2f2004-02-25 23:24:04 +00003846 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
bellard13eb76e2004-01-24 15:23:36 +00003847 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003848 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00003849 memcpy(ptr, buf, l);
bellard3a7d9292005-08-21 09:26:42 +00003850 if (!cpu_physical_memory_is_dirty(addr1)) {
3851 /* invalidate code */
3852 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3853 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003854 cpu_physical_memory_set_dirty_flags(
3855 addr1, (0xff & ~CODE_DIRTY_FLAG));
bellard3a7d9292005-08-21 09:26:42 +00003856 }
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003857 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00003858 }
3859 } else {
Avi Kivity1d393fa2012-01-01 21:15:42 +02003860 if (!is_ram_rom_romd(pd)) {
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003861 target_phys_addr_t addr1;
bellard13eb76e2004-01-24 15:23:36 +00003862 /* I/O case */
Avi Kivity11c7ef02012-01-02 17:21:07 +02003863 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003864 addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
aurel326c2934d2009-02-18 21:37:17 +00003865 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003866 /* 32 bit read access */
Avi Kivityacbbec52011-11-21 12:27:03 +02003867 val = io_mem_read(io_index, addr1, 4);
bellardc27004e2005-01-03 23:35:10 +00003868 stl_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003869 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003870 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003871 /* 16 bit read access */
Avi Kivityacbbec52011-11-21 12:27:03 +02003872 val = io_mem_read(io_index, addr1, 2);
bellardc27004e2005-01-03 23:35:10 +00003873 stw_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003874 l = 2;
3875 } else {
bellard1c213d12005-09-03 10:49:04 +00003876 /* 8 bit read access */
Avi Kivityacbbec52011-11-21 12:27:03 +02003877 val = io_mem_read(io_index, addr1, 1);
bellardc27004e2005-01-03 23:35:10 +00003878 stb_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003879 l = 1;
3880 }
3881 } else {
3882 /* RAM case */
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003883 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
3884 memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
3885 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00003886 }
3887 }
3888 len -= l;
3889 buf += l;
3890 addr += l;
3891 }
3892}
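
/* A minimal usage sketch for cpu_physical_memory_rw(): read a block of
 * guest-physical memory into a host buffer, patch it, and write it back.
 * example_peek_poke(), the window size and the XOR patch are hypothetical,
 * chosen only for illustration. */
static void example_peek_poke(target_phys_addr_t gpa)
{
    uint8_t window[64];

    cpu_physical_memory_rw(gpa, window, sizeof(window), 0); /* read */
    window[0] ^= 0xff;                                      /* patch */
    cpu_physical_memory_rw(gpa, window, sizeof(window), 1); /* write back */
}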
bellard8df1cd02005-01-28 22:37:22 +00003893
bellardd0ecd2a2006-04-23 17:14:48 +00003894/* used for ROM loading : can write in RAM and ROM */
Anthony Liguoric227f092009-10-01 16:12:16 -05003895void cpu_physical_memory_write_rom(target_phys_addr_t addr,
bellardd0ecd2a2006-04-23 17:14:48 +00003896 const uint8_t *buf, int len)
3897{
3898 int l;
3899 uint8_t *ptr;
Anthony Liguoric227f092009-10-01 16:12:16 -05003900 target_phys_addr_t page;
bellardd0ecd2a2006-04-23 17:14:48 +00003901 unsigned long pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003902 PhysPageDesc p;
ths3b46e622007-09-17 08:09:54 +00003903
bellardd0ecd2a2006-04-23 17:14:48 +00003904 while (len > 0) {
3905 page = addr & TARGET_PAGE_MASK;
3906 l = (page + TARGET_PAGE_SIZE) - addr;
3907 if (l > len)
3908 l = len;
3909 p = phys_page_find(page >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003910 pd = p.phys_offset;
ths3b46e622007-09-17 08:09:54 +00003911
Avi Kivity1d393fa2012-01-01 21:15:42 +02003912 if (!is_ram_rom_romd(pd)) {
bellardd0ecd2a2006-04-23 17:14:48 +00003913 /* do nothing */
3914 } else {
3915 unsigned long addr1;
3916 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3917 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003918 ptr = qemu_get_ram_ptr(addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00003919 memcpy(ptr, buf, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003920 qemu_put_ram_ptr(ptr);
bellardd0ecd2a2006-04-23 17:14:48 +00003921 }
3922 len -= l;
3923 buf += l;
3924 addr += l;
3925 }
3926}
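
/* Sketch of the ROM-loading use named above: copy a firmware image into a
 * region the guest sees as read-only. example_load_firmware(), its
 * arguments and the load address are hypothetical. */
static void example_load_firmware(const uint8_t *image, int size)
{
    /* unlike cpu_physical_memory_rw(), this writes through to ROM pages;
       pure I/O pages are skipped (the "do nothing" branch above) */
    cpu_physical_memory_write_rom(0xfffc0000, image, size);
}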
3927
aliguori6d16c2f2009-01-22 16:59:11 +00003928typedef struct {
3929 void *buffer;
Anthony Liguoric227f092009-10-01 16:12:16 -05003930 target_phys_addr_t addr;
3931 target_phys_addr_t len;
aliguori6d16c2f2009-01-22 16:59:11 +00003932} BounceBuffer;
3933
3934static BounceBuffer bounce;
3935
aliguoriba223c22009-01-22 16:59:16 +00003936typedef struct MapClient {
3937 void *opaque;
3938 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00003939 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00003940} MapClient;
3941
Blue Swirl72cf2d42009-09-12 07:36:22 +00003942static QLIST_HEAD(map_client_list, MapClient) map_client_list
3943 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003944
3945void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3946{
Anthony Liguori7267c092011-08-20 22:09:37 -05003947 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00003948
3949 client->opaque = opaque;
3950 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003951 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00003952 return client;
3953}
3954
3955void cpu_unregister_map_client(void *_client)
3956{
3957 MapClient *client = (MapClient *)_client;
3958
Blue Swirl72cf2d42009-09-12 07:36:22 +00003959 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05003960 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00003961}
3962
3963static void cpu_notify_map_clients(void)
3964{
3965 MapClient *client;
3966
Blue Swirl72cf2d42009-09-12 07:36:22 +00003967 while (!QLIST_EMPTY(&map_client_list)) {
3968 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003969 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09003970 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00003971 }
3972}
3973
aliguori6d16c2f2009-01-22 16:59:11 +00003974/* Map a physical memory region into a host virtual address.
3975 * May map a subset of the requested range, given by and returned in *plen.
3976 * May return NULL if resources needed to perform the mapping are exhausted.
3977 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00003978 * Use cpu_register_map_client() to know when retrying the map operation is
3979 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00003980 */
Anthony Liguoric227f092009-10-01 16:12:16 -05003981void *cpu_physical_memory_map(target_phys_addr_t addr,
3982 target_phys_addr_t *plen,
aliguori6d16c2f2009-01-22 16:59:11 +00003983 int is_write)
3984{
Anthony Liguoric227f092009-10-01 16:12:16 -05003985 target_phys_addr_t len = *plen;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003986 target_phys_addr_t todo = 0;
aliguori6d16c2f2009-01-22 16:59:11 +00003987 int l;
Anthony Liguoric227f092009-10-01 16:12:16 -05003988 target_phys_addr_t page;
aliguori6d16c2f2009-01-22 16:59:11 +00003989 unsigned long pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003990 PhysPageDesc p;
Anthony PERARDf15fbc42011-07-20 08:17:42 +00003991 ram_addr_t raddr = RAM_ADDR_MAX;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003992 ram_addr_t rlen;
3993 void *ret;
aliguori6d16c2f2009-01-22 16:59:11 +00003994
3995 while (len > 0) {
3996 page = addr & TARGET_PAGE_MASK;
3997 l = (page + TARGET_PAGE_SIZE) - addr;
3998 if (l > len)
3999 l = len;
4000 p = phys_page_find(page >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004001 pd = p.phys_offset;
aliguori6d16c2f2009-01-22 16:59:11 +00004002
Avi Kivity0e0df1e2012-01-02 00:32:15 +02004003 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01004004 if (todo || bounce.buffer) {
aliguori6d16c2f2009-01-22 16:59:11 +00004005 break;
4006 }
4007 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
4008 bounce.addr = addr;
4009 bounce.len = l;
4010 if (!is_write) {
Stefan Weil54f7b4a2011-04-10 18:23:39 +02004011 cpu_physical_memory_read(addr, bounce.buffer, l);
aliguori6d16c2f2009-01-22 16:59:11 +00004012 }
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01004013
4014 *plen = l;
4015 return bounce.buffer;
aliguori6d16c2f2009-01-22 16:59:11 +00004016 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01004017 if (!todo) {
4018 raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4019 }
aliguori6d16c2f2009-01-22 16:59:11 +00004020
4021 len -= l;
4022 addr += l;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01004023 todo += l;
aliguori6d16c2f2009-01-22 16:59:11 +00004024 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01004025 rlen = todo;
4026 ret = qemu_ram_ptr_length(raddr, &rlen);
4027 *plen = rlen;
4028 return ret;
aliguori6d16c2f2009-01-22 16:59:11 +00004029}
4030
4031/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
4032 * Will also mark the memory as dirty if is_write == 1. access_len gives
4033 * the amount of memory that was actually read or written by the caller.
4034 */
Anthony Liguoric227f092009-10-01 16:12:16 -05004035void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
4036 int is_write, target_phys_addr_t access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00004037{
4038 if (buffer != bounce.buffer) {
4039 if (is_write) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03004040 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00004041 while (access_len) {
4042 unsigned l;
4043 l = TARGET_PAGE_SIZE;
4044 if (l > access_len)
4045 l = access_len;
4046 if (!cpu_physical_memory_is_dirty(addr1)) {
4047 /* invalidate code */
4048 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
4049 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09004050 cpu_physical_memory_set_dirty_flags(
4051 addr1, (0xff & ~CODE_DIRTY_FLAG));
aliguori6d16c2f2009-01-22 16:59:11 +00004052 }
4053 addr1 += l;
4054 access_len -= l;
4055 }
4056 }
Jan Kiszka868bb332011-06-21 22:59:09 +02004057 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02004058 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01004059 }
aliguori6d16c2f2009-01-22 16:59:11 +00004060 return;
4061 }
4062 if (is_write) {
4063 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
4064 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00004065 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00004066 bounce.buffer = NULL;
aliguoriba223c22009-01-22 16:59:16 +00004067 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00004068}
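
/* Sketch of the zero-copy DMA pattern that map/unmap and the map-client
 * list are designed for. example_dma_retry() and example_dma_write() are
 * hypothetical; the real users are the block and network DMA paths. */
static void example_dma_retry(void *opaque)
{
    /* invoked from cpu_notify_map_clients() once the bounce buffer is
       free again; a real client would re-issue its transfer here */
}

static void example_dma_write(target_phys_addr_t addr,
                              const uint8_t *data,
                              target_phys_addr_t len)
{
    target_phys_addr_t plen = len;
    void *host = cpu_physical_memory_map(addr, &plen, 1);

    if (!host) {
        /* mapping resources exhausted: ask to be notified and bail out */
        cpu_register_map_client(NULL, example_dma_retry);
        return;
    }
    /* plen may have been truncated; a real caller loops over the rest */
    memcpy(host, data, plen);
    cpu_physical_memory_unmap(host, plen, 1, plen);
}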
bellardd0ecd2a2006-04-23 17:14:48 +00004069
bellard8df1cd02005-01-28 22:37:22 +00004070/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004071static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
4072 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00004073{
4074 int io_index;
4075 uint8_t *ptr;
4076 uint32_t val;
4077 unsigned long pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004078 PhysPageDesc p;
bellard8df1cd02005-01-28 22:37:22 +00004079
4080 p = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004081 pd = p.phys_offset;
ths3b46e622007-09-17 08:09:54 +00004082
Avi Kivity1d393fa2012-01-01 21:15:42 +02004083 if (!is_ram_rom_romd(pd)) {
bellard8df1cd02005-01-28 22:37:22 +00004084 /* I/O case */
Avi Kivity11c7ef02012-01-02 17:21:07 +02004085 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004086 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
Avi Kivityacbbec52011-11-21 12:27:03 +02004087 val = io_mem_read(io_index, addr, 4);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004088#if defined(TARGET_WORDS_BIGENDIAN)
4089 if (endian == DEVICE_LITTLE_ENDIAN) {
4090 val = bswap32(val);
4091 }
4092#else
4093 if (endian == DEVICE_BIG_ENDIAN) {
4094 val = bswap32(val);
4095 }
4096#endif
bellard8df1cd02005-01-28 22:37:22 +00004097 } else {
4098 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00004099 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bellard8df1cd02005-01-28 22:37:22 +00004100 (addr & ~TARGET_PAGE_MASK);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004101 switch (endian) {
4102 case DEVICE_LITTLE_ENDIAN:
4103 val = ldl_le_p(ptr);
4104 break;
4105 case DEVICE_BIG_ENDIAN:
4106 val = ldl_be_p(ptr);
4107 break;
4108 default:
4109 val = ldl_p(ptr);
4110 break;
4111 }
bellard8df1cd02005-01-28 22:37:22 +00004112 }
4113 return val;
4114}
4115
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004116uint32_t ldl_phys(target_phys_addr_t addr)
4117{
4118 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4119}
4120
4121uint32_t ldl_le_phys(target_phys_addr_t addr)
4122{
4123 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4124}
4125
4126uint32_t ldl_be_phys(target_phys_addr_t addr)
4127{
4128 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
4129}
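
/* Illustration of why the explicitly-endian variants exist: a guest
 * structure defined as little-endian must decode identically whatever
 * TARGET_WORDS_BIGENDIAN says. The 64-bit descriptor layout here is
 * hypothetical. */
static uint64_t example_read_le64(target_phys_addr_t addr)
{
    uint32_t lo = ldl_le_phys(addr);
    uint32_t hi = ldl_le_phys(addr + 4);

    return ((uint64_t)hi << 32) | lo;
}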
4130
bellard84b7b8e2005-11-28 21:19:04 +00004131/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004132static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
4133 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00004134{
4135 int io_index;
4136 uint8_t *ptr;
4137 uint64_t val;
4138 unsigned long pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004139 PhysPageDesc p;
bellard84b7b8e2005-11-28 21:19:04 +00004140
4141 p = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004142 pd = p.phys_offset;
ths3b46e622007-09-17 08:09:54 +00004143
Avi Kivity1d393fa2012-01-01 21:15:42 +02004144 if (!is_ram_rom_romd(pd)) {
bellard84b7b8e2005-11-28 21:19:04 +00004145 /* I/O case */
Avi Kivity11c7ef02012-01-02 17:21:07 +02004146 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004147 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004148
4149        /* XXX: this is broken when the device endianness differs from
4150           the target endianness; fix it and honour the "endian" argument. */
bellard84b7b8e2005-11-28 21:19:04 +00004151#ifdef TARGET_WORDS_BIGENDIAN
Avi Kivityacbbec52011-11-21 12:27:03 +02004152 val = io_mem_read(io_index, addr, 4) << 32;
4153 val |= io_mem_read(io_index, addr + 4, 4);
bellard84b7b8e2005-11-28 21:19:04 +00004154#else
Avi Kivityacbbec52011-11-21 12:27:03 +02004155 val = io_mem_read(io_index, addr, 4);
4156 val |= io_mem_read(io_index, addr + 4, 4) << 32;
bellard84b7b8e2005-11-28 21:19:04 +00004157#endif
4158 } else {
4159 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00004160 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bellard84b7b8e2005-11-28 21:19:04 +00004161 (addr & ~TARGET_PAGE_MASK);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004162 switch (endian) {
4163 case DEVICE_LITTLE_ENDIAN:
4164 val = ldq_le_p(ptr);
4165 break;
4166 case DEVICE_BIG_ENDIAN:
4167 val = ldq_be_p(ptr);
4168 break;
4169 default:
4170 val = ldq_p(ptr);
4171 break;
4172 }
bellard84b7b8e2005-11-28 21:19:04 +00004173 }
4174 return val;
4175}
4176
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004177uint64_t ldq_phys(target_phys_addr_t addr)
4178{
4179 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4180}
4181
4182uint64_t ldq_le_phys(target_phys_addr_t addr)
4183{
4184 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4185}
4186
4187uint64_t ldq_be_phys(target_phys_addr_t addr)
4188{
4189 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
4190}
4191
bellardaab33092005-10-30 20:48:42 +00004192/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05004193uint32_t ldub_phys(target_phys_addr_t addr)
bellardaab33092005-10-30 20:48:42 +00004194{
4195 uint8_t val;
4196 cpu_physical_memory_read(addr, &val, 1);
4197 return val;
4198}
4199
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004200/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004201static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
4202 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00004203{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004204 int io_index;
4205 uint8_t *ptr;
4206 uint64_t val;
4207 unsigned long pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004208 PhysPageDesc p;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004209
4210 p = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004211 pd = p.phys_offset;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004212
Avi Kivity1d393fa2012-01-01 21:15:42 +02004213 if (!is_ram_rom_romd(pd)) {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004214 /* I/O case */
Avi Kivity11c7ef02012-01-02 17:21:07 +02004215 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004216 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
Avi Kivityacbbec52011-11-21 12:27:03 +02004217 val = io_mem_read(io_index, addr, 2);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004218#if defined(TARGET_WORDS_BIGENDIAN)
4219 if (endian == DEVICE_LITTLE_ENDIAN) {
4220 val = bswap16(val);
4221 }
4222#else
4223 if (endian == DEVICE_BIG_ENDIAN) {
4224 val = bswap16(val);
4225 }
4226#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004227 } else {
4228 /* RAM case */
4229 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4230 (addr & ~TARGET_PAGE_MASK);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004231 switch (endian) {
4232 case DEVICE_LITTLE_ENDIAN:
4233 val = lduw_le_p(ptr);
4234 break;
4235 case DEVICE_BIG_ENDIAN:
4236 val = lduw_be_p(ptr);
4237 break;
4238 default:
4239 val = lduw_p(ptr);
4240 break;
4241 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004242 }
4243 return val;
bellardaab33092005-10-30 20:48:42 +00004244}
4245
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004246uint32_t lduw_phys(target_phys_addr_t addr)
4247{
4248 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4249}
4250
4251uint32_t lduw_le_phys(target_phys_addr_t addr)
4252{
4253 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4254}
4255
4256uint32_t lduw_be_phys(target_phys_addr_t addr)
4257{
4258 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
4259}
4260
bellard8df1cd02005-01-28 22:37:22 +00004261/* warning: addr must be aligned. The ram page is not masked as dirty
4262 and the code inside is not invalidated. It is useful if the dirty
4263 bits are used to track modified PTEs */
Anthony Liguoric227f092009-10-01 16:12:16 -05004264void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
bellard8df1cd02005-01-28 22:37:22 +00004265{
4266 int io_index;
4267 uint8_t *ptr;
4268 unsigned long pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004269 PhysPageDesc p;
bellard8df1cd02005-01-28 22:37:22 +00004270
4271 p = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004272 pd = p.phys_offset;
ths3b46e622007-09-17 08:09:54 +00004273
Avi Kivity0e0df1e2012-01-02 00:32:15 +02004274 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
Avi Kivity11c7ef02012-01-02 17:21:07 +02004275 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004276 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
Avi Kivityacbbec52011-11-21 12:27:03 +02004277 io_mem_write(io_index, addr, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00004278 } else {
aliguori74576192008-10-06 14:02:03 +00004279 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
pbrook5579c7f2009-04-11 14:47:08 +00004280 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00004281 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00004282
4283 if (unlikely(in_migration)) {
4284 if (!cpu_physical_memory_is_dirty(addr1)) {
4285 /* invalidate code */
4286 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4287 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09004288 cpu_physical_memory_set_dirty_flags(
4289 addr1, (0xff & ~CODE_DIRTY_FLAG));
aliguori74576192008-10-06 14:02:03 +00004290 }
4291 }
bellard8df1cd02005-01-28 22:37:22 +00004292 }
4293}
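
/* Sketch of the PTE use case named in the comment above: setting
 * accessed/dirty bits in a guest page table entry without disturbing
 * QEMU's own dirty tracking. The flag values are hypothetical. */
static void example_set_pte_flags(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    pte |= 0x20 | 0x40; /* hypothetical ACCESSED and DIRTY bits */
    /* no CODE_DIRTY bookkeeping and no TB invalidation (except while
       migrating, as handled above) */
    stl_phys_notdirty(pte_addr, pte);
}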
4294
Anthony Liguoric227f092009-10-01 16:12:16 -05004295void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
j_mayerbc98a7e2007-04-04 07:55:12 +00004296{
4297 int io_index;
4298 uint8_t *ptr;
4299 unsigned long pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004300 PhysPageDesc p;
j_mayerbc98a7e2007-04-04 07:55:12 +00004301
4302 p = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004303 pd = p.phys_offset;
ths3b46e622007-09-17 08:09:54 +00004304
Avi Kivity0e0df1e2012-01-02 00:32:15 +02004305 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
Avi Kivity11c7ef02012-01-02 17:21:07 +02004306 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004307 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
j_mayerbc98a7e2007-04-04 07:55:12 +00004308#ifdef TARGET_WORDS_BIGENDIAN
Avi Kivityacbbec52011-11-21 12:27:03 +02004309 io_mem_write(io_index, addr, val >> 32, 4);
4310 io_mem_write(io_index, addr + 4, (uint32_t)val, 4);
j_mayerbc98a7e2007-04-04 07:55:12 +00004311#else
Avi Kivityacbbec52011-11-21 12:27:03 +02004312 io_mem_write(io_index, addr, (uint32_t)val, 4);
4313 io_mem_write(io_index, addr + 4, val >> 32, 4);
j_mayerbc98a7e2007-04-04 07:55:12 +00004314#endif
4315 } else {
pbrook5579c7f2009-04-11 14:47:08 +00004316 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
j_mayerbc98a7e2007-04-04 07:55:12 +00004317 (addr & ~TARGET_PAGE_MASK);
4318 stq_p(ptr, val);
4319 }
4320}
4321
bellard8df1cd02005-01-28 22:37:22 +00004322/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004323static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
4324 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00004325{
4326 int io_index;
4327 uint8_t *ptr;
4328 unsigned long pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004329 PhysPageDesc p;
bellard8df1cd02005-01-28 22:37:22 +00004330
4331 p = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004332 pd = p.phys_offset;
ths3b46e622007-09-17 08:09:54 +00004333
Avi Kivity0e0df1e2012-01-02 00:32:15 +02004334 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
Avi Kivity11c7ef02012-01-02 17:21:07 +02004335 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004336 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004337#if defined(TARGET_WORDS_BIGENDIAN)
4338 if (endian == DEVICE_LITTLE_ENDIAN) {
4339 val = bswap32(val);
4340 }
4341#else
4342 if (endian == DEVICE_BIG_ENDIAN) {
4343 val = bswap32(val);
4344 }
4345#endif
Avi Kivityacbbec52011-11-21 12:27:03 +02004346 io_mem_write(io_index, addr, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00004347 } else {
4348 unsigned long addr1;
4349 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4350 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00004351 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004352 switch (endian) {
4353 case DEVICE_LITTLE_ENDIAN:
4354 stl_le_p(ptr, val);
4355 break;
4356 case DEVICE_BIG_ENDIAN:
4357 stl_be_p(ptr, val);
4358 break;
4359 default:
4360 stl_p(ptr, val);
4361 break;
4362 }
bellard3a7d9292005-08-21 09:26:42 +00004363 if (!cpu_physical_memory_is_dirty(addr1)) {
4364 /* invalidate code */
4365 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4366 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09004367 cpu_physical_memory_set_dirty_flags(addr1,
4368 (0xff & ~CODE_DIRTY_FLAG));
bellard3a7d9292005-08-21 09:26:42 +00004369 }
bellard8df1cd02005-01-28 22:37:22 +00004370 }
4371}
4372
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004373void stl_phys(target_phys_addr_t addr, uint32_t val)
4374{
4375 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4376}
4377
4378void stl_le_phys(target_phys_addr_t addr, uint32_t val)
4379{
4380 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4381}
4382
4383void stl_be_phys(target_phys_addr_t addr, uint32_t val)
4384{
4385 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4386}
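
/* Store-side counterpart of the endian-explicit loads: a device whose
 * register file is specified big-endian keeps that byte order on any
 * target. The register address and value are hypothetical. */
static void example_write_be_reg(target_phys_addr_t reg_addr, uint32_t val)
{
    stl_be_phys(reg_addr, val); /* byte order fixed by the device spec */
}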
4387
bellardaab33092005-10-30 20:48:42 +00004388/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05004389void stb_phys(target_phys_addr_t addr, uint32_t val)
bellardaab33092005-10-30 20:48:42 +00004390{
4391 uint8_t v = val;
4392 cpu_physical_memory_write(addr, &v, 1);
4393}
4394
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004395/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004396static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
4397 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00004398{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004399 int io_index;
4400 uint8_t *ptr;
4401 unsigned long pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004402 PhysPageDesc p;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004403
4404 p = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004405 pd = p.phys_offset;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004406
Avi Kivity0e0df1e2012-01-02 00:32:15 +02004407 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
Avi Kivity11c7ef02012-01-02 17:21:07 +02004408 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004409 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004410#if defined(TARGET_WORDS_BIGENDIAN)
4411 if (endian == DEVICE_LITTLE_ENDIAN) {
4412 val = bswap16(val);
4413 }
4414#else
4415 if (endian == DEVICE_BIG_ENDIAN) {
4416 val = bswap16(val);
4417 }
4418#endif
Avi Kivityacbbec52011-11-21 12:27:03 +02004419 io_mem_write(io_index, addr, val, 2);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004420 } else {
4421 unsigned long addr1;
4422 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4423 /* RAM case */
4424 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004425 switch (endian) {
4426 case DEVICE_LITTLE_ENDIAN:
4427 stw_le_p(ptr, val);
4428 break;
4429 case DEVICE_BIG_ENDIAN:
4430 stw_be_p(ptr, val);
4431 break;
4432 default:
4433 stw_p(ptr, val);
4434 break;
4435 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004436 if (!cpu_physical_memory_is_dirty(addr1)) {
4437 /* invalidate code */
4438 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4439 /* set dirty bit */
4440 cpu_physical_memory_set_dirty_flags(addr1,
4441 (0xff & ~CODE_DIRTY_FLAG));
4442 }
4443 }
bellardaab33092005-10-30 20:48:42 +00004444}
4445
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004446void stw_phys(target_phys_addr_t addr, uint32_t val)
4447{
4448 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4449}
4450
4451void stw_le_phys(target_phys_addr_t addr, uint32_t val)
4452{
4453 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4454}
4455
4456void stw_be_phys(target_phys_addr_t addr, uint32_t val)
4457{
4458 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4459}
4460
bellardaab33092005-10-30 20:48:42 +00004461/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05004462void stq_phys(target_phys_addr_t addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00004463{
4464 val = tswap64(val);
Stefan Weil71d2b722011-03-26 21:06:56 +01004465 cpu_physical_memory_write(addr, &val, 8);
bellardaab33092005-10-30 20:48:42 +00004466}
4467
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004468void stq_le_phys(target_phys_addr_t addr, uint64_t val)
4469{
4470 val = cpu_to_le64(val);
4471 cpu_physical_memory_write(addr, &val, 8);
4472}
4473
4474void stq_be_phys(target_phys_addr_t addr, uint64_t val)
4475{
4476 val = cpu_to_be64(val);
4477 cpu_physical_memory_write(addr, &val, 8);
4478}
4479
aliguori5e2972f2009-03-28 17:51:36 +00004480/* virtual memory access for debug (includes writing to ROM) */
ths5fafdf22007-09-16 21:08:06 +00004481int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00004482 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00004483{
4484 int l;
Anthony Liguoric227f092009-10-01 16:12:16 -05004485 target_phys_addr_t phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00004486 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00004487
4488 while (len > 0) {
4489 page = addr & TARGET_PAGE_MASK;
4490 phys_addr = cpu_get_phys_page_debug(env, page);
4491 /* if no physical page mapped, return an error */
4492 if (phys_addr == -1)
4493 return -1;
4494 l = (page + TARGET_PAGE_SIZE) - addr;
4495 if (l > len)
4496 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00004497 phys_addr += (addr & ~TARGET_PAGE_MASK);
aliguori5e2972f2009-03-28 17:51:36 +00004498 if (is_write)
4499 cpu_physical_memory_write_rom(phys_addr, buf, l);
4500 else
aliguori5e2972f2009-03-28 17:51:36 +00004501 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
bellard13eb76e2004-01-24 15:23:36 +00004502 len -= l;
4503 buf += l;
4504 addr += l;
4505 }
4506 return 0;
4507}
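
/* Debugger-style sketch: planting a breakpoint byte through the
 * virtual-address interface. The write path goes through
 * cpu_physical_memory_write_rom(), so it also patches ROM, which is what
 * a debugger wants. The opcode value is hypothetical. */
static int example_plant_breakpoint(CPUState *env, target_ulong vaddr)
{
    uint8_t bkpt = 0xcc; /* hypothetical single-byte breakpoint opcode */

    return cpu_memory_rw_debug(env, vaddr, &bkpt, 1, 1);
}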
Paul Brooka68fe892010-03-01 00:08:59 +00004508#endif
bellard13eb76e2004-01-24 15:23:36 +00004509
pbrook2e70f6e2008-06-29 01:03:05 +00004510/* in deterministic execution mode, instructions doing device I/O
4511   must be at the end of the TB */
4512void cpu_io_recompile(CPUState *env, void *retaddr)
4513{
4514 TranslationBlock *tb;
4515 uint32_t n, cflags;
4516 target_ulong pc, cs_base;
4517 uint64_t flags;
4518
4519 tb = tb_find_pc((unsigned long)retaddr);
4520 if (!tb) {
4521 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
4522 retaddr);
4523 }
4524 n = env->icount_decr.u16.low + tb->icount;
Stefan Weil618ba8e2011-04-18 06:39:53 +00004525 cpu_restore_state(tb, env, (unsigned long)retaddr);
pbrook2e70f6e2008-06-29 01:03:05 +00004526 /* Calculate how many instructions had been executed before the fault
thsbf20dc02008-06-30 17:22:19 +00004527 occurred. */
pbrook2e70f6e2008-06-29 01:03:05 +00004528 n = n - env->icount_decr.u16.low;
4529 /* Generate a new TB ending on the I/O insn. */
4530 n++;
4531 /* On MIPS and SH, delay slot instructions can only be restarted if
4532 they were already the first instruction in the TB. If this is not
thsbf20dc02008-06-30 17:22:19 +00004533 the first instruction in a TB then re-execute the preceding
pbrook2e70f6e2008-06-29 01:03:05 +00004534 branch. */
4535#if defined(TARGET_MIPS)
4536 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4537 env->active_tc.PC -= 4;
4538 env->icount_decr.u16.low++;
4539 env->hflags &= ~MIPS_HFLAG_BMASK;
4540 }
4541#elif defined(TARGET_SH4)
4542 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4543 && n > 1) {
4544 env->pc -= 2;
4545 env->icount_decr.u16.low++;
4546 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4547 }
4548#endif
4549 /* This should never happen. */
4550 if (n > CF_COUNT_MASK)
4551 cpu_abort(env, "TB too big during recompile");
4552
4553 cflags = n | CF_LAST_IO;
4554 pc = tb->pc;
4555 cs_base = tb->cs_base;
4556 flags = tb->flags;
4557 tb_phys_invalidate(tb, -1);
4558 /* FIXME: In theory this could raise an exception. In practice
4559 we have already translated the block once so it's probably ok. */
4560 tb_gen_code(env, pc, cs_base, flags, cflags);
thsbf20dc02008-06-30 17:22:19 +00004561 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
pbrook2e70f6e2008-06-29 01:03:05 +00004562 the first in the TB) then we end up generating a whole new TB and
4563 repeating the fault, which is horribly inefficient.
4564 Better would be to execute just this insn uncached, or generate a
4565 second new TB. */
4566 cpu_resume_from_signal(env, NULL);
4567}
4568
Paul Brookb3755a92010-03-12 16:54:58 +00004569#if !defined(CONFIG_USER_ONLY)
4570
Stefan Weil055403b2010-10-22 23:03:32 +02004571void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
bellarde3db7222005-01-26 22:00:47 +00004572{
4573 int i, target_code_size, max_target_code_size;
4574 int direct_jmp_count, direct_jmp2_count, cross_page;
4575 TranslationBlock *tb;
ths3b46e622007-09-17 08:09:54 +00004576
bellarde3db7222005-01-26 22:00:47 +00004577 target_code_size = 0;
4578 max_target_code_size = 0;
4579 cross_page = 0;
4580 direct_jmp_count = 0;
4581 direct_jmp2_count = 0;
4582 for(i = 0; i < nb_tbs; i++) {
4583 tb = &tbs[i];
4584 target_code_size += tb->size;
4585 if (tb->size > max_target_code_size)
4586 max_target_code_size = tb->size;
4587 if (tb->page_addr[1] != -1)
4588 cross_page++;
4589 if (tb->tb_next_offset[0] != 0xffff) {
4590 direct_jmp_count++;
4591 if (tb->tb_next_offset[1] != 0xffff) {
4592 direct_jmp2_count++;
4593 }
4594 }
4595 }
4596    /* XXX: avoid using doubles? */
bellard57fec1f2008-02-01 10:50:11 +00004597 cpu_fprintf(f, "Translation buffer state:\n");
Stefan Weil055403b2010-10-22 23:03:32 +02004598 cpu_fprintf(f, "gen code size %td/%ld\n",
bellard26a5f132008-05-28 12:30:31 +00004599 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4600 cpu_fprintf(f, "TB count %d/%d\n",
4601 nb_tbs, code_gen_max_blocks);
ths5fafdf22007-09-16 21:08:06 +00004602 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
bellarde3db7222005-01-26 22:00:47 +00004603 nb_tbs ? target_code_size / nb_tbs : 0,
4604 max_target_code_size);
Stefan Weil055403b2010-10-22 23:03:32 +02004605 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
bellarde3db7222005-01-26 22:00:47 +00004606 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4607 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
ths5fafdf22007-09-16 21:08:06 +00004608 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4609 cross_page,
bellarde3db7222005-01-26 22:00:47 +00004610 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4611 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
ths5fafdf22007-09-16 21:08:06 +00004612 direct_jmp_count,
bellarde3db7222005-01-26 22:00:47 +00004613 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4614 direct_jmp2_count,
4615 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
bellard57fec1f2008-02-01 10:50:11 +00004616 cpu_fprintf(f, "\nStatistics:\n");
bellarde3db7222005-01-26 22:00:47 +00004617 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4618 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4619 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
bellardb67d9a52008-05-23 09:57:34 +00004620 tcg_dump_info(f, cpu_fprintf);
bellarde3db7222005-01-26 22:00:47 +00004621}
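
/* A stand-alone way to get these statistics; the monitor's "info jit"
 * command funnels into the same function. Pointing it at stderr here is
 * purely illustrative. */
static void example_dump_jit_stats(void)
{
    dump_exec_info(stderr, fprintf);
}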
4622
Avi Kivityd39e8222012-01-01 23:35:10 +02004623/* NOTE: this function can trigger an exception */
4624/* NOTE2: the returned address is not exactly the physical address: it
4625   is a ram_addr_t, i.e. an offset into guest RAM, not a host pointer */
4626tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong addr)
4627{
4628 int mmu_idx, page_index, pd;
4629 void *p;
4630
4631 page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
4632 mmu_idx = cpu_mmu_index(env1);
4633 if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
4634 (addr & TARGET_PAGE_MASK))) {
4635 ldub_code(addr);
4636 }
4637 pd = env1->tlb_table[mmu_idx][page_index].addr_code & ~TARGET_PAGE_MASK;
Avi Kivity0e0df1e2012-01-02 00:32:15 +02004638 if (pd != io_mem_ram.ram_addr && pd != io_mem_rom.ram_addr
Avi Kivity75c578d2012-01-02 15:40:52 +02004639 && !is_romd(pd)) {
Avi Kivityd39e8222012-01-01 23:35:10 +02004640#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC)
4641 cpu_unassigned_access(env1, addr, 0, 1, 0, 4);
4642#else
4643 cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
4644#endif
4645 }
4646 p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
4647 return qemu_ram_addr_from_host_nofail(p);
4648}
4649
Benjamin Herrenschmidt82afa582012-01-10 01:35:11 +00004650/*
4651 * A helper function for the _utterly broken_ virtio device model to find out if
4652 * it's running on a big endian machine. Don't do this at home kids!
4653 */
4654bool virtio_is_big_endian(void);
4655bool virtio_is_big_endian(void)
4656{
4657#if defined(TARGET_WORDS_BIGENDIAN)
4658 return true;
4659#else
4660 return false;
4661#endif
4662}
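
/* Hypothetical virtio-side caller: choose the accessor that matches the
 * guest's byte order when loading a 16-bit field. */
static uint16_t example_virtio_lduw(target_phys_addr_t addr)
{
    return virtio_is_big_endian() ? lduw_be_phys(addr)
                                  : lduw_le_phys(addr);
}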
4663
bellard61382a52003-10-27 21:22:23 +00004664#define MMUSUFFIX _cmmu
Blue Swirl39171492011-09-21 18:13:16 +00004665#undef GETPC
bellard61382a52003-10-27 21:22:23 +00004666#define GETPC() NULL
4667#define env cpu_single_env
bellardb769d8f2004-10-03 15:07:13 +00004668#define SOFTMMU_CODE_ACCESS
bellard61382a52003-10-27 21:22:23 +00004669
4670#define SHIFT 0
4671#include "softmmu_template.h"
4672
4673#define SHIFT 1
4674#include "softmmu_template.h"
4675
4676#define SHIFT 2
4677#include "softmmu_template.h"
4678
4679#define SHIFT 3
4680#include "softmmu_template.h"
4681
4682#undef env
4683
4684#endif