/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
   have limited branch ranges (possibly also PPC), so place it in a
   section close to the code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define P_L1_BITS_REM \
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

/* Size of the L1 page table.  Avoid silly small sizes.  */
#if P_L1_BITS_REM < 4
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
#else
#define P_L1_BITS  P_L1_BITS_REM
#endif

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to PhysPageDesc.  */
static void *l1_phys_map[P_L1_SIZE];

static void io_mem_init(void);
static void memory_map_init(void);

/* io memory support */
MemoryRegion *io_mem_region[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static MemoryRegion io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

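/* Determine the host page size and, for BSD user-mode emulation, mark the
   pages the host already uses as PAGE_RESERVED so that guest mappings will
   not collide with them. */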
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

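/* Walk the multi-level table for a guest page index (i.e. an address
   shifted right by TARGET_PAGE_BITS).  If 'alloc' is set, missing
   intermediate tables and the final PageDesc array are created on the
   way down; otherwise NULL is returned as soon as a level is absent. */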
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
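/* Same radix-tree walk as page_find_alloc(), but over the physical
   address space.  Freshly allocated leaves start out pointing at the
   "unassigned" I/O region until a real mapping is registered. */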
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            *lp = p = g_malloc0(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        int i;
        int first_index = index & ~(L2_SIZE - 1);

        if (!alloc) {
            return NULL;
        }

        *lp = pd = g_malloc(sizeof(PhysPageDesc) * L2_SIZE);

        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = io_mem_unassigned.ram_addr;
            pd[i].region_offset = (first_index + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc phys_page_find(target_phys_addr_t index)
{
    PhysPageDesc *p = phys_page_find_alloc(index, 0);

    if (p) {
        return *p;
    } else {
        return (PhysPageDesc) {
            .phys_offset = io_mem_unassigned.ram_addr,
            .region_offset = index << TARGET_PAGE_BITS,
        };
    }
}

static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

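/* Allocate the buffer that generated host code is emitted into.  On hosts
   with limited branch ranges the mapping address matters: generated blocks
   must be able to reach each other and the prologue with direct jumps,
   hence the per-architecture size caps and placement hints below. */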
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Keep the buffer no bigger than 16MB to branch between blocks */
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

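/* Return the CPUState with the given index from the global CPU list, or
   NULL if there is no such CPU. */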
CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

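/* The page_next[] and jmp_next[] chains store a tag in the low two bits
   of each pointer: 0 or 1 selects which of the pointed-to TB's two slots
   continues the chain, and 2 marks the head of a circular list.  Masking
   with ~3 recovers the actual TranslationBlock pointer. */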
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

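/* Remove a TB from every structure that references it: the physical-PC
   hash table, the per-page TB lists, each CPU's tb_jmp_cache and both
   jump chains.  TBs that jumped to this one have their jumps reset so
   execution falls back to their own exit path. */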
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

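/* Set bits [start, start + len) in the byte array 'tab'.  A worked
   example (illustrative values): set_bits(tab, 3, 7) marks bits 3..9,
   i.e. tab[0] |= 0xf8 and tab[1] |= 0x03. */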
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

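/* Translate the guest code at (pc, cs_base, flags) into a fresh TB.  If
   the TB cache is full, flush everything and retry; tb_invalidated_flag
   then tells the caller that previously cached TB pointers are stale. */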
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

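/* Fast-path invalidation for small writes: once a page's code bitmap has
   been built, a write that does not overlap any translated byte range can
   return without walking the page's TB list. */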
1139/* len must be <= 8 and start must be a multiple of len */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001140static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
bellard9fa3e852004-01-04 18:06:42 +00001141{
1142 PageDesc *p;
1143 int offset, b;
bellard59817cc2004-02-16 22:01:13 +00001144#if 0
bellarda4193c82004-06-03 14:01:43 +00001145 if (1) {
aliguori93fcfe32009-01-15 22:34:14 +00001146 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1147 cpu_single_env->mem_io_vaddr, len,
1148 cpu_single_env->eip,
1149 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
bellard59817cc2004-02-16 22:01:13 +00001150 }
1151#endif
bellard9fa3e852004-01-04 18:06:42 +00001152 p = page_find(start >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001153 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00001154 return;
1155 if (p->code_bitmap) {
1156 offset = start & ~TARGET_PAGE_MASK;
1157 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1158 if (b & ((1 << len) - 1))
1159 goto do_invalidate;
1160 } else {
1161 do_invalidate:
bellardd720b932004-04-25 17:57:43 +00001162 tb_invalidate_phys_page_range(start, start + len, 1);
bellard9fa3e852004-01-04 18:06:42 +00001163 }
1164}
1165
bellard9fa3e852004-01-04 18:06:42 +00001166#if !defined(CONFIG_SOFTMMU)
Paul Brook41c1b1c2010-03-12 16:54:58 +00001167static void tb_invalidate_phys_page(tb_page_addr_t addr,
bellardd720b932004-04-25 17:57:43 +00001168 unsigned long pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00001169{
aliguori6b917542008-11-18 19:46:41 +00001170 TranslationBlock *tb;
bellard9fa3e852004-01-04 18:06:42 +00001171 PageDesc *p;
aliguori6b917542008-11-18 19:46:41 +00001172 int n;
bellardd720b932004-04-25 17:57:43 +00001173#ifdef TARGET_HAS_PRECISE_SMC
aliguori6b917542008-11-18 19:46:41 +00001174 TranslationBlock *current_tb = NULL;
bellardd720b932004-04-25 17:57:43 +00001175 CPUState *env = cpu_single_env;
aliguori6b917542008-11-18 19:46:41 +00001176 int current_tb_modified = 0;
1177 target_ulong current_pc = 0;
1178 target_ulong current_cs_base = 0;
1179 int current_flags = 0;
bellardd720b932004-04-25 17:57:43 +00001180#endif
bellard9fa3e852004-01-04 18:06:42 +00001181
1182 addr &= TARGET_PAGE_MASK;
1183 p = page_find(addr >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001184 if (!p)
bellardfd6ce8f2003-05-14 19:00:11 +00001185 return;
1186 tb = p->first_tb;
bellardd720b932004-04-25 17:57:43 +00001187#ifdef TARGET_HAS_PRECISE_SMC
1188 if (tb && pc != 0) {
1189 current_tb = tb_find_pc(pc);
1190 }
1191#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001192 while (tb != NULL) {
bellard9fa3e852004-01-04 18:06:42 +00001193 n = (long)tb & 3;
1194 tb = (TranslationBlock *)((long)tb & ~3);
bellardd720b932004-04-25 17:57:43 +00001195#ifdef TARGET_HAS_PRECISE_SMC
1196 if (current_tb == tb &&
pbrook2e70f6e2008-06-29 01:03:05 +00001197 (current_tb->cflags & CF_COUNT_MASK) != 1) {
bellardd720b932004-04-25 17:57:43 +00001198 /* If we are modifying the current TB, we must stop
1199 its execution. We could be more precise by checking
1200 that the modification is after the current PC, but it
1201 would require a specialized function to partially
1202 restore the CPU state */
ths3b46e622007-09-17 08:09:54 +00001203
bellardd720b932004-04-25 17:57:43 +00001204 current_tb_modified = 1;
Stefan Weil618ba8e2011-04-18 06:39:53 +00001205 cpu_restore_state(current_tb, env, pc);
aliguori6b917542008-11-18 19:46:41 +00001206 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1207 &current_flags);
bellardd720b932004-04-25 17:57:43 +00001208 }
1209#endif /* TARGET_HAS_PRECISE_SMC */
bellard9fa3e852004-01-04 18:06:42 +00001210 tb_phys_invalidate(tb, addr);
1211 tb = tb->page_next[n];
bellardfd6ce8f2003-05-14 19:00:11 +00001212 }
1213 p->first_tb = NULL;
bellardd720b932004-04-25 17:57:43 +00001214#ifdef TARGET_HAS_PRECISE_SMC
1215 if (current_tb_modified) {
1216 /* we generate a block containing just the instruction
1217 modifying the memory. It will ensure that it cannot modify
1218 itself */
bellardea1c1802004-06-14 18:56:36 +00001219 env->current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001220 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
bellardd720b932004-04-25 17:57:43 +00001221 cpu_resume_from_signal(env, puc);
1222 }
1223#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001224}
bellard9fa3e852004-01-04 18:06:42 +00001225#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001226
1227/* add the tb in the target page and protect it if necessary */
ths5fafdf22007-09-16 21:08:06 +00001228static inline void tb_alloc_page(TranslationBlock *tb,
Paul Brook41c1b1c2010-03-12 16:54:58 +00001229 unsigned int n, tb_page_addr_t page_addr)
bellardfd6ce8f2003-05-14 19:00:11 +00001230{
1231 PageDesc *p;
Juan Quintela4429ab42011-06-02 01:53:44 +00001232#ifndef CONFIG_USER_ONLY
1233 bool page_already_protected;
1234#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001235
bellard9fa3e852004-01-04 18:06:42 +00001236 tb->page_addr[n] = page_addr;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001237 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
bellard9fa3e852004-01-04 18:06:42 +00001238 tb->page_next[n] = p->first_tb;
Juan Quintela4429ab42011-06-02 01:53:44 +00001239#ifndef CONFIG_USER_ONLY
1240 page_already_protected = p->first_tb != NULL;
1241#endif
bellard9fa3e852004-01-04 18:06:42 +00001242 p->first_tb = (TranslationBlock *)((long)tb | n);
1243 invalidate_page_bitmap(p);
1244
bellard107db442004-06-22 18:48:46 +00001245#if defined(TARGET_HAS_SMC) || 1
bellardd720b932004-04-25 17:57:43 +00001246
bellard9fa3e852004-01-04 18:06:42 +00001247#if defined(CONFIG_USER_ONLY)
bellardfd6ce8f2003-05-14 19:00:11 +00001248 if (p->flags & PAGE_WRITE) {
pbrook53a59602006-03-25 19:31:22 +00001249 target_ulong addr;
1250 PageDesc *p2;
bellard9fa3e852004-01-04 18:06:42 +00001251 int prot;
1252
bellardfd6ce8f2003-05-14 19:00:11 +00001253 /* force the host page as non writable (writes will have a
1254 page fault + mprotect overhead) */
pbrook53a59602006-03-25 19:31:22 +00001255 page_addr &= qemu_host_page_mask;
bellardfd6ce8f2003-05-14 19:00:11 +00001256 prot = 0;
pbrook53a59602006-03-25 19:31:22 +00001257 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1258 addr += TARGET_PAGE_SIZE) {
1259
1260 p2 = page_find (addr >> TARGET_PAGE_BITS);
1261 if (!p2)
1262 continue;
1263 prot |= p2->flags;
1264 p2->flags &= ~PAGE_WRITE;
pbrook53a59602006-03-25 19:31:22 +00001265 }
ths5fafdf22007-09-16 21:08:06 +00001266 mprotect(g2h(page_addr), qemu_host_page_size,
bellardfd6ce8f2003-05-14 19:00:11 +00001267 (prot & PAGE_BITS) & ~PAGE_WRITE);
1268#ifdef DEBUG_TB_INVALIDATE
blueswir1ab3d1722007-11-04 07:31:40 +00001269 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
pbrook53a59602006-03-25 19:31:22 +00001270 page_addr);
bellardfd6ce8f2003-05-14 19:00:11 +00001271#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001272 }
bellard9fa3e852004-01-04 18:06:42 +00001273#else
1274 /* if some code is already present, then the pages are already
1275 protected. So we handle the case where only the first TB is
1276 allocated in a physical page */
Juan Quintela4429ab42011-06-02 01:53:44 +00001277 if (!page_already_protected) {
bellard6a00d602005-11-21 23:25:50 +00001278 tlb_protect_code(page_addr);
bellard9fa3e852004-01-04 18:06:42 +00001279 }
1280#endif
bellardd720b932004-04-25 17:57:43 +00001281
1282#endif /* TARGET_HAS_SMC */
bellardfd6ce8f2003-05-14 19:00:11 +00001283}
1284
bellard9fa3e852004-01-04 18:06:42 +00001285/* add a new TB and link it to the physical page tables. phys_page2 is
1286 (-1) to indicate that only one page contains the TB. */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001287void tb_link_page(TranslationBlock *tb,
1288 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
bellardd4e81642003-05-25 16:46:15 +00001289{
bellard9fa3e852004-01-04 18:06:42 +00001290 unsigned int h;
1291 TranslationBlock **ptb;
1292
pbrookc8a706f2008-06-02 16:16:42 +00001293 /* Grab the mmap lock to stop another thread invalidating this TB
1294 before we are done. */
1295 mmap_lock();
bellard9fa3e852004-01-04 18:06:42 +00001296 /* add in the physical hash table */
1297 h = tb_phys_hash_func(phys_pc);
1298 ptb = &tb_phys_hash[h];
1299 tb->phys_hash_next = *ptb;
1300 *ptb = tb;
bellardfd6ce8f2003-05-14 19:00:11 +00001301
1302 /* add in the page list */
bellard9fa3e852004-01-04 18:06:42 +00001303 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1304 if (phys_page2 != -1)
1305 tb_alloc_page(tb, 1, phys_page2);
1306 else
1307 tb->page_addr[1] = -1;
bellard9fa3e852004-01-04 18:06:42 +00001308
bellardd4e81642003-05-25 16:46:15 +00001309 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1310 tb->jmp_next[0] = NULL;
1311 tb->jmp_next[1] = NULL;
1312
1313 /* init original jump addresses */
1314 if (tb->tb_next_offset[0] != 0xffff)
1315 tb_reset_jump(tb, 0);
1316 if (tb->tb_next_offset[1] != 0xffff)
1317 tb_reset_jump(tb, 1);
bellard8a40a182005-11-20 10:35:40 +00001318
1319#ifdef DEBUG_TB_CHECK
1320 tb_page_check();
1321#endif
pbrookc8a706f2008-06-02 16:16:42 +00001322 mmap_unlock();
bellardfd6ce8f2003-05-14 19:00:11 +00001323}
1324
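/* A simplified lookup sketch (hypothetical; the real lookup lives in the
   CPU execution loop and also matches pc, cs_base and flags). It shows how
   the hash chain built above is meant to be consumed: new TBs are pushed at
   the head of their bucket, so the most recently generated code for a
   bucket is found first. */
#if 0
static TranslationBlock *tb_phys_lookup(tb_page_addr_t phys_pc)
{
    TranslationBlock *tb;
    for (tb = tb_phys_hash[tb_phys_hash_func(phys_pc)];
         tb != NULL; tb = tb->phys_hash_next) {
        if (tb->page_addr[0] == (phys_pc & TARGET_PAGE_MASK)) {
            return tb;
        }
    }
    return NULL;
}
#endif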
bellarda513fe12003-05-27 23:29:48 +00001325/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1326 tb[1].tc_ptr. Return NULL if not found */
1327TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1328{
1329 int m_min, m_max, m;
1330 unsigned long v;
1331 TranslationBlock *tb;
1332
1333 if (nb_tbs <= 0)
1334 return NULL;
1335 if (tc_ptr < (unsigned long)code_gen_buffer ||
1336 tc_ptr >= (unsigned long)code_gen_ptr)
1337 return NULL;
1338 /* binary search (cf Knuth) */
1339 m_min = 0;
1340 m_max = nb_tbs - 1;
1341 while (m_min <= m_max) {
1342 m = (m_min + m_max) >> 1;
1343 tb = &tbs[m];
1344 v = (unsigned long)tb->tc_ptr;
1345 if (v == tc_ptr)
1346 return tb;
1347 else if (tc_ptr < v) {
1348 m_max = m - 1;
1349 } else {
1350 m_min = m + 1;
1351 }
ths5fafdf22007-09-16 21:08:06 +00001352 }
bellarda513fe12003-05-27 23:29:48 +00001353 return &tbs[m_max];
1354}
bellard75012672003-06-21 13:11:07 +00001355
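/* Worked example for the binary search above, with hypothetical values.
   It relies on tbs[] being sorted by tc_ptr, which holds because
   code_gen_ptr is only ever bumped forward as TBs are generated:

     tbs[0].tc_ptr = 0x1000, tbs[1].tc_ptr = 0x1400,
     tbs[2].tc_ptr = 0x1900, nb_tbs = 3

     tb_find_pc(0x1520):
       m_min=0 m_max=2 -> m=1, v=0x1400 < 0x1520 -> m_min=2
       m_min=2 m_max=2 -> m=2, v=0x1900 > 0x1520 -> m_max=1
       loop ends; &tbs[1] is returned, i.e. the last TB whose generated
       code starts at or below tc_ptr. */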
bellardea041c02003-06-25 16:16:50 +00001356static void tb_reset_jump_recursive(TranslationBlock *tb);
1357
1358static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1359{
1360 TranslationBlock *tb1, *tb_next, **ptb;
1361 unsigned int n1;
1362
1363 tb1 = tb->jmp_next[n];
1364 if (tb1 != NULL) {
1365 /* find head of list */
1366 for(;;) {
1367 n1 = (long)tb1 & 3;
1368 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1369 if (n1 == 2)
1370 break;
1371 tb1 = tb1->jmp_next[n1];
1372 }
1373 /* we are now sure that tb jumps to tb1 */
1374 tb_next = tb1;
1375
1376 /* remove tb from the jmp_first list */
1377 ptb = &tb_next->jmp_first;
1378 for(;;) {
1379 tb1 = *ptb;
1380 n1 = (long)tb1 & 3;
1381 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1382 if (n1 == n && tb1 == tb)
1383 break;
1384 ptb = &tb1->jmp_next[n1];
1385 }
1386 *ptb = tb->jmp_next[n];
1387 tb->jmp_next[n] = NULL;
ths3b46e622007-09-17 08:09:54 +00001388
bellardea041c02003-06-25 16:16:50 +00001389 /* suppress the jump to next tb in generated code */
1390 tb_reset_jump(tb, n);
1391
bellard01243112004-01-04 15:48:17 +00001392 /* suppress jumps in the TB we could have jumped to */
bellardea041c02003-06-25 16:16:50 +00001393 tb_reset_jump_recursive(tb_next);
1394 }
1395}
1396
1397static void tb_reset_jump_recursive(TranslationBlock *tb)
1398{
1399 tb_reset_jump_recursive2(tb, 0);
1400 tb_reset_jump_recursive2(tb, 1);
1401}
1402
bellard1fddef42005-04-17 19:16:13 +00001403#if defined(TARGET_HAS_ICE)
Paul Brook94df27f2010-02-28 23:47:45 +00001404#if defined(CONFIG_USER_ONLY)
1405static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1406{
1407 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1408}
1409#else
bellardd720b932004-04-25 17:57:43 +00001410static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1411{
Anthony Liguoric227f092009-10-01 16:12:16 -05001412 target_phys_addr_t addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00001413 target_ulong pd;
Anthony Liguoric227f092009-10-01 16:12:16 -05001414 ram_addr_t ram_addr;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02001415 PhysPageDesc p;
bellardd720b932004-04-25 17:57:43 +00001416
pbrookc2f07f82006-04-08 17:14:56 +00001417 addr = cpu_get_phys_page_debug(env, pc);
1418 p = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02001419 pd = p.phys_offset;
pbrookc2f07f82006-04-08 17:14:56 +00001420 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
pbrook706cd4b2006-04-08 17:36:21 +00001421 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
bellardd720b932004-04-25 17:57:43 +00001422}
bellardc27004e2005-01-03 23:35:10 +00001423#endif
Paul Brook94df27f2010-02-28 23:47:45 +00001424#endif /* TARGET_HAS_ICE */
bellardd720b932004-04-25 17:57:43 +00001425
Paul Brookc527ee82010-03-01 03:31:14 +00001426#if defined(CONFIG_USER_ONLY)
1427void cpu_watchpoint_remove_all(CPUState *env, int mask)
1428
1429{
1430}
1431
1432int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1433 int flags, CPUWatchpoint **watchpoint)
1434{
1435 return -ENOSYS;
1436}
1437#else
pbrook6658ffb2007-03-16 23:58:11 +00001438/* Add a watchpoint. */
aliguoria1d1bb32008-11-18 20:07:32 +00001439int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1440 int flags, CPUWatchpoint **watchpoint)
pbrook6658ffb2007-03-16 23:58:11 +00001441{
aliguorib4051332008-11-18 20:14:20 +00001442 target_ulong len_mask = ~(len - 1);
aliguoric0ce9982008-11-25 22:13:57 +00001443 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001444
aliguorib4051332008-11-18 20:14:20 +00001445 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1446 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1447 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1448 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1449 return -EINVAL;
1450 }
Anthony Liguori7267c092011-08-20 22:09:37 -05001451 wp = g_malloc(sizeof(*wp));
pbrook6658ffb2007-03-16 23:58:11 +00001452
aliguoria1d1bb32008-11-18 20:07:32 +00001453 wp->vaddr = addr;
aliguorib4051332008-11-18 20:14:20 +00001454 wp->len_mask = len_mask;
aliguoria1d1bb32008-11-18 20:07:32 +00001455 wp->flags = flags;
1456
aliguori2dc9f412008-11-18 20:56:59 +00001457 /* keep all GDB-injected watchpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001458 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001459 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001460 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001461 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001462
pbrook6658ffb2007-03-16 23:58:11 +00001463 tlb_flush_page(env, addr);
aliguoria1d1bb32008-11-18 20:07:32 +00001464
1465 if (watchpoint)
1466 *watchpoint = wp;
1467 return 0;
pbrook6658ffb2007-03-16 23:58:11 +00001468}
1469
aliguoria1d1bb32008-11-18 20:07:32 +00001470/* Remove a specific watchpoint. */
1471int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1472 int flags)
pbrook6658ffb2007-03-16 23:58:11 +00001473{
aliguorib4051332008-11-18 20:14:20 +00001474 target_ulong len_mask = ~(len - 1);
aliguoria1d1bb32008-11-18 20:07:32 +00001475 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001476
Blue Swirl72cf2d42009-09-12 07:36:22 +00001477 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001478 if (addr == wp->vaddr && len_mask == wp->len_mask
aliguori6e140f22008-11-18 20:37:55 +00001479 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
aliguoria1d1bb32008-11-18 20:07:32 +00001480 cpu_watchpoint_remove_by_ref(env, wp);
pbrook6658ffb2007-03-16 23:58:11 +00001481 return 0;
1482 }
1483 }
aliguoria1d1bb32008-11-18 20:07:32 +00001484 return -ENOENT;
pbrook6658ffb2007-03-16 23:58:11 +00001485}
1486
aliguoria1d1bb32008-11-18 20:07:32 +00001487/* Remove a specific watchpoint by reference. */
1488void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1489{
Blue Swirl72cf2d42009-09-12 07:36:22 +00001490 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
edgar_igl7d03f822008-05-17 18:58:29 +00001491
aliguoria1d1bb32008-11-18 20:07:32 +00001492 tlb_flush_page(env, watchpoint->vaddr);
1493
Anthony Liguori7267c092011-08-20 22:09:37 -05001494 g_free(watchpoint);
edgar_igl7d03f822008-05-17 18:58:29 +00001495}
1496
aliguoria1d1bb32008-11-18 20:07:32 +00001497/* Remove all matching watchpoints. */
1498void cpu_watchpoint_remove_all(CPUState *env, int mask)
1499{
aliguoric0ce9982008-11-25 22:13:57 +00001500 CPUWatchpoint *wp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001501
Blue Swirl72cf2d42009-09-12 07:36:22 +00001502 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001503 if (wp->flags & mask)
1504 cpu_watchpoint_remove_by_ref(env, wp);
aliguoric0ce9982008-11-25 22:13:57 +00001505 }
aliguoria1d1bb32008-11-18 20:07:32 +00001506}
Paul Brookc527ee82010-03-01 03:31:14 +00001507#endif
aliguoria1d1bb32008-11-18 20:07:32 +00001508
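/* Usage sketch for the watchpoint API above (hypothetical caller, error
   handling kept minimal). len must be a power of two (1, 2, 4 or 8) and
   addr must be aligned to it, otherwise the insert fails with -EINVAL; in
   the user-only build it always fails with -ENOSYS. */
#if 0
static void watch_guest_word(CPUState *env, target_ulong addr)
{
    CPUWatchpoint *wp;

    if (cpu_watchpoint_insert(env, addr, 4, BP_MEM_WRITE, &wp) == 0) {
        /* ... guest writes to [addr, addr + 4) now trap ... */
        cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif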
1509/* Add a breakpoint. */
1510int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1511 CPUBreakpoint **breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001512{
bellard1fddef42005-04-17 19:16:13 +00001513#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001514 CPUBreakpoint *bp;
ths3b46e622007-09-17 08:09:54 +00001515
Anthony Liguori7267c092011-08-20 22:09:37 -05001516 bp = g_malloc(sizeof(*bp));
aliguoria1d1bb32008-11-18 20:07:32 +00001517
1518 bp->pc = pc;
1519 bp->flags = flags;
1520
aliguori2dc9f412008-11-18 20:56:59 +00001521 /* keep all GDB-injected breakpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001522 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001523 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001524 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001525 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001526
1527 breakpoint_invalidate(env, pc);
1528
1529 if (breakpoint)
1530 *breakpoint = bp;
1531 return 0;
1532#else
1533 return -ENOSYS;
1534#endif
1535}
1536
1537/* Remove a specific breakpoint. */
1538int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1539{
1540#if defined(TARGET_HAS_ICE)
1541 CPUBreakpoint *bp;
1542
Blue Swirl72cf2d42009-09-12 07:36:22 +00001543 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00001544 if (bp->pc == pc && bp->flags == flags) {
1545 cpu_breakpoint_remove_by_ref(env, bp);
bellard4c3a88a2003-07-26 12:06:08 +00001546 return 0;
aliguoria1d1bb32008-11-18 20:07:32 +00001547 }
bellard4c3a88a2003-07-26 12:06:08 +00001548 }
aliguoria1d1bb32008-11-18 20:07:32 +00001549 return -ENOENT;
bellard4c3a88a2003-07-26 12:06:08 +00001550#else
aliguoria1d1bb32008-11-18 20:07:32 +00001551 return -ENOSYS;
bellard4c3a88a2003-07-26 12:06:08 +00001552#endif
1553}
1554
aliguoria1d1bb32008-11-18 20:07:32 +00001555/* Remove a specific breakpoint by reference. */
1556void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001557{
bellard1fddef42005-04-17 19:16:13 +00001558#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001559 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
bellardd720b932004-04-25 17:57:43 +00001560
aliguoria1d1bb32008-11-18 20:07:32 +00001561 breakpoint_invalidate(env, breakpoint->pc);
1562
Anthony Liguori7267c092011-08-20 22:09:37 -05001563 g_free(breakpoint);
aliguoria1d1bb32008-11-18 20:07:32 +00001564#endif
1565}
1566
1567/* Remove all matching breakpoints. */
1568void cpu_breakpoint_remove_all(CPUState *env, int mask)
1569{
1570#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001571 CPUBreakpoint *bp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001572
Blue Swirl72cf2d42009-09-12 07:36:22 +00001573 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001574 if (bp->flags & mask)
1575 cpu_breakpoint_remove_by_ref(env, bp);
aliguoric0ce9982008-11-25 22:13:57 +00001576 }
bellard4c3a88a2003-07-26 12:06:08 +00001577#endif
1578}
1579
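/* Usage sketch for the breakpoint API above (hypothetical caller). On
   targets built without TARGET_HAS_ICE the insert fails with -ENOSYS, so
   the return value must always be checked. */
#if 0
static void break_at(CPUState *env, target_ulong pc)
{
    CPUBreakpoint *bp;

    if (cpu_breakpoint_insert(env, pc, BP_GDB, &bp) == 0) {
        /* ... the CPU loop exits with EXCP_DEBUG when pc is hit ... */
        cpu_breakpoint_remove_by_ref(env, bp);
    }
}
#endif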
bellardc33a3462003-07-29 20:50:33 +00001580/* enable or disable single step mode. EXCP_DEBUG is returned by the
1581 CPU loop after each instruction */
1582void cpu_single_step(CPUState *env, int enabled)
1583{
bellard1fddef42005-04-17 19:16:13 +00001584#if defined(TARGET_HAS_ICE)
bellardc33a3462003-07-29 20:50:33 +00001585 if (env->singlestep_enabled != enabled) {
1586 env->singlestep_enabled = enabled;
aliguorie22a25c2009-03-12 20:12:48 +00001587 if (kvm_enabled())
1588 kvm_update_guest_debug(env, 0);
1589 else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01001590 /* must flush all the translated code to avoid inconsistencies */
aliguorie22a25c2009-03-12 20:12:48 +00001591 /* XXX: only flush what is necessary */
1592 tb_flush(env);
1593 }
bellardc33a3462003-07-29 20:50:33 +00001594 }
1595#endif
1596}
1597
bellard34865132003-10-05 14:28:56 +00001598/* enable or disable low-level logging */
1599void cpu_set_log(int log_flags)
1600{
1601 loglevel = log_flags;
1602 if (loglevel && !logfile) {
pbrook11fcfab2007-07-01 18:21:11 +00001603 logfile = fopen(logfilename, log_append ? "a" : "w");
bellard34865132003-10-05 14:28:56 +00001604 if (!logfile) {
1605 perror(logfilename);
1606 _exit(1);
1607 }
bellard9fa3e852004-01-04 18:06:42 +00001608#if !defined(CONFIG_SOFTMMU)
1609 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1610 {
blueswir1b55266b2008-09-20 08:07:15 +00001611 static char logfile_buf[4096];
bellard9fa3e852004-01-04 18:06:42 +00001612 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1613 }
Stefan Weildaf767b2011-12-03 22:32:37 +01001614#elif defined(_WIN32)
1615 /* Win32 doesn't support line-buffering, so use unbuffered output. */
1616 setvbuf(logfile, NULL, _IONBF, 0);
1617#else
bellard34865132003-10-05 14:28:56 +00001618 setvbuf(logfile, NULL, _IOLBF, 0);
bellard9fa3e852004-01-04 18:06:42 +00001619#endif
pbrooke735b912007-06-30 13:53:24 +00001620 log_append = 1;
1621 }
1622 if (!loglevel && logfile) {
1623 fclose(logfile);
1624 logfile = NULL;
bellard34865132003-10-05 14:28:56 +00001625 }
1626}
1627
1628void cpu_set_log_filename(const char *filename)
1629{
1630 logfilename = strdup(filename);
pbrooke735b912007-06-30 13:53:24 +00001631 if (logfile) {
1632 fclose(logfile);
1633 logfile = NULL;
1634 }
1635 cpu_set_log(loglevel);
bellard34865132003-10-05 14:28:56 +00001636}
bellardc33a3462003-07-29 20:50:33 +00001637
aurel323098dba2009-03-07 21:28:24 +00001638static void cpu_unlink_tb(CPUState *env)
bellardea041c02003-06-25 16:16:50 +00001639{
pbrookd5975362008-06-07 20:50:51 +00001640 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1641 problem and hope the cpu will stop of its own accord. For userspace
1642 emulation this often isn't actually as bad as it sounds. Often
1643 signals are used primarily to interrupt blocking syscalls. */
aurel323098dba2009-03-07 21:28:24 +00001644 TranslationBlock *tb;
Anthony Liguoric227f092009-10-01 16:12:16 -05001645 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
aurel323098dba2009-03-07 21:28:24 +00001646
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001647 spin_lock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001648 tb = env->current_tb;
1649 /* if the cpu is currently executing code, we must unlink it and
1650 all the potentially executing TB */
Riku Voipiof76cfe52009-12-04 15:16:30 +02001651 if (tb) {
aurel323098dba2009-03-07 21:28:24 +00001652 env->current_tb = NULL;
1653 tb_reset_jump_recursive(tb);
aurel323098dba2009-03-07 21:28:24 +00001654 }
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001655 spin_unlock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001656}
1657
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001658#ifndef CONFIG_USER_ONLY
aurel323098dba2009-03-07 21:28:24 +00001659/* mask must never be zero, except for A20 change call */
Jan Kiszkaec6959d2011-04-13 01:32:56 +02001660static void tcg_handle_interrupt(CPUState *env, int mask)
aurel323098dba2009-03-07 21:28:24 +00001661{
1662 int old_mask;
1663
1664 old_mask = env->interrupt_request;
1665 env->interrupt_request |= mask;
1666
aliguori8edac962009-04-24 18:03:45 +00001667 /*
1668 * If called from iothread context, wake the target cpu in
1669 * case it's halted.
1670 */
Jan Kiszkab7680cb2011-03-12 17:43:51 +01001671 if (!qemu_cpu_is_self(env)) {
aliguori8edac962009-04-24 18:03:45 +00001672 qemu_cpu_kick(env);
1673 return;
1674 }
aliguori8edac962009-04-24 18:03:45 +00001675
pbrook2e70f6e2008-06-29 01:03:05 +00001676 if (use_icount) {
pbrook266910c2008-07-09 15:31:50 +00001677 env->icount_decr.u16.high = 0xffff;
pbrook2e70f6e2008-06-29 01:03:05 +00001678 if (!can_do_io(env)
aurel32be214e62009-03-06 21:48:00 +00001679 && (mask & ~old_mask) != 0) {
pbrook2e70f6e2008-06-29 01:03:05 +00001680 cpu_abort(env, "Raised interrupt while not in I/O function");
1681 }
pbrook2e70f6e2008-06-29 01:03:05 +00001682 } else {
aurel323098dba2009-03-07 21:28:24 +00001683 cpu_unlink_tb(env);
bellardea041c02003-06-25 16:16:50 +00001684 }
1685}
1686
Jan Kiszkaec6959d2011-04-13 01:32:56 +02001687CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1688
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001689#else /* CONFIG_USER_ONLY */
1690
1691void cpu_interrupt(CPUState *env, int mask)
1692{
1693 env->interrupt_request |= mask;
1694 cpu_unlink_tb(env);
1695}
1696#endif /* CONFIG_USER_ONLY */
1697
bellardb54ad042004-05-20 13:42:52 +00001698void cpu_reset_interrupt(CPUState *env, int mask)
1699{
1700 env->interrupt_request &= ~mask;
1701}
1702
aurel323098dba2009-03-07 21:28:24 +00001703void cpu_exit(CPUState *env)
1704{
1705 env->exit_request = 1;
1706 cpu_unlink_tb(env);
1707}
1708
blueswir1c7cd6a32008-10-02 18:27:46 +00001709const CPULogItem cpu_log_items[] = {
ths5fafdf22007-09-16 21:08:06 +00001710 { CPU_LOG_TB_OUT_ASM, "out_asm",
bellardf193c792004-03-21 17:06:25 +00001711 "show generated host assembly code for each compiled TB" },
1712 { CPU_LOG_TB_IN_ASM, "in_asm",
1713 "show target assembly code for each compiled TB" },
ths5fafdf22007-09-16 21:08:06 +00001714 { CPU_LOG_TB_OP, "op",
bellard57fec1f2008-02-01 10:50:11 +00001715 "show micro ops for each compiled TB" },
bellardf193c792004-03-21 17:06:25 +00001716 { CPU_LOG_TB_OP_OPT, "op_opt",
blueswir1e01a1152008-03-14 17:37:11 +00001717 "show micro ops "
1718#ifdef TARGET_I386
1719 "before eflags optimization and "
bellardf193c792004-03-21 17:06:25 +00001720#endif
blueswir1e01a1152008-03-14 17:37:11 +00001721 "after liveness analysis" },
bellardf193c792004-03-21 17:06:25 +00001722 { CPU_LOG_INT, "int",
1723 "show interrupts/exceptions in short format" },
1724 { CPU_LOG_EXEC, "exec",
1725 "show trace before each executed TB (lots of logs)" },
bellard9fddaa02004-05-21 12:59:32 +00001726 { CPU_LOG_TB_CPU, "cpu",
thse91c8a72007-06-03 13:35:16 +00001727 "show CPU state before block translation" },
bellardf193c792004-03-21 17:06:25 +00001728#ifdef TARGET_I386
1729 { CPU_LOG_PCALL, "pcall",
1730 "show protected mode far calls/returns/exceptions" },
aliguorieca1bdf2009-01-26 19:54:31 +00001731 { CPU_LOG_RESET, "cpu_reset",
1732 "show CPU state before CPU resets" },
bellardf193c792004-03-21 17:06:25 +00001733#endif
bellard8e3a9fd2004-10-09 17:32:58 +00001734#ifdef DEBUG_IOPORT
bellardfd872592004-05-12 19:11:15 +00001735 { CPU_LOG_IOPORT, "ioport",
1736 "show all i/o ports accesses" },
bellard8e3a9fd2004-10-09 17:32:58 +00001737#endif
bellardf193c792004-03-21 17:06:25 +00001738 { 0, NULL, NULL },
1739};
1740
1741static int cmp1(const char *s1, int n, const char *s2)
1742{
1743 if (strlen(s2) != n)
1744 return 0;
1745 return memcmp(s1, s2, n) == 0;
1746}
ths3b46e622007-09-17 08:09:54 +00001747
bellardf193c792004-03-21 17:06:25 +00001748/* takes a comma-separated list of log masks. Returns 0 on error. */
1749int cpu_str_to_log_mask(const char *str)
1750{
blueswir1c7cd6a32008-10-02 18:27:46 +00001751 const CPULogItem *item;
bellardf193c792004-03-21 17:06:25 +00001752 int mask;
1753 const char *p, *p1;
1754
1755 p = str;
1756 mask = 0;
1757 for(;;) {
1758 p1 = strchr(p, ',');
1759 if (!p1)
1760 p1 = p + strlen(p);
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001761 if(cmp1(p,p1-p,"all")) {
1762 for(item = cpu_log_items; item->mask != 0; item++) {
1763 mask |= item->mask;
1764 }
1765 } else {
1766 for(item = cpu_log_items; item->mask != 0; item++) {
1767 if (cmp1(p, p1 - p, item->name))
1768 goto found;
1769 }
1770 return 0;
bellardf193c792004-03-21 17:06:25 +00001771 }
bellardf193c792004-03-21 17:06:25 +00001772 found:
1773 mask |= item->mask;
1774 if (*p1 != ',')
1775 break;
1776 p = p1 + 1;
1777 }
1778 return mask;
1779}
bellardea041c02003-06-25 16:16:50 +00001780
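/* Usage sketch, assuming the item names from cpu_log_items above: */
#if 0
int mask = cpu_str_to_log_mask("in_asm,cpu");
if (mask == 0) {
    /* at least one name in the list was not recognized */
} else {
    cpu_set_log(mask);  /* opens the log file and enables both items */
}
#endif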
bellard75012672003-06-21 13:11:07 +00001781void cpu_abort(CPUState *env, const char *fmt, ...)
1782{
1783 va_list ap;
pbrook493ae1f2007-11-23 16:53:59 +00001784 va_list ap2;
bellard75012672003-06-21 13:11:07 +00001785
1786 va_start(ap, fmt);
pbrook493ae1f2007-11-23 16:53:59 +00001787 va_copy(ap2, ap);
bellard75012672003-06-21 13:11:07 +00001788 fprintf(stderr, "qemu: fatal: ");
1789 vfprintf(stderr, fmt, ap);
1790 fprintf(stderr, "\n");
1791#ifdef TARGET_I386
bellard7fe48482004-10-09 18:08:01 +00001792 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1793#else
1794 cpu_dump_state(env, stderr, fprintf, 0);
bellard75012672003-06-21 13:11:07 +00001795#endif
aliguori93fcfe32009-01-15 22:34:14 +00001796 if (qemu_log_enabled()) {
1797 qemu_log("qemu: fatal: ");
1798 qemu_log_vprintf(fmt, ap2);
1799 qemu_log("\n");
j_mayerf9373292007-09-29 12:18:20 +00001800#ifdef TARGET_I386
aliguori93fcfe32009-01-15 22:34:14 +00001801 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
j_mayerf9373292007-09-29 12:18:20 +00001802#else
aliguori93fcfe32009-01-15 22:34:14 +00001803 log_cpu_state(env, 0);
j_mayerf9373292007-09-29 12:18:20 +00001804#endif
aliguori31b1a7b2009-01-15 22:35:09 +00001805 qemu_log_flush();
aliguori93fcfe32009-01-15 22:34:14 +00001806 qemu_log_close();
balrog924edca2007-06-10 14:07:13 +00001807 }
pbrook493ae1f2007-11-23 16:53:59 +00001808 va_end(ap2);
j_mayerf9373292007-09-29 12:18:20 +00001809 va_end(ap);
Riku Voipiofd052bf2010-01-25 14:30:49 +02001810#if defined(CONFIG_USER_ONLY)
1811 {
1812 struct sigaction act;
1813 sigfillset(&act.sa_mask);
1814 act.sa_handler = SIG_DFL;
1815 sigaction(SIGABRT, &act, NULL);
1816 }
1817#endif
bellard75012672003-06-21 13:11:07 +00001818 abort();
1819}
1820
thsc5be9f02007-02-28 20:20:53 +00001821CPUState *cpu_copy(CPUState *env)
1822{
ths01ba9812007-12-09 02:22:57 +00001823 CPUState *new_env = cpu_init(env->cpu_model_str);
thsc5be9f02007-02-28 20:20:53 +00001824 CPUState *next_cpu = new_env->next_cpu;
1825 int cpu_index = new_env->cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001826#if defined(TARGET_HAS_ICE)
1827 CPUBreakpoint *bp;
1828 CPUWatchpoint *wp;
1829#endif
1830
thsc5be9f02007-02-28 20:20:53 +00001831 memcpy(new_env, env, sizeof(CPUState));
aliguori5a38f082009-01-15 20:16:51 +00001832
1833 /* Preserve chaining and index. */
thsc5be9f02007-02-28 20:20:53 +00001834 new_env->next_cpu = next_cpu;
1835 new_env->cpu_index = cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001836
1837 /* Clone all break/watchpoints.
1838 Note: Once we support ptrace with hw-debug register access, make sure
1839 BP_CPU break/watchpoints are handled correctly on clone. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00001840 QTAILQ_INIT(&env->breakpoints);
1841 QTAILQ_INIT(&env->watchpoints);
aliguori5a38f082009-01-15 20:16:51 +00001842#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001843 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001844 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1845 }
Blue Swirl72cf2d42009-09-12 07:36:22 +00001846 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001847 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1848 wp->flags, NULL);
1849 }
1850#endif
1851
thsc5be9f02007-02-28 20:20:53 +00001852 return new_env;
1853}
1854
bellard01243112004-01-04 15:48:17 +00001855#if !defined(CONFIG_USER_ONLY)
1856
edgar_igl5c751e92008-05-06 08:44:21 +00001857static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1858{
1859 unsigned int i;
1860
1861 /* Discard jump cache entries for any TB which might
1862 overlap the flushed page. */
1863 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1864 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001865 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001866
1867 i = tb_jmp_cache_hash_page(addr);
1868 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001869 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001870}
1871
Igor Kovalenko08738982009-07-12 02:15:40 +04001872static CPUTLBEntry s_cputlb_empty_entry = {
1873 .addr_read = -1,
1874 .addr_write = -1,
1875 .addr_code = -1,
1876 .addend = -1,
1877};
1878
bellardee8b7022004-02-03 23:35:10 +00001879/* NOTE: if flush_global is true, also flush global entries (not
1880 implemented yet) */
1881void tlb_flush(CPUState *env, int flush_global)
bellard33417e72003-08-10 21:47:01 +00001882{
bellard33417e72003-08-10 21:47:01 +00001883 int i;
bellard01243112004-01-04 15:48:17 +00001884
bellard9fa3e852004-01-04 18:06:42 +00001885#if defined(DEBUG_TLB)
1886 printf("tlb_flush:\n");
1887#endif
bellard01243112004-01-04 15:48:17 +00001888 /* must reset current TB so that interrupts cannot modify the
1889 links while we are modifying them */
1890 env->current_tb = NULL;
1891
bellard33417e72003-08-10 21:47:01 +00001892 for(i = 0; i < CPU_TLB_SIZE; i++) {
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001893 int mmu_idx;
1894 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
Igor Kovalenko08738982009-07-12 02:15:40 +04001895 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001896 }
bellard33417e72003-08-10 21:47:01 +00001897 }
bellard9fa3e852004-01-04 18:06:42 +00001898
bellard8a40a182005-11-20 10:35:40 +00001899 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
bellard9fa3e852004-01-04 18:06:42 +00001900
Paul Brookd4c430a2010-03-17 02:14:28 +00001901 env->tlb_flush_addr = -1;
1902 env->tlb_flush_mask = 0;
bellarde3db7222005-01-26 22:00:47 +00001903 tlb_flush_count++;
bellard33417e72003-08-10 21:47:01 +00001904}
1905
bellard274da6b2004-05-20 21:56:27 +00001906static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
bellard61382a52003-10-27 21:22:23 +00001907{
ths5fafdf22007-09-16 21:08:06 +00001908 if (addr == (tlb_entry->addr_read &
bellard84b7b8e2005-11-28 21:19:04 +00001909 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00001910 addr == (tlb_entry->addr_write &
bellard84b7b8e2005-11-28 21:19:04 +00001911 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00001912 addr == (tlb_entry->addr_code &
bellard84b7b8e2005-11-28 21:19:04 +00001913 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
Igor Kovalenko08738982009-07-12 02:15:40 +04001914 *tlb_entry = s_cputlb_empty_entry;
bellard84b7b8e2005-11-28 21:19:04 +00001915 }
bellard61382a52003-10-27 21:22:23 +00001916}
1917
bellard2e126692004-04-25 21:28:44 +00001918void tlb_flush_page(CPUState *env, target_ulong addr)
bellard33417e72003-08-10 21:47:01 +00001919{
bellard8a40a182005-11-20 10:35:40 +00001920 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001921 int mmu_idx;
bellard01243112004-01-04 15:48:17 +00001922
bellard9fa3e852004-01-04 18:06:42 +00001923#if defined(DEBUG_TLB)
bellard108c49b2005-07-24 12:55:09 +00001924 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
bellard9fa3e852004-01-04 18:06:42 +00001925#endif
Paul Brookd4c430a2010-03-17 02:14:28 +00001926 /* Check if we need to flush due to large pages. */
1927 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
1928#if defined(DEBUG_TLB)
1929 printf("tlb_flush_page: forced full flush ("
1930 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
1931 env->tlb_flush_addr, env->tlb_flush_mask);
1932#endif
1933 tlb_flush(env, 1);
1934 return;
1935 }
bellard01243112004-01-04 15:48:17 +00001936 /* must reset current TB so that interrupts cannot modify the
1937 links while we are modifying them */
1938 env->current_tb = NULL;
bellard33417e72003-08-10 21:47:01 +00001939
bellard61382a52003-10-27 21:22:23 +00001940 addr &= TARGET_PAGE_MASK;
bellard33417e72003-08-10 21:47:01 +00001941 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001942 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1943 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
bellard01243112004-01-04 15:48:17 +00001944
edgar_igl5c751e92008-05-06 08:44:21 +00001945 tlb_flush_jmp_cache(env, addr);
bellard9fa3e852004-01-04 18:06:42 +00001946}
1947
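/* The TLB is a direct-mapped cache indexed by the low bits of the virtual
   page number, so flushing one page only needs to probe a single set per
   MMU mode. Worked example with hypothetical sizes: with
   TARGET_PAGE_BITS = 12 and CPU_TLB_SIZE = 256, addr = 0x00403000 maps to
   index (0x403000 >> 12) & 0xff = 0x03. */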
bellard9fa3e852004-01-04 18:06:42 +00001948/* update the TLBs so that writes to code in the virtual page 'addr'
1949 can be detected */
Anthony Liguoric227f092009-10-01 16:12:16 -05001950static void tlb_protect_code(ram_addr_t ram_addr)
bellard61382a52003-10-27 21:22:23 +00001951{
ths5fafdf22007-09-16 21:08:06 +00001952 cpu_physical_memory_reset_dirty(ram_addr,
bellard6a00d602005-11-21 23:25:50 +00001953 ram_addr + TARGET_PAGE_SIZE,
1954 CODE_DIRTY_FLAG);
bellard9fa3e852004-01-04 18:06:42 +00001955}
1956
bellard9fa3e852004-01-04 18:06:42 +00001957/* update the TLB so that writes in physical page 'phys_addr' are no longer
bellard3a7d9292005-08-21 09:26:42 +00001958 tested for self-modifying code */
Anthony Liguoric227f092009-10-01 16:12:16 -05001959static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
bellard3a7d9292005-08-21 09:26:42 +00001960 target_ulong vaddr)
bellard9fa3e852004-01-04 18:06:42 +00001961{
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001962 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
bellard1ccde1c2004-02-06 19:46:14 +00001963}
1964
ths5fafdf22007-09-16 21:08:06 +00001965static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
bellard1ccde1c2004-02-06 19:46:14 +00001966 unsigned long start, unsigned long length)
1967{
1968 unsigned long addr;
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001969 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
bellard84b7b8e2005-11-28 21:19:04 +00001970 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
bellard1ccde1c2004-02-06 19:46:14 +00001971 if ((addr - start) < length) {
pbrook0f459d12008-06-09 00:20:13 +00001972 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
bellard1ccde1c2004-02-06 19:46:14 +00001973 }
1974 }
1975}
1976
pbrook5579c7f2009-04-11 14:47:08 +00001977/* Note: start and end must be within the same ram block. */
Anthony Liguoric227f092009-10-01 16:12:16 -05001978void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
bellard0a962c02005-02-10 22:00:27 +00001979 int dirty_flags)
bellard1ccde1c2004-02-06 19:46:14 +00001980{
1981 CPUState *env;
bellard4f2ac232004-04-26 19:44:02 +00001982 unsigned long length, start1;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001983 int i;
bellard1ccde1c2004-02-06 19:46:14 +00001984
1985 start &= TARGET_PAGE_MASK;
1986 end = TARGET_PAGE_ALIGN(end);
1987
1988 length = end - start;
1989 if (length == 0)
1990 return;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001991 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00001992
bellard1ccde1c2004-02-06 19:46:14 +00001993 /* we modify the TLB cache so that the dirty bit will be set again
1994 when accessing the range */
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001995 start1 = (unsigned long)qemu_safe_ram_ptr(start);
Stefan Weila57d23e2011-04-30 22:49:26 +02001996 /* Check that we don't span multiple blocks - this breaks the
pbrook5579c7f2009-04-11 14:47:08 +00001997 address comparisons below. */
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001998 if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
pbrook5579c7f2009-04-11 14:47:08 +00001999 != (end - 1) - start) {
2000 abort();
2001 }
2002
bellard6a00d602005-11-21 23:25:50 +00002003 for(env = first_cpu; env != NULL; env = env->next_cpu) {
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002004 int mmu_idx;
2005 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2006 for(i = 0; i < CPU_TLB_SIZE; i++)
2007 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2008 start1, length);
2009 }
bellard6a00d602005-11-21 23:25:50 +00002010 }
bellard1ccde1c2004-02-06 19:46:14 +00002011}
2012
aliguori74576192008-10-06 14:02:03 +00002013int cpu_physical_memory_set_dirty_tracking(int enable)
2014{
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002015 int ret = 0;
aliguori74576192008-10-06 14:02:03 +00002016 in_migration = enable;
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002017 return ret;
aliguori74576192008-10-06 14:02:03 +00002018}
2019
bellard3a7d9292005-08-21 09:26:42 +00002020static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2021{
Anthony Liguoric227f092009-10-01 16:12:16 -05002022 ram_addr_t ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00002023 void *p;
bellard3a7d9292005-08-21 09:26:42 +00002024
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002025 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
pbrook5579c7f2009-04-11 14:47:08 +00002026 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2027 + tlb_entry->addend);
Marcelo Tosattie8902612010-10-11 15:31:19 -03002028 ram_addr = qemu_ram_addr_from_host_nofail(p);
bellard3a7d9292005-08-21 09:26:42 +00002029 if (!cpu_physical_memory_is_dirty(ram_addr)) {
pbrook0f459d12008-06-09 00:20:13 +00002030 tlb_entry->addr_write |= TLB_NOTDIRTY;
bellard3a7d9292005-08-21 09:26:42 +00002031 }
2032 }
2033}
2034
2035/* update the TLB according to the current state of the dirty bits */
2036void cpu_tlb_update_dirty(CPUState *env)
2037{
2038 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002039 int mmu_idx;
2040 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2041 for(i = 0; i < CPU_TLB_SIZE; i++)
2042 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2043 }
bellard3a7d9292005-08-21 09:26:42 +00002044}
2045
pbrook0f459d12008-06-09 00:20:13 +00002046static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002047{
pbrook0f459d12008-06-09 00:20:13 +00002048 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2049 tlb_entry->addr_write = vaddr;
bellard1ccde1c2004-02-06 19:46:14 +00002050}
2051
pbrook0f459d12008-06-09 00:20:13 +00002052/* update the TLB corresponding to virtual page vaddr
2053 so that it is no longer dirty */
2054static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002055{
bellard1ccde1c2004-02-06 19:46:14 +00002056 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002057 int mmu_idx;
bellard1ccde1c2004-02-06 19:46:14 +00002058
pbrook0f459d12008-06-09 00:20:13 +00002059 vaddr &= TARGET_PAGE_MASK;
bellard1ccde1c2004-02-06 19:46:14 +00002060 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002061 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2062 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
bellard9fa3e852004-01-04 18:06:42 +00002063}
2064
Paul Brookd4c430a2010-03-17 02:14:28 +00002065/* Our TLB does not support large pages, so remember the area covered by
2066 large pages and trigger a full TLB flush if these are invalidated. */
2067static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2068 target_ulong size)
2069{
2070 target_ulong mask = ~(size - 1);
2071
2072 if (env->tlb_flush_addr == (target_ulong)-1) {
2073 env->tlb_flush_addr = vaddr & mask;
2074 env->tlb_flush_mask = mask;
2075 return;
2076 }
2077 /* Extend the existing region to include the new page.
2078 This is a compromise between unnecessary flushes and the cost
2079 of maintaining a full variable size TLB. */
2080 mask &= env->tlb_flush_mask;
2081 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2082 mask <<= 1;
2083 }
2084 env->tlb_flush_addr &= mask;
2085 env->tlb_flush_mask = mask;
2086}
2087
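/* Worked example of the mask widening above, with hypothetical 32-bit
   values: with an existing region addr = 0x10000000 / mask = 0xffff0000
   (a 64 KB page) and a new 64 KB page at vaddr = 0x10120000, the loop
   shifts the mask left until (0x10000000 ^ 0x10120000) & mask == 0,
   ending with mask = 0xffe00000. The remembered region grows to the 2 MB
   range 0x10000000-0x101fffff, trading flush precision for a single
   (addr, mask) pair. */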
Avi Kivity1d393fa2012-01-01 21:15:42 +02002088static bool is_ram_rom(ram_addr_t pd)
2089{
2090 pd &= ~TARGET_PAGE_MASK;
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002091 return pd == io_mem_ram.ram_addr || pd == io_mem_rom.ram_addr;
Avi Kivity1d393fa2012-01-01 21:15:42 +02002092}
2093
Avi Kivity75c578d2012-01-02 15:40:52 +02002094static bool is_romd(ram_addr_t pd)
2095{
2096 MemoryRegion *mr;
2097
2098 pd &= ~TARGET_PAGE_MASK;
2099 mr = io_mem_region[pd >> IO_MEM_SHIFT];
2100 return mr->rom_device && mr->readable;
2101}
2102
Avi Kivity1d393fa2012-01-01 21:15:42 +02002103static bool is_ram_rom_romd(ram_addr_t pd)
2104{
Avi Kivity75c578d2012-01-02 15:40:52 +02002105 return is_ram_rom(pd) || is_romd(pd);
Avi Kivity1d393fa2012-01-01 21:15:42 +02002106}
2107
Paul Brookd4c430a2010-03-17 02:14:28 +00002108/* Add a new TLB entry. At most one entry for a given virtual address
2109 is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
2110 supplied size is only used by tlb_flush_page. */
2111void tlb_set_page(CPUState *env, target_ulong vaddr,
2112 target_phys_addr_t paddr, int prot,
2113 int mmu_idx, target_ulong size)
bellard9fa3e852004-01-04 18:06:42 +00002114{
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02002115 PhysPageDesc p;
bellard4f2ac232004-04-26 19:44:02 +00002116 unsigned long pd;
bellard9fa3e852004-01-04 18:06:42 +00002117 unsigned int index;
bellard4f2ac232004-04-26 19:44:02 +00002118 target_ulong address;
pbrook0f459d12008-06-09 00:20:13 +00002119 target_ulong code_address;
Paul Brook355b1942010-04-05 00:28:53 +01002120 unsigned long addend;
bellard84b7b8e2005-11-28 21:19:04 +00002121 CPUTLBEntry *te;
aliguoria1d1bb32008-11-18 20:07:32 +00002122 CPUWatchpoint *wp;
Anthony Liguoric227f092009-10-01 16:12:16 -05002123 target_phys_addr_t iotlb;
bellard9fa3e852004-01-04 18:06:42 +00002124
Paul Brookd4c430a2010-03-17 02:14:28 +00002125 assert(size >= TARGET_PAGE_SIZE);
2126 if (size != TARGET_PAGE_SIZE) {
2127 tlb_add_large_page(env, vaddr, size);
2128 }
bellard92e873b2004-05-21 14:52:29 +00002129 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02002130 pd = p.phys_offset;
bellard9fa3e852004-01-04 18:06:42 +00002131#if defined(DEBUG_TLB)
Stefan Weil7fd3f492010-09-30 22:39:51 +02002132 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
2133 " prot=%x idx=%d pd=0x%08lx\n",
2134 vaddr, paddr, prot, mmu_idx, pd);
bellard9fa3e852004-01-04 18:06:42 +00002135#endif
2136
pbrook0f459d12008-06-09 00:20:13 +00002137 address = vaddr;
Avi Kivity1d393fa2012-01-01 21:15:42 +02002138 if (!is_ram_rom_romd(pd)) {
pbrook0f459d12008-06-09 00:20:13 +00002139 /* IO memory case (romd handled later) */
2140 address |= TLB_MMIO;
2141 }
pbrook5579c7f2009-04-11 14:47:08 +00002142 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
Avi Kivity1d393fa2012-01-01 21:15:42 +02002143 if (is_ram_rom(pd)) {
pbrook0f459d12008-06-09 00:20:13 +00002144 /* Normal RAM. */
2145 iotlb = pd & TARGET_PAGE_MASK;
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002146 if ((pd & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr)
2147 iotlb |= io_mem_notdirty.ram_addr;
pbrook0f459d12008-06-09 00:20:13 +00002148 else
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002149 iotlb |= io_mem_rom.ram_addr;
pbrook0f459d12008-06-09 00:20:13 +00002150 } else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002151 /* IO handlers are currently passed a physical address.
pbrook0f459d12008-06-09 00:20:13 +00002152 It would be nice to pass an offset from the base address
2153 of that region. This would avoid having to special case RAM,
2154 and avoid full address decoding in every device.
2155 We can't use the high bits of pd for this because
2156 IO_MEM_ROMD uses these as a ram address. */
pbrook8da3ff12008-12-01 18:59:50 +00002157 iotlb = (pd & ~TARGET_PAGE_MASK);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02002158 iotlb += p.region_offset;
pbrook0f459d12008-06-09 00:20:13 +00002159 }
pbrook6658ffb2007-03-16 23:58:11 +00002160
pbrook0f459d12008-06-09 00:20:13 +00002161 code_address = address;
2162 /* Make accesses to pages with watchpoints go via the
2163 watchpoint trap routines. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00002164 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00002165 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
Jun Koibf298f82010-05-06 14:36:59 +09002166 /* Avoid trapping reads of pages with a write breakpoint. */
2167 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
Avi Kivity1ec9b902012-01-02 12:47:48 +02002168 iotlb = io_mem_watch.ram_addr + paddr;
Jun Koibf298f82010-05-06 14:36:59 +09002169 address |= TLB_MMIO;
2170 break;
2171 }
pbrook6658ffb2007-03-16 23:58:11 +00002172 }
pbrook0f459d12008-06-09 00:20:13 +00002173 }
balrogd79acba2007-06-26 20:01:13 +00002174
pbrook0f459d12008-06-09 00:20:13 +00002175 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2176 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2177 te = &env->tlb_table[mmu_idx][index];
2178 te->addend = addend - vaddr;
2179 if (prot & PAGE_READ) {
2180 te->addr_read = address;
2181 } else {
2182 te->addr_read = -1;
2183 }
edgar_igl5c751e92008-05-06 08:44:21 +00002184
pbrook0f459d12008-06-09 00:20:13 +00002185 if (prot & PAGE_EXEC) {
2186 te->addr_code = code_address;
2187 } else {
2188 te->addr_code = -1;
2189 }
2190 if (prot & PAGE_WRITE) {
Avi Kivity75c578d2012-01-02 15:40:52 +02002191 if ((pd & ~TARGET_PAGE_MASK) == io_mem_rom.ram_addr || is_romd(pd)) {
pbrook0f459d12008-06-09 00:20:13 +00002192 /* Write access calls the I/O callback. */
2193 te->addr_write = address | TLB_MMIO;
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002194 } else if ((pd & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr &&
pbrook0f459d12008-06-09 00:20:13 +00002195 !cpu_physical_memory_is_dirty(pd)) {
2196 te->addr_write = address | TLB_NOTDIRTY;
bellard84b7b8e2005-11-28 21:19:04 +00002197 } else {
pbrook0f459d12008-06-09 00:20:13 +00002198 te->addr_write = address;
bellard9fa3e852004-01-04 18:06:42 +00002199 }
pbrook0f459d12008-06-09 00:20:13 +00002200 } else {
2201 te->addr_write = -1;
bellard9fa3e852004-01-04 18:06:42 +00002202 }
bellard9fa3e852004-01-04 18:06:42 +00002203}
2204
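/* Summary of the entry built above, with a sketch of the fast path it
   enables (simplified; the real check lives in the softmmu load/store
   templates): the addr_* fields hold the virtual page tag plus flag bits
   in the sub-page bits, TLB_MMIO diverts accesses to the I/O callbacks
   via env->iotlb, and TLB_NOTDIRTY forces the slow path so the dirty
   bitmap can be updated on the first write to a clean RAM page. */
#if 0
if ((te->addr_read & (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ==
    (vaddr & TARGET_PAGE_MASK)) {
    val = *(uint32_t *)(uintptr_t)(vaddr + te->addend);  /* RAM hit */
} else {
    /* slow path: MMIO, notdirty write, watchpoint, or TLB refill */
}
#endif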
bellard01243112004-01-04 15:48:17 +00002205#else
2206
bellardee8b7022004-02-03 23:35:10 +00002207void tlb_flush(CPUState *env, int flush_global)
bellard01243112004-01-04 15:48:17 +00002208{
2209}
2210
bellard2e126692004-04-25 21:28:44 +00002211void tlb_flush_page(CPUState *env, target_ulong addr)
bellard01243112004-01-04 15:48:17 +00002212{
2213}
2214
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002215/*
2216 * Walks guest process memory "regions" one by one
2217 * and calls callback function 'fn' for each region.
2218 */
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002219
2220struct walk_memory_regions_data
bellard9fa3e852004-01-04 18:06:42 +00002221{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002222 walk_memory_regions_fn fn;
2223 void *priv;
2224 unsigned long start;
2225 int prot;
2226};
bellard9fa3e852004-01-04 18:06:42 +00002227
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002228static int walk_memory_regions_end(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00002229 abi_ulong end, int new_prot)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002230{
2231 if (data->start != -1ul) {
2232 int rc = data->fn(data->priv, data->start, end, data->prot);
2233 if (rc != 0) {
2234 return rc;
bellard9fa3e852004-01-04 18:06:42 +00002235 }
bellard33417e72003-08-10 21:47:01 +00002236 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002237
2238 data->start = (new_prot ? end : -1ul);
2239 data->prot = new_prot;
2240
2241 return 0;
2242}
2243
2244static int walk_memory_regions_1(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00002245 abi_ulong base, int level, void **lp)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002246{
Paul Brookb480d9b2010-03-12 23:23:29 +00002247 abi_ulong pa;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002248 int i, rc;
2249
2250 if (*lp == NULL) {
2251 return walk_memory_regions_end(data, base, 0);
2252 }
2253
2254 if (level == 0) {
2255 PageDesc *pd = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00002256 for (i = 0; i < L2_SIZE; ++i) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002257 int prot = pd[i].flags;
2258
2259 pa = base | (i << TARGET_PAGE_BITS);
2260 if (prot != data->prot) {
2261 rc = walk_memory_regions_end(data, pa, prot);
2262 if (rc != 0) {
2263 return rc;
2264 }
2265 }
2266 }
2267 } else {
2268 void **pp = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00002269 for (i = 0; i < L2_SIZE; ++i) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002270 pa = base | ((abi_ulong)i <<
2271 (TARGET_PAGE_BITS + L2_BITS * level));
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002272 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2273 if (rc != 0) {
2274 return rc;
2275 }
2276 }
2277 }
2278
2279 return 0;
2280}
2281
2282int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2283{
2284 struct walk_memory_regions_data data;
2285 unsigned long i;
2286
2287 data.fn = fn;
2288 data.priv = priv;
2289 data.start = -1ul;
2290 data.prot = 0;
2291
2292 for (i = 0; i < V_L1_SIZE; i++) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002293 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002294 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2295 if (rc != 0) {
2296 return rc;
2297 }
2298 }
2299
2300 return walk_memory_regions_end(&data, 0, 0);
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002301}
2302
Paul Brookb480d9b2010-03-12 23:23:29 +00002303static int dump_region(void *priv, abi_ulong start,
2304 abi_ulong end, unsigned long prot)
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002305{
2306 FILE *f = (FILE *)priv;
2307
Paul Brookb480d9b2010-03-12 23:23:29 +00002308 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2309 " "TARGET_ABI_FMT_lx" %c%c%c\n",
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002310 start, end, end - start,
2311 ((prot & PAGE_READ) ? 'r' : '-'),
2312 ((prot & PAGE_WRITE) ? 'w' : '-'),
2313 ((prot & PAGE_EXEC) ? 'x' : '-'));
2314
2315 return (0);
2316}
2317
2318/* dump memory mappings */
2319void page_dump(FILE *f)
2320{
2321 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2322 "start", "end", "size", "prot");
2323 walk_memory_regions(f, dump_region);
bellard33417e72003-08-10 21:47:01 +00002324}
2325
pbrook53a59602006-03-25 19:31:22 +00002326int page_get_flags(target_ulong address)
bellard33417e72003-08-10 21:47:01 +00002327{
bellard9fa3e852004-01-04 18:06:42 +00002328 PageDesc *p;
2329
2330 p = page_find(address >> TARGET_PAGE_BITS);
bellard33417e72003-08-10 21:47:01 +00002331 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00002332 return 0;
2333 return p->flags;
bellard33417e72003-08-10 21:47:01 +00002334}
2335
Richard Henderson376a7902010-03-10 15:57:04 -08002336/* Modify the flags of a page and invalidate the code if necessary.
2337 The flag PAGE_WRITE_ORG is set automatically depending
2338 on PAGE_WRITE. The mmap_lock should already be held. */
pbrook53a59602006-03-25 19:31:22 +00002339void page_set_flags(target_ulong start, target_ulong end, int flags)
bellard9fa3e852004-01-04 18:06:42 +00002340{
Richard Henderson376a7902010-03-10 15:57:04 -08002341 target_ulong addr, len;
bellard9fa3e852004-01-04 18:06:42 +00002342
Richard Henderson376a7902010-03-10 15:57:04 -08002343 /* This function should never be called with addresses outside the
2344 guest address space. If this assert fires, it probably indicates
2345 a missing call to h2g_valid. */
Paul Brookb480d9b2010-03-12 23:23:29 +00002346#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2347 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002348#endif
2349 assert(start < end);
2350
bellard9fa3e852004-01-04 18:06:42 +00002351 start = start & TARGET_PAGE_MASK;
2352 end = TARGET_PAGE_ALIGN(end);
Richard Henderson376a7902010-03-10 15:57:04 -08002353
2354 if (flags & PAGE_WRITE) {
bellard9fa3e852004-01-04 18:06:42 +00002355 flags |= PAGE_WRITE_ORG;
Richard Henderson376a7902010-03-10 15:57:04 -08002356 }
2357
2358 for (addr = start, len = end - start;
2359 len != 0;
2360 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2361 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2362
2363 /* If the write protection bit is set, then we invalidate
2364 the code inside. */
ths5fafdf22007-09-16 21:08:06 +00002365 if (!(p->flags & PAGE_WRITE) &&
bellard9fa3e852004-01-04 18:06:42 +00002366 (flags & PAGE_WRITE) &&
2367 p->first_tb) {
bellardd720b932004-04-25 17:57:43 +00002368 tb_invalidate_phys_page(addr, 0, NULL);
bellard9fa3e852004-01-04 18:06:42 +00002369 }
2370 p->flags = flags;
2371 }
bellard9fa3e852004-01-04 18:06:42 +00002372}
2373
ths3d97b402007-11-02 19:02:07 +00002374int page_check_range(target_ulong start, target_ulong len, int flags)
2375{
2376 PageDesc *p;
2377 target_ulong end;
2378 target_ulong addr;
2379
Richard Henderson376a7902010-03-10 15:57:04 -08002380 /* This function should never be called with addresses outside the
2381 guest address space. If this assert fires, it probably indicates
2382 a missing call to h2g_valid. */
Blue Swirl338e9e62010-03-13 09:48:08 +00002383#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2384 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002385#endif
2386
Richard Henderson3e0650a2010-03-29 10:54:42 -07002387 if (len == 0) {
2388 return 0;
2389 }
Richard Henderson376a7902010-03-10 15:57:04 -08002390 if (start + len - 1 < start) {
2391 /* We've wrapped around. */
balrog55f280c2008-10-28 10:24:11 +00002392 return -1;
Richard Henderson376a7902010-03-10 15:57:04 -08002393 }
balrog55f280c2008-10-28 10:24:11 +00002394
ths3d97b402007-11-02 19:02:07 +00002395 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2396 start = start & TARGET_PAGE_MASK;
2397
Richard Henderson376a7902010-03-10 15:57:04 -08002398 for (addr = start, len = end - start;
2399 len != 0;
2400 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
ths3d97b402007-11-02 19:02:07 +00002401 p = page_find(addr >> TARGET_PAGE_BITS);
2402 if( !p )
2403 return -1;
2404 if( !(p->flags & PAGE_VALID) )
2405 return -1;
2406
bellarddae32702007-11-14 10:51:00 +00002407 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
ths3d97b402007-11-02 19:02:07 +00002408 return -1;
bellarddae32702007-11-14 10:51:00 +00002409 if (flags & PAGE_WRITE) {
2410 if (!(p->flags & PAGE_WRITE_ORG))
2411 return -1;
2412 /* unprotect the page if it was put read-only because it
2413 contains translated code */
2414 if (!(p->flags & PAGE_WRITE)) {
2415 if (!page_unprotect(addr, 0, NULL))
2416 return -1;
2417 }
2418 return 0;
2419 }
ths3d97b402007-11-02 19:02:07 +00002420 }
2421 return 0;
2422}
2423
bellard9fa3e852004-01-04 18:06:42 +00002424/* called from signal handler: invalidate the code and unprotect the
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002425 page. Return TRUE if the fault was successfully handled. */
pbrook53a59602006-03-25 19:31:22 +00002426int page_unprotect(target_ulong address, unsigned long pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00002427{
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002428 unsigned int prot;
2429 PageDesc *p;
pbrook53a59602006-03-25 19:31:22 +00002430 target_ulong host_start, host_end, addr;
bellard9fa3e852004-01-04 18:06:42 +00002431
pbrookc8a706f2008-06-02 16:16:42 +00002432 /* Technically this isn't safe inside a signal handler. However we
2433 know this only ever happens in a synchronous SEGV handler, so in
2434 practice it seems to be ok. */
2435 mmap_lock();
2436
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002437 p = page_find(address >> TARGET_PAGE_BITS);
2438 if (!p) {
pbrookc8a706f2008-06-02 16:16:42 +00002439 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002440 return 0;
pbrookc8a706f2008-06-02 16:16:42 +00002441 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002442
bellard9fa3e852004-01-04 18:06:42 +00002443 /* if the page was really writable, then we change its
2444 protection back to writable */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002445 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2446 host_start = address & qemu_host_page_mask;
2447 host_end = host_start + qemu_host_page_size;
2448
2449 prot = 0;
2450 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2451 p = page_find(addr >> TARGET_PAGE_BITS);
2452 p->flags |= PAGE_WRITE;
2453 prot |= p->flags;
2454
bellard9fa3e852004-01-04 18:06:42 +00002455 /* and since the content will be modified, we must invalidate
2456 the corresponding translated code. */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002457 tb_invalidate_phys_page(addr, pc, puc);
bellard9fa3e852004-01-04 18:06:42 +00002458#ifdef DEBUG_TB_CHECK
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002459 tb_invalidate_check(addr);
bellard9fa3e852004-01-04 18:06:42 +00002460#endif
bellard9fa3e852004-01-04 18:06:42 +00002461 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002462 mprotect((void *)g2h(host_start), qemu_host_page_size,
2463 prot & PAGE_BITS);
2464
2465 mmap_unlock();
2466 return 1;
bellard9fa3e852004-01-04 18:06:42 +00002467 }
pbrookc8a706f2008-06-02 16:16:42 +00002468 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002469 return 0;
2470}
2471
bellard6a00d602005-11-21 23:25:50 +00002472static inline void tlb_set_dirty(CPUState *env,
2473 unsigned long addr, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002474{
2475}
bellard9fa3e852004-01-04 18:06:42 +00002476#endif /* defined(CONFIG_USER_ONLY) */
2477
pbrooke2eef172008-06-08 01:09:01 +00002478#if !defined(CONFIG_USER_ONLY)
pbrook8da3ff12008-12-01 18:59:50 +00002479
Paul Brookc04b2b72010-03-01 03:31:14 +00002480#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2481typedef struct subpage_t {
Avi Kivity70c68e42012-01-02 12:32:48 +02002482 MemoryRegion iomem;
Paul Brookc04b2b72010-03-01 03:31:14 +00002483 target_phys_addr_t base;
Richard Hendersonf6405242010-04-22 16:47:31 -07002484 ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2485 ram_addr_t region_offset[TARGET_PAGE_SIZE];
Paul Brookc04b2b72010-03-01 03:31:14 +00002486} subpage_t;
2487
Anthony Liguoric227f092009-10-01 16:12:16 -05002488static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2489 ram_addr_t memory, ram_addr_t region_offset);
Richard Hendersonf6405242010-04-22 16:47:31 -07002490static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2491 ram_addr_t orig_memory,
2492 ram_addr_t region_offset);
blueswir1db7b5422007-05-26 17:36:03 +00002493#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2494 need_subpage) \
2495 do { \
2496 if (addr > start_addr) \
2497 start_addr2 = 0; \
2498 else { \
2499 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2500 if (start_addr2 > 0) \
2501 need_subpage = 1; \
2502 } \
2503 \
blueswir149e9fba2007-05-30 17:25:06 +00002504 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
blueswir1db7b5422007-05-26 17:36:03 +00002505 end_addr2 = TARGET_PAGE_SIZE - 1; \
2506 else { \
2507 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2508 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2509 need_subpage = 1; \
2510 } \
2511 } while (0)
2512
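/* Worked example of CHECK_SUBPAGE, assuming hypothetical 4 KB target
   pages: registering start_addr = 0x1080 with orig_size = 0x100 hits the
   page at addr = 0x1000 with start_addr2 = 0x080 and end_addr2 = 0x17f,
   and need_subpage is set by both tests because the region covers
   neither the first nor the last byte of the page. */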
Michael S. Tsirkin8f2498f2009-09-29 18:53:16 +02002513/* register physical memory.
2514 For RAM, 'size' must be a multiple of the target page size.
2515 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
pbrook8da3ff12008-12-01 18:59:50 +00002516 io memory page. The address used when calling the IO function is
2517 the offset from the start of the region, plus region_offset. Both
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002518 start_addr and region_offset are rounded down to a page boundary
pbrook8da3ff12008-12-01 18:59:50 +00002519 before calculating this offset. This should not be a problem unless
2520 the low bits of start_addr and region_offset differ. */
Avi Kivitydd811242012-01-02 12:17:03 +02002521void cpu_register_physical_memory_log(MemoryRegionSection *section,
2522 bool readable, bool readonly)
bellard33417e72003-08-10 21:47:01 +00002523{
Avi Kivitydd811242012-01-02 12:17:03 +02002524 target_phys_addr_t start_addr = section->offset_within_address_space;
2525 ram_addr_t size = section->size;
2526 ram_addr_t phys_offset = section->mr->ram_addr;
2527 ram_addr_t region_offset = section->offset_within_region;
Anthony Liguoric227f092009-10-01 16:12:16 -05002528 target_phys_addr_t addr, end_addr;
bellard92e873b2004-05-21 14:52:29 +00002529 PhysPageDesc *p;
bellard9d420372006-06-25 22:25:22 +00002530 CPUState *env;
Anthony Liguoric227f092009-10-01 16:12:16 -05002531 ram_addr_t orig_size = size;
Richard Hendersonf6405242010-04-22 16:47:31 -07002532 subpage_t *subpage;
bellard33417e72003-08-10 21:47:01 +00002533
Avi Kivitydd811242012-01-02 12:17:03 +02002534 if (memory_region_is_ram(section->mr)) {
2535 phys_offset += region_offset;
2536 region_offset = 0;
2537 }
2538
Avi Kivitydd811242012-01-02 12:17:03 +02002539 if (readonly) {
2540 phys_offset |= io_mem_rom.ram_addr;
2541 }
2542
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002543 assert(size);
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002544
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002545 if (phys_offset == io_mem_unassigned.ram_addr) {
pbrook67c4d232009-02-23 13:16:07 +00002546 region_offset = start_addr;
2547 }
pbrook8da3ff12008-12-01 18:59:50 +00002548 region_offset &= TARGET_PAGE_MASK;
bellard5fd386f2004-05-23 21:11:22 +00002549 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
Anthony Liguoric227f092009-10-01 16:12:16 -05002550 end_addr = start_addr + (target_phys_addr_t)size;
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002551
2552 addr = start_addr;
2553 do {
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02002554 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 0);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002555 if (p && p->phys_offset != io_mem_unassigned.ram_addr) {
Anthony Liguoric227f092009-10-01 16:12:16 -05002556 ram_addr_t orig_memory = p->phys_offset;
2557 target_phys_addr_t start_addr2, end_addr2;
blueswir1db7b5422007-05-26 17:36:03 +00002558 int need_subpage = 0;
Avi Kivityb3b00c72012-01-02 13:20:11 +02002559 MemoryRegion *mr = io_mem_region[(orig_memory & ~TARGET_PAGE_MASK)
2560 >> IO_MEM_SHIFT];
blueswir1db7b5422007-05-26 17:36:03 +00002561
2562 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2563 need_subpage);
Richard Hendersonf6405242010-04-22 16:47:31 -07002564 if (need_subpage) {
Avi Kivityb3b00c72012-01-02 13:20:11 +02002565 if (!(mr->subpage)) {
blueswir1db7b5422007-05-26 17:36:03 +00002566 subpage = subpage_init((addr & TARGET_PAGE_MASK),
pbrook8da3ff12008-12-01 18:59:50 +00002567 &p->phys_offset, orig_memory,
2568 p->region_offset);
blueswir1db7b5422007-05-26 17:36:03 +00002569 } else {
Avi Kivitya621f382012-01-02 13:12:08 +02002570 subpage = container_of(mr, subpage_t, iomem);
blueswir1db7b5422007-05-26 17:36:03 +00002571 }
pbrook8da3ff12008-12-01 18:59:50 +00002572 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2573 region_offset);
2574 p->region_offset = 0;
blueswir1db7b5422007-05-26 17:36:03 +00002575 } else {
2576 p->phys_offset = phys_offset;
Avi Kivity2774c6d2012-01-01 18:24:24 +02002577 p->region_offset = region_offset;
Avi Kivity1d393fa2012-01-01 21:15:42 +02002578 if (is_ram_rom_romd(phys_offset))
blueswir1db7b5422007-05-26 17:36:03 +00002579 phys_offset += TARGET_PAGE_SIZE;
2580 }
2581 } else {
2582 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2583 p->phys_offset = phys_offset;
pbrook8da3ff12008-12-01 18:59:50 +00002584 p->region_offset = region_offset;
Avi Kivity1d393fa2012-01-01 21:15:42 +02002585 if (is_ram_rom_romd(phys_offset)) {
blueswir1db7b5422007-05-26 17:36:03 +00002586 phys_offset += TARGET_PAGE_SIZE;
pbrook0e8f0962008-12-02 09:02:15 +00002587 } else {
Anthony Liguoric227f092009-10-01 16:12:16 -05002588 target_phys_addr_t start_addr2, end_addr2;
blueswir1db7b5422007-05-26 17:36:03 +00002589 int need_subpage = 0;
2590
2591 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2592 end_addr2, need_subpage);
2593
Richard Hendersonf6405242010-04-22 16:47:31 -07002594 if (need_subpage) {
blueswir1db7b5422007-05-26 17:36:03 +00002595 subpage = subpage_init((addr & TARGET_PAGE_MASK),
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002596 &p->phys_offset,
2597 io_mem_unassigned.ram_addr,
pbrook67c4d232009-02-23 13:16:07 +00002598 addr & TARGET_PAGE_MASK);
blueswir1db7b5422007-05-26 17:36:03 +00002599 subpage_register(subpage, start_addr2, end_addr2,
pbrook8da3ff12008-12-01 18:59:50 +00002600 phys_offset, region_offset);
2601 p->region_offset = 0;
blueswir1db7b5422007-05-26 17:36:03 +00002602 }
2603 }
2604 }
pbrook8da3ff12008-12-01 18:59:50 +00002605 region_offset += TARGET_PAGE_SIZE;
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002606 addr += TARGET_PAGE_SIZE;
2607 } while (addr != end_addr);
ths3b46e622007-09-17 08:09:54 +00002608
bellard9d420372006-06-25 22:25:22 +00002609 /* since each CPU stores ram addresses in its TLB cache, we must
2610 reset the modified entries */
2611 /* XXX: slow ! */
2612 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2613 tlb_flush(env, 1);
2614 }
bellard33417e72003-08-10 21:47:01 +00002615}
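
/* Editor's illustrative sketch (not part of the original file): the minimal
 * MemoryRegionSection a caller could hand to the function above to map one
 * page of a region 'mr' at guest-physical 0x10000000.  Only the four fields
 * that cpu_register_physical_memory_log() reads are filled in; the function
 * name is hypothetical. */
static void __attribute__((unused))
example_register_one_page(MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .mr = mr,
        .offset_within_address_space = 0x10000000,
        .offset_within_region = 0,
        .size = TARGET_PAGE_SIZE,
    };

    cpu_register_physical_memory_log(&section, true, false); /* readable, not readonly */
}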
2616
Anthony Liguoric227f092009-10-01 16:12:16 -05002617void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002618{
2619 if (kvm_enabled())
2620 kvm_coalesce_mmio_region(addr, size);
2621}
2622
Anthony Liguoric227f092009-10-01 16:12:16 -05002623void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002624{
2625 if (kvm_enabled())
2626 kvm_uncoalesce_mmio_region(addr, size);
2627}
2628
Sheng Yang62a27442010-01-26 19:21:16 +08002629void qemu_flush_coalesced_mmio_buffer(void)
2630{
2631 if (kvm_enabled())
2632 kvm_flush_coalesced_mmio_buffer();
2633}
2634
Marcelo Tosattic9027602010-03-01 20:25:08 -03002635#if defined(__linux__) && !defined(TARGET_S390X)
2636
2637#include <sys/vfs.h>
2638
2639#define HUGETLBFS_MAGIC 0x958458f6
2640
2641static long gethugepagesize(const char *path)
2642{
2643 struct statfs fs;
2644 int ret;
2645
2646 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002647 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002648 } while (ret != 0 && errno == EINTR);
2649
2650 if (ret != 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002651 perror(path);
2652 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002653 }
2654
2655 if (fs.f_type != HUGETLBFS_MAGIC)
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002656 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002657
2658 return fs.f_bsize;
2659}
2660
Alex Williamson04b16652010-07-02 11:13:17 -06002661static void *file_ram_alloc(RAMBlock *block,
2662 ram_addr_t memory,
2663 const char *path)
Marcelo Tosattic9027602010-03-01 20:25:08 -03002664{
2665 char *filename;
2666 void *area;
2667 int fd;
2668#ifdef MAP_POPULATE
2669 int flags;
2670#endif
2671 unsigned long hpagesize;
2672
2673 hpagesize = gethugepagesize(path);
2674 if (!hpagesize) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002675 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002676 }
2677
2678 if (memory < hpagesize) {
2679 return NULL;
2680 }
2681
2682 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2683 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2684 return NULL;
2685 }
2686
2687 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002688 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002689 }
2690
2691 fd = mkstemp(filename);
2692 if (fd < 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002693 perror("unable to create backing store for hugepages");
2694 free(filename);
2695 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002696 }
2697 unlink(filename);
2698 free(filename);
2699
2700 memory = (memory+hpagesize-1) & ~(hpagesize-1);
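    /* Editor's note: this aligns the request up to a whole number of huge
       pages; e.g. with 2 MiB huge pages a 5 MiB request becomes
       (5M + 2M - 1) & ~(2M - 1) = 6 MiB, i.e. three huge pages. */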
2701
2702 /*
2703 * ftruncate is not supported by hugetlbfs in older
2704 * hosts, so don't bother bailing out on errors.
2705 * If anything goes wrong with it under other filesystems,
2706 * mmap will fail.
2707 */
2708 if (ftruncate(fd, memory))
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002709 perror("ftruncate");
Marcelo Tosattic9027602010-03-01 20:25:08 -03002710
2711#ifdef MAP_POPULATE
2712 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2713 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2714 * to sidestep this quirk.
2715 */
2716 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2717 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2718#else
2719 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2720#endif
2721 if (area == MAP_FAILED) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002722 perror("file_ram_alloc: can't mmap RAM pages");
2723 close(fd);
2724 return (NULL);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002725 }
Alex Williamson04b16652010-07-02 11:13:17 -06002726 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002727 return area;
2728}
2729#endif
2730
Alex Williamsond17b5282010-06-25 11:08:38 -06002731static ram_addr_t find_ram_offset(ram_addr_t size)
2732{
Alex Williamson04b16652010-07-02 11:13:17 -06002733 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06002734 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002735
2736 if (QLIST_EMPTY(&ram_list.blocks))
2737 return 0;
2738
2739 QLIST_FOREACH(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002740 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002741
2742 end = block->offset + block->length;
2743
2744 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2745 if (next_block->offset >= end) {
2746 next = MIN(next, next_block->offset);
2747 }
2748 }
2749 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06002750 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06002751 mingap = next - end;
2752 }
2753 }
Alex Williamson3e837b22011-10-31 08:54:09 -06002754
2755 if (offset == RAM_ADDR_MAX) {
2756 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2757 (uint64_t)size);
2758 abort();
2759 }
2760
Alex Williamson04b16652010-07-02 11:13:17 -06002761 return offset;
2762}
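
/* Editor's worked example (not in the original source): with existing blocks
 * [0x0, 0x1000) and [0x3000, 0x4000), the gaps seen above are
 * [0x1000, 0x3000) of size 0x2000 and the unbounded tail from 0x4000.
 * A request for 0x1000 bytes fits both, and the best-fit rule (smallest
 * sufficient gap wins) returns offset 0x1000. */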
2763
2764static ram_addr_t last_ram_offset(void)
2765{
Alex Williamsond17b5282010-06-25 11:08:38 -06002766 RAMBlock *block;
2767 ram_addr_t last = 0;
2768
2769 QLIST_FOREACH(block, &ram_list.blocks, next)
2770 last = MAX(last, block->offset + block->length);
2771
2772 return last;
2773}
2774
Avi Kivityc5705a72011-12-20 15:59:12 +02002775void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
Cam Macdonell84b89d72010-07-26 18:10:57 -06002776{
2777 RAMBlock *new_block, *block;
2778
Avi Kivityc5705a72011-12-20 15:59:12 +02002779 new_block = NULL;
2780 QLIST_FOREACH(block, &ram_list.blocks, next) {
2781 if (block->offset == addr) {
2782 new_block = block;
2783 break;
2784 }
2785 }
2786 assert(new_block);
2787 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002788
2789 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2790 char *id = dev->parent_bus->info->get_dev_path(dev);
2791 if (id) {
2792 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05002793 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002794 }
2795 }
2796 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2797
2798 QLIST_FOREACH(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02002799 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06002800 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2801 new_block->idstr);
2802 abort();
2803 }
2804 }
Avi Kivityc5705a72011-12-20 15:59:12 +02002805}
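
/* Editor's note: for, say, a PCI device whose bus path is "0000:00:02.0"
 * registering a block named "vga.vram", the code above produces the idstr
 * "0000:00:02.0/vga.vram" (device path, '/', caller-supplied name) and
 * aborts if another block already carries that idstr.  The example path
 * and name are hypothetical. */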
2806
2807ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2808 MemoryRegion *mr)
2809{
2810 RAMBlock *new_block;
2811
2812 size = TARGET_PAGE_ALIGN(size);
2813 new_block = g_malloc0(sizeof(*new_block));
Cam Macdonell84b89d72010-07-26 18:10:57 -06002814
Avi Kivity7c637362011-12-21 13:09:49 +02002815 new_block->mr = mr;
Jun Nakajima432d2682010-08-31 16:41:25 +01002816 new_block->offset = find_ram_offset(size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002817 if (host) {
2818 new_block->host = host;
Huang Yingcd19cfa2011-03-02 08:56:19 +01002819 new_block->flags |= RAM_PREALLOC_MASK;
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002820 } else {
2821 if (mem_path) {
2822#if defined (__linux__) && !defined(TARGET_S390X)
2823 new_block->host = file_ram_alloc(new_block, size, mem_path);
2824 if (!new_block->host) {
2825 new_block->host = qemu_vmalloc(size);
Andreas Färbere78815a2010-09-25 11:26:05 +00002826 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002827 }
2828#else
2829 fprintf(stderr, "-mem-path option unsupported\n");
2830 exit(1);
2831#endif
2832 } else {
2833#if defined(TARGET_S390X) && defined(CONFIG_KVM)
Christian Borntraegerff836782011-05-10 14:49:10 +02002834 /* S390 KVM requires the topmost vma of the RAM to be smaller than
2835 an system defined value, which is at least 256GB. Larger systems
2836 have larger values. We put the guest between the end of data
2837 segment (system break) and this value. We use 32GB as a base to
2838 have enough room for the system break to grow. */
2839 new_block->host = mmap((void*)0x800000000, size,
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002840 PROT_EXEC|PROT_READ|PROT_WRITE,
Christian Borntraegerff836782011-05-10 14:49:10 +02002841 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
Alexander Graffb8b2732011-05-20 17:33:28 +02002842 if (new_block->host == MAP_FAILED) {
2843 fprintf(stderr, "Allocating RAM failed\n");
2844 abort();
2845 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002846#else
Jan Kiszka868bb332011-06-21 22:59:09 +02002847 if (xen_enabled()) {
Avi Kivityfce537d2011-12-18 15:48:55 +02002848 xen_ram_alloc(new_block->offset, size, mr);
Jun Nakajima432d2682010-08-31 16:41:25 +01002849 } else {
2850 new_block->host = qemu_vmalloc(size);
2851 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002852#endif
Andreas Färbere78815a2010-09-25 11:26:05 +00002853 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002854 }
2855 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06002856 new_block->length = size;
2857
2858 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2859
Anthony Liguori7267c092011-08-20 22:09:37 -05002860 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
Cam Macdonell84b89d72010-07-26 18:10:57 -06002861 last_ram_offset() >> TARGET_PAGE_BITS);
2862 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2863 0xff, size >> TARGET_PAGE_BITS);
2864
2865 if (kvm_enabled())
2866 kvm_setup_guest_memory(new_block->host, size);
2867
2868 return new_block->offset;
2869}
2870
Avi Kivityc5705a72011-12-20 15:59:12 +02002871ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
pbrook94a6b542009-04-11 17:15:54 +00002872{
Avi Kivityc5705a72011-12-20 15:59:12 +02002873 return qemu_ram_alloc_from_ptr(size, NULL, mr);
pbrook94a6b542009-04-11 17:15:54 +00002874}
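
/* Editor's illustrative sketch (not part of the original file): a device
 * model allocating and naming an 8 MiB RAM block.  The function name and
 * the "vga.vram" name are hypothetical; in this tree the MemoryRegion RAM
 * helpers normally drive these two calls. */
static ram_addr_t __attribute__((unused))
example_alloc_vram(MemoryRegion *mr, DeviceState *dev)
{
    ram_addr_t offset = qemu_ram_alloc(8 * 1024 * 1024, mr);

    /* the idstr is what matches blocks up again across migration */
    qemu_ram_set_idstr(offset, "vga.vram", dev);
    return offset;
}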
bellarde9a1ab12007-02-08 23:08:38 +00002875
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002876void qemu_ram_free_from_ptr(ram_addr_t addr)
2877{
2878 RAMBlock *block;
2879
2880 QLIST_FOREACH(block, &ram_list.blocks, next) {
2881 if (addr == block->offset) {
2882 QLIST_REMOVE(block, next);
Anthony Liguori7267c092011-08-20 22:09:37 -05002883 g_free(block);
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002884 return;
2885 }
2886 }
2887}
2888
Anthony Liguoric227f092009-10-01 16:12:16 -05002889void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00002890{
Alex Williamson04b16652010-07-02 11:13:17 -06002891 RAMBlock *block;
2892
2893 QLIST_FOREACH(block, &ram_list.blocks, next) {
2894 if (addr == block->offset) {
2895 QLIST_REMOVE(block, next);
Huang Yingcd19cfa2011-03-02 08:56:19 +01002896 if (block->flags & RAM_PREALLOC_MASK) {
2897 ;
2898 } else if (mem_path) {
Alex Williamson04b16652010-07-02 11:13:17 -06002899#if defined (__linux__) && !defined(TARGET_S390X)
2900 if (block->fd) {
2901 munmap(block->host, block->length);
2902 close(block->fd);
2903 } else {
2904 qemu_vfree(block->host);
2905 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01002906#else
2907 abort();
Alex Williamson04b16652010-07-02 11:13:17 -06002908#endif
2909 } else {
2910#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2911 munmap(block->host, block->length);
2912#else
Jan Kiszka868bb332011-06-21 22:59:09 +02002913 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002914 xen_invalidate_map_cache_entry(block->host);
Jun Nakajima432d2682010-08-31 16:41:25 +01002915 } else {
2916 qemu_vfree(block->host);
2917 }
Alex Williamson04b16652010-07-02 11:13:17 -06002918#endif
2919 }
Anthony Liguori7267c092011-08-20 22:09:37 -05002920 g_free(block);
Alex Williamson04b16652010-07-02 11:13:17 -06002921 return;
2922 }
2923 }
2924
bellarde9a1ab12007-02-08 23:08:38 +00002925}
2926
Huang Yingcd19cfa2011-03-02 08:56:19 +01002927#ifndef _WIN32
2928void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2929{
2930 RAMBlock *block;
2931 ram_addr_t offset;
2932 int flags;
2933 void *area, *vaddr;
2934
2935 QLIST_FOREACH(block, &ram_list.blocks, next) {
2936 offset = addr - block->offset;
2937 if (offset < block->length) {
2938 vaddr = block->host + offset;
2939 if (block->flags & RAM_PREALLOC_MASK) {
2940 ;
2941 } else {
2942 flags = MAP_FIXED;
2943 munmap(vaddr, length);
2944 if (mem_path) {
2945#if defined(__linux__) && !defined(TARGET_S390X)
2946 if (block->fd) {
2947#ifdef MAP_POPULATE
2948 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
2949 MAP_PRIVATE;
2950#else
2951 flags |= MAP_PRIVATE;
2952#endif
2953 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2954 flags, block->fd, offset);
2955 } else {
2956 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2957 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2958 flags, -1, 0);
2959 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01002960#else
2961 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01002962#endif
2963 } else {
2964#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2965 flags |= MAP_SHARED | MAP_ANONYMOUS;
2966 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
2967 flags, -1, 0);
2968#else
2969 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2970 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2971 flags, -1, 0);
2972#endif
2973 }
2974 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002975 fprintf(stderr, "Could not remap addr: "
2976 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01002977 length, addr);
2978 exit(1);
2979 }
2980 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
2981 }
2982 return;
2983 }
2984 }
2985}
2986#endif /* !_WIN32 */
2987
pbrookdc828ca2009-04-09 22:21:07 +00002988/* Return a host pointer to ram allocated with qemu_ram_alloc.
pbrook5579c7f2009-04-11 14:47:08 +00002989 With the exception of the softmmu code in this file, this should
2990 only be used for local memory (e.g. video ram) that the device owns,
2991 and knows it isn't going to access beyond the end of the block.
2992
2993 It should not be used for general purpose DMA.
2994 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2995 */
Anthony Liguoric227f092009-10-01 16:12:16 -05002996void *qemu_get_ram_ptr(ram_addr_t addr)
pbrookdc828ca2009-04-09 22:21:07 +00002997{
pbrook94a6b542009-04-11 17:15:54 +00002998 RAMBlock *block;
2999
Alex Williamsonf471a172010-06-11 11:11:42 -06003000 QLIST_FOREACH(block, &ram_list.blocks, next) {
3001 if (addr - block->offset < block->length) {
Vincent Palatin7d82af32011-03-10 15:47:46 -05003002 /* Move this entry to the start of the list. */
3003 if (block != QLIST_FIRST(&ram_list.blocks)) {
3004 QLIST_REMOVE(block, next);
3005 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
3006 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003007 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003008 /* We need to check if the requested address is in the RAM
3009 * because we don't want to map the entire memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003010 * In that case, just map up to the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01003011 */
3012 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003013 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01003014 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003015 block->host =
3016 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01003017 }
3018 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003019 return block->host + (addr - block->offset);
3020 }
pbrook94a6b542009-04-11 17:15:54 +00003021 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003022
3023 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3024 abort();
3025
3026 return NULL;
pbrookdc828ca2009-04-09 22:21:07 +00003027}
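
/* Editor's illustrative sketch: the kind of device-local access the comment
 * above permits -- touching memory inside a block the device itself
 * allocated ('vram_offset' is hypothetical).  General-purpose DMA should go
 * through cpu_physical_memory_map()/cpu_physical_memory_rw() instead. */
static void __attribute__((unused))
example_clear_first_page(ram_addr_t vram_offset)
{
    uint8_t *p = qemu_get_ram_ptr(vram_offset);

    memset(p, 0, TARGET_PAGE_SIZE); /* stays inside the block by construction */
}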
3028
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02003029/* Return a host pointer to ram allocated with qemu_ram_alloc.
3030 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3031 */
3032void *qemu_safe_ram_ptr(ram_addr_t addr)
3033{
3034 RAMBlock *block;
3035
3036 QLIST_FOREACH(block, &ram_list.blocks, next) {
3037 if (addr - block->offset < block->length) {
Jan Kiszka868bb332011-06-21 22:59:09 +02003038 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003039 /* We need to check if the requested address is in the RAM
3040 * because we don't want to map the entire memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003041 * In that case, just map up to the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01003042 */
3043 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003044 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01003045 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003046 block->host =
3047 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01003048 }
3049 }
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02003050 return block->host + (addr - block->offset);
3051 }
3052 }
3053
3054 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3055 abort();
3056
3057 return NULL;
3058}
3059
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003060/* Return a host pointer to guest RAM. Similar to qemu_get_ram_ptr
3061 * but takes a size argument */
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003062void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003063{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003064 if (*size == 0) {
3065 return NULL;
3066 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003067 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003068 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02003069 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003070 RAMBlock *block;
3071
3072 QLIST_FOREACH(block, &ram_list.blocks, next) {
3073 if (addr - block->offset < block->length) {
3074 if (addr - block->offset + *size > block->length)
3075 *size = block->length - addr + block->offset;
3076 return block->host + (addr - block->offset);
3077 }
3078 }
3079
3080 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3081 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003082 }
3083}
3084
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003085void qemu_put_ram_ptr(void *addr)
3086{
3087 trace_qemu_put_ram_ptr(addr);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003088}
3089
Marcelo Tosattie8902612010-10-11 15:31:19 -03003090int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00003091{
pbrook94a6b542009-04-11 17:15:54 +00003092 RAMBlock *block;
3093 uint8_t *host = ptr;
3094
Jan Kiszka868bb332011-06-21 22:59:09 +02003095 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003096 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003097 return 0;
3098 }
3099
Alex Williamsonf471a172010-06-11 11:11:42 -06003100 QLIST_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003101 /* This case occurs when the block is not mapped. */
3102 if (block->host == NULL) {
3103 continue;
3104 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003105 if (host - block->host < block->length) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03003106 *ram_addr = block->offset + (host - block->host);
3107 return 0;
Alex Williamsonf471a172010-06-11 11:11:42 -06003108 }
pbrook94a6b542009-04-11 17:15:54 +00003109 }
Jun Nakajima432d2682010-08-31 16:41:25 +01003110
Marcelo Tosattie8902612010-10-11 15:31:19 -03003111 return -1;
3112}
Alex Williamsonf471a172010-06-11 11:11:42 -06003113
Marcelo Tosattie8902612010-10-11 15:31:19 -03003114/* Some of the softmmu routines need to translate from a host pointer
3115 (typically a TLB entry) back to a ram offset. */
3116ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3117{
3118 ram_addr_t ram_addr;
Alex Williamsonf471a172010-06-11 11:11:42 -06003119
Marcelo Tosattie8902612010-10-11 15:31:19 -03003120 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3121 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3122 abort();
3123 }
3124 return ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00003125}
3126
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003127static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
3128 unsigned size)
bellard33417e72003-08-10 21:47:01 +00003129{
pbrook67d3b952006-12-18 05:03:52 +00003130#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00003131 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
pbrook67d3b952006-12-18 05:03:52 +00003132#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003133#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003134 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00003135#endif
3136 return 0;
3137}
3138
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003139static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
3140 uint64_t val, unsigned size)
blueswir1e18231a2008-10-06 18:46:28 +00003141{
3142#ifdef DEBUG_UNASSIGNED
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003143 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
blueswir1e18231a2008-10-06 18:46:28 +00003144#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003145#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003146 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00003147#endif
3148}
3149
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003150static const MemoryRegionOps unassigned_mem_ops = {
3151 .read = unassigned_mem_read,
3152 .write = unassigned_mem_write,
3153 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00003154};
3155
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003156static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
3157 unsigned size)
3158{
3159 abort();
3160}
3161
3162static void error_mem_write(void *opaque, target_phys_addr_t addr,
3163 uint64_t value, unsigned size)
3164{
3165 abort();
3166}
3167
3168static const MemoryRegionOps error_mem_ops = {
3169 .read = error_mem_read,
3170 .write = error_mem_write,
3171 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00003172};
3173
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003174static const MemoryRegionOps rom_mem_ops = {
3175 .read = error_mem_read,
3176 .write = unassigned_mem_write,
3177 .endianness = DEVICE_NATIVE_ENDIAN,
3178};
3179
3180static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
3181 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00003182{
bellard3a7d9292005-08-21 09:26:42 +00003183 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003184 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003185 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3186#if !defined(CONFIG_USER_ONLY)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003187 tb_invalidate_phys_page_fast(ram_addr, size);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003188 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003189#endif
3190 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003191 switch (size) {
3192 case 1:
3193 stb_p(qemu_get_ram_ptr(ram_addr), val);
3194 break;
3195 case 2:
3196 stw_p(qemu_get_ram_ptr(ram_addr), val);
3197 break;
3198 case 4:
3199 stl_p(qemu_get_ram_ptr(ram_addr), val);
3200 break;
3201 default:
3202 abort();
3203 }
bellardf23db162005-08-21 19:12:28 +00003204 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003205 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00003206 /* we remove the notdirty callback only if the code has been
3207 flushed */
3208 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00003209 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00003210}
3211
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003212static const MemoryRegionOps notdirty_mem_ops = {
3213 .read = error_mem_read,
3214 .write = notdirty_mem_write,
3215 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00003216};
3217
pbrook0f459d12008-06-09 00:20:13 +00003218/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00003219static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00003220{
3221 CPUState *env = cpu_single_env;
aliguori06d55cc2008-11-18 20:24:06 +00003222 target_ulong pc, cs_base;
3223 TranslationBlock *tb;
pbrook0f459d12008-06-09 00:20:13 +00003224 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00003225 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00003226 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00003227
aliguori06d55cc2008-11-18 20:24:06 +00003228 if (env->watchpoint_hit) {
3229 /* We re-entered the check after replacing the TB. Now raise
3230 * the debug interrupt so that it will trigger after the
3231 * current instruction. */
3232 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3233 return;
3234 }
pbrook2e70f6e2008-06-29 01:03:05 +00003235 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003236 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00003237 if ((vaddr == (wp->vaddr & len_mask) ||
3238 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00003239 wp->flags |= BP_WATCHPOINT_HIT;
3240 if (!env->watchpoint_hit) {
3241 env->watchpoint_hit = wp;
3242 tb = tb_find_pc(env->mem_io_pc);
3243 if (!tb) {
3244 cpu_abort(env, "check_watchpoint: could not find TB for "
3245 "pc=%p", (void *)env->mem_io_pc);
3246 }
Stefan Weil618ba8e2011-04-18 06:39:53 +00003247 cpu_restore_state(tb, env, env->mem_io_pc);
aliguori6e140f22008-11-18 20:37:55 +00003248 tb_phys_invalidate(tb, -1);
3249 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3250 env->exception_index = EXCP_DEBUG;
3251 } else {
3252 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3253 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3254 }
3255 cpu_resume_from_signal(env, NULL);
aliguori06d55cc2008-11-18 20:24:06 +00003256 }
aliguori6e140f22008-11-18 20:37:55 +00003257 } else {
3258 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00003259 }
3260 }
3261}
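
/* Editor's note: the check above services watchpoints installed elsewhere
 * with cpu_watchpoint_insert(), e.g. (sketch only; lengths must be a
 * power of two for the len_mask matching above to work):
 *
 *     CPUWatchpoint *wp;
 *     cpu_watchpoint_insert(env, vaddr, 4, BP_MEM_WRITE | BP_GDB, &wp);
 */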
3262
pbrook6658ffb2007-03-16 23:58:11 +00003263/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3264 so these check for a hit then pass through to the normal out-of-line
3265 phys routines. */
Avi Kivity1ec9b902012-01-02 12:47:48 +02003266static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
3267 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00003268{
Avi Kivity1ec9b902012-01-02 12:47:48 +02003269 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
3270 switch (size) {
3271 case 1: return ldub_phys(addr);
3272 case 2: return lduw_phys(addr);
3273 case 4: return ldl_phys(addr);
3274 default: abort();
3275 }
pbrook6658ffb2007-03-16 23:58:11 +00003276}
3277
Avi Kivity1ec9b902012-01-02 12:47:48 +02003278static void watch_mem_write(void *opaque, target_phys_addr_t addr,
3279 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00003280{
Avi Kivity1ec9b902012-01-02 12:47:48 +02003281 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
3282 switch (size) {
3283 case 1: stb_phys(addr, val); break;
3284 case 2: stw_phys(addr, val); break;
3285 case 4: stl_phys(addr, val); break;
3286 default: abort();
3287 }
pbrook6658ffb2007-03-16 23:58:11 +00003288}
3289
Avi Kivity1ec9b902012-01-02 12:47:48 +02003290static const MemoryRegionOps watch_mem_ops = {
3291 .read = watch_mem_read,
3292 .write = watch_mem_write,
3293 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00003294};
pbrook6658ffb2007-03-16 23:58:11 +00003295
Avi Kivity70c68e42012-01-02 12:32:48 +02003296static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
3297 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003298{
Avi Kivity70c68e42012-01-02 12:32:48 +02003299 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003300 unsigned int idx = SUBPAGE_IDX(addr);
blueswir1db7b5422007-05-26 17:36:03 +00003301#if defined(DEBUG_SUBPAGE)
3302 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3303 mmio, len, addr, idx);
3304#endif
blueswir1db7b5422007-05-26 17:36:03 +00003305
Richard Hendersonf6405242010-04-22 16:47:31 -07003306 addr += mmio->region_offset[idx];
3307 idx = mmio->sub_io_index[idx];
Avi Kivity70c68e42012-01-02 12:32:48 +02003308 return io_mem_read(idx, addr, len);
blueswir1db7b5422007-05-26 17:36:03 +00003309}
3310
Avi Kivity70c68e42012-01-02 12:32:48 +02003311static void subpage_write(void *opaque, target_phys_addr_t addr,
3312 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003313{
Avi Kivity70c68e42012-01-02 12:32:48 +02003314 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003315 unsigned int idx = SUBPAGE_IDX(addr);
blueswir1db7b5422007-05-26 17:36:03 +00003316#if defined(DEBUG_SUBPAGE)
Avi Kivity70c68e42012-01-02 12:32:48 +02003317 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
3318 " idx %d value %"PRIx64"\n",
Richard Hendersonf6405242010-04-22 16:47:31 -07003319 __func__, mmio, len, addr, idx, value);
blueswir1db7b5422007-05-26 17:36:03 +00003320#endif
Richard Hendersonf6405242010-04-22 16:47:31 -07003321
3322 addr += mmio->region_offset[idx];
3323 idx = mmio->sub_io_index[idx];
Avi Kivity70c68e42012-01-02 12:32:48 +02003324 io_mem_write(idx, addr, value, len);
blueswir1db7b5422007-05-26 17:36:03 +00003325}
3326
Avi Kivity70c68e42012-01-02 12:32:48 +02003327static const MemoryRegionOps subpage_ops = {
3328 .read = subpage_read,
3329 .write = subpage_write,
3330 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00003331};
3332
Avi Kivityde712f92012-01-02 12:41:07 +02003333static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
3334 unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01003335{
3336 ram_addr_t raddr = addr;
3337 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02003338 switch (size) {
3339 case 1: return ldub_p(ptr);
3340 case 2: return lduw_p(ptr);
3341 case 4: return ldl_p(ptr);
3342 default: abort();
3343 }
Andreas Färber56384e82011-11-30 16:26:21 +01003344}
3345
Avi Kivityde712f92012-01-02 12:41:07 +02003346static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
3347 uint64_t value, unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01003348{
3349 ram_addr_t raddr = addr;
3350 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02003351 switch (size) {
3352 case 1: return stb_p(ptr, value);
3353 case 2: return stw_p(ptr, value);
3354 case 4: return stl_p(ptr, value);
3355 default: abort();
3356 }
Andreas Färber56384e82011-11-30 16:26:21 +01003357}
3358
Avi Kivityde712f92012-01-02 12:41:07 +02003359static const MemoryRegionOps subpage_ram_ops = {
3360 .read = subpage_ram_read,
3361 .write = subpage_ram_write,
3362 .endianness = DEVICE_NATIVE_ENDIAN,
Andreas Färber56384e82011-11-30 16:26:21 +01003363};
3364
Anthony Liguoric227f092009-10-01 16:12:16 -05003365static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3366 ram_addr_t memory, ram_addr_t region_offset)
blueswir1db7b5422007-05-26 17:36:03 +00003367{
3368 int idx, eidx;
3369
3370 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3371 return -1;
3372 idx = SUBPAGE_IDX(start);
3373 eidx = SUBPAGE_IDX(end);
3374#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00003375 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
blueswir1db7b5422007-05-26 17:36:03 +00003376 mmio, start, end, idx, eidx, memory);
3377#endif
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003378 if ((memory & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
Avi Kivityde712f92012-01-02 12:41:07 +02003379 memory = io_mem_subpage_ram.ram_addr;
Andreas Färber56384e82011-11-30 16:26:21 +01003380 }
Richard Hendersonf6405242010-04-22 16:47:31 -07003381 memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
blueswir1db7b5422007-05-26 17:36:03 +00003382 for (; idx <= eidx; idx++) {
Richard Hendersonf6405242010-04-22 16:47:31 -07003383 mmio->sub_io_index[idx] = memory;
3384 mmio->region_offset[idx] = region_offset;
blueswir1db7b5422007-05-26 17:36:03 +00003385 }
3386
3387 return 0;
3388}
3389
Richard Hendersonf6405242010-04-22 16:47:31 -07003390static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3391 ram_addr_t orig_memory,
3392 ram_addr_t region_offset)
blueswir1db7b5422007-05-26 17:36:03 +00003393{
Anthony Liguoric227f092009-10-01 16:12:16 -05003394 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00003395 int subpage_memory;
3396
Anthony Liguori7267c092011-08-20 22:09:37 -05003397 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00003398
3399 mmio->base = base;
Avi Kivity70c68e42012-01-02 12:32:48 +02003400 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
3401 "subpage", TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02003402 mmio->iomem.subpage = true;
Avi Kivity70c68e42012-01-02 12:32:48 +02003403 subpage_memory = mmio->iomem.ram_addr;
blueswir1db7b5422007-05-26 17:36:03 +00003404#if defined(DEBUG_SUBPAGE)
aliguori1eec6142009-02-05 22:06:18 +00003405 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3406 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
blueswir1db7b5422007-05-26 17:36:03 +00003407#endif
Avi Kivityb3b00c72012-01-02 13:20:11 +02003408 *phys = subpage_memory;
Richard Hendersonf6405242010-04-22 16:47:31 -07003409 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
blueswir1db7b5422007-05-26 17:36:03 +00003410
3411 return mmio;
3412}
3413
aliguori88715652009-02-11 15:20:58 +00003414static int get_free_io_mem_idx(void)
3415{
3416 int i;
3417
3418 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3419 if (!io_mem_used[i]) {
3420 io_mem_used[i] = 1;
3421 return i;
3422 }
Riku Voipioc6703b42009-12-03 15:56:05 +02003423 fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
aliguori88715652009-02-11 15:20:58 +00003424 return -1;
3425}
3426
bellard33417e72003-08-10 21:47:01 +00003427/* Associate a MemoryRegion with an io zone. Reads and writes to the
3428 zone are dispatched through the region's MemoryRegionOps callbacks
Paul Brook0b4e6e32009-04-30 18:37:55 +01003429 for the access size used (byte, word or dword).
blueswir13ee89922008-01-02 19:45:26 +00003430 If io_index is non zero, the corresponding io zone is
blueswir14254fab2008-01-01 16:57:19 +00003431 modified. If it is zero, a new io zone is allocated. The return
3432 value can be used with cpu_register_physical_memory(). (-1) is
3433 returned on error. */
Avi Kivitya621f382012-01-02 13:12:08 +02003434static int cpu_register_io_memory_fixed(int io_index, MemoryRegion *mr)
bellard33417e72003-08-10 21:47:01 +00003435{
bellard33417e72003-08-10 21:47:01 +00003436 if (io_index <= 0) {
aliguori88715652009-02-11 15:20:58 +00003437 io_index = get_free_io_mem_idx();
3438 if (io_index == -1)
3439 return io_index;
bellard33417e72003-08-10 21:47:01 +00003440 } else {
Avi Kivity1eed09c2009-06-14 11:38:51 +03003441 io_index >>= IO_MEM_SHIFT;
bellard33417e72003-08-10 21:47:01 +00003442 if (io_index >= IO_MEM_NB_ENTRIES)
3443 return -1;
3444 }
bellardb5ff1b32005-11-26 10:38:39 +00003445
Avi Kivitya621f382012-01-02 13:12:08 +02003446 io_mem_region[io_index] = mr;
Richard Hendersonf6405242010-04-22 16:47:31 -07003447
3448 return (io_index << IO_MEM_SHIFT);
bellard33417e72003-08-10 21:47:01 +00003449}
bellard61382a52003-10-27 21:22:23 +00003450
Avi Kivitya621f382012-01-02 13:12:08 +02003451int cpu_register_io_memory(MemoryRegion *mr)
Avi Kivity1eed09c2009-06-14 11:38:51 +03003452{
Avi Kivitya621f382012-01-02 13:12:08 +02003453 return cpu_register_io_memory_fixed(0, mr);
Avi Kivity1eed09c2009-06-14 11:38:51 +03003454}
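
/* Editor's illustrative sketch (not part of the original file): a trivial
 * MMIO region wired through the MemoryRegionOps machinery above.  All
 * "mydev" names are hypothetical; reads return 0, writes are ignored. */
static uint64_t mydev_mem_read(void *opaque, target_phys_addr_t addr,
                               unsigned size)
{
    return 0; /* a real device would decode 'addr' here */
}

static void mydev_mem_write(void *opaque, target_phys_addr_t addr,
                            uint64_t val, unsigned size)
{
    /* a real device would latch 'val' into a register here */
}

static const MemoryRegionOps mydev_mem_ops __attribute__((unused)) = {
    .read = mydev_mem_read,
    .write = mydev_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
/* memory_region_init_io(&s->iomem, &mydev_mem_ops, s, "mydev", 0x100)
 * would then back a 256-byte region with these callbacks. */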
3455
aliguori88715652009-02-11 15:20:58 +00003456void cpu_unregister_io_memory(int io_table_address)
3457{
aliguori88715652009-02-11 15:20:58 +00003458 int io_index = io_table_address >> IO_MEM_SHIFT;
3459
Avi Kivitya621f382012-01-02 13:12:08 +02003460 io_mem_region[io_index] = NULL;
aliguori88715652009-02-11 15:20:58 +00003461 io_mem_used[io_index] = 0;
3462}
3463
Avi Kivitye9179ce2009-06-14 11:38:52 +03003464static void io_mem_init(void)
3465{
3466 int i;
3467
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003468 /* Must be first: */
3469 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
3470 assert(io_mem_ram.ram_addr == 0);
3471 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
3472 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
3473 "unassigned", UINT64_MAX);
3474 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
3475 "notdirty", UINT64_MAX);
Avi Kivityde712f92012-01-02 12:41:07 +02003476 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
3477 "subpage-ram", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003478 for (i=0; i<5; i++)
3479 io_mem_used[i] = 1;
3480
Avi Kivity1ec9b902012-01-02 12:47:48 +02003481 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
3482 "watch", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003483}
3484
Avi Kivity62152b82011-07-26 14:26:14 +03003485static void memory_map_init(void)
3486{
Anthony Liguori7267c092011-08-20 22:09:37 -05003487 system_memory = g_malloc(sizeof(*system_memory));
Avi Kivity8417ceb2011-08-03 11:56:14 +03003488 memory_region_init(system_memory, "system", INT64_MAX);
Avi Kivity62152b82011-07-26 14:26:14 +03003489 set_system_memory_map(system_memory);
Avi Kivity309cb472011-08-08 16:09:03 +03003490
Anthony Liguori7267c092011-08-20 22:09:37 -05003491 system_io = g_malloc(sizeof(*system_io));
Avi Kivity309cb472011-08-08 16:09:03 +03003492 memory_region_init(system_io, "io", 65536);
3493 set_system_io_map(system_io);
Avi Kivity62152b82011-07-26 14:26:14 +03003494}
3495
3496MemoryRegion *get_system_memory(void)
3497{
3498 return system_memory;
3499}
3500
Avi Kivity309cb472011-08-08 16:09:03 +03003501MemoryRegion *get_system_io(void)
3502{
3503 return system_io;
3504}
3505
pbrooke2eef172008-06-08 01:09:01 +00003506#endif /* !defined(CONFIG_USER_ONLY) */
3507
bellard13eb76e2004-01-24 15:23:36 +00003508/* physical memory access (slow version, mainly for debug) */
3509#if defined(CONFIG_USER_ONLY)
Paul Brooka68fe892010-03-01 00:08:59 +00003510int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3511 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003512{
3513 int l, flags;
3514 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00003515 void * p;
bellard13eb76e2004-01-24 15:23:36 +00003516
3517 while (len > 0) {
3518 page = addr & TARGET_PAGE_MASK;
3519 l = (page + TARGET_PAGE_SIZE) - addr;
3520 if (l > len)
3521 l = len;
3522 flags = page_get_flags(page);
3523 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00003524 return -1;
bellard13eb76e2004-01-24 15:23:36 +00003525 if (is_write) {
3526 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00003527 return -1;
bellard579a97f2007-11-11 14:26:47 +00003528 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003529 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00003530 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003531 memcpy(p, buf, l);
3532 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00003533 } else {
3534 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00003535 return -1;
bellard579a97f2007-11-11 14:26:47 +00003536 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003537 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00003538 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003539 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00003540 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00003541 }
3542 len -= l;
3543 buf += l;
3544 addr += l;
3545 }
Paul Brooka68fe892010-03-01 00:08:59 +00003546 return 0;
bellard13eb76e2004-01-24 15:23:36 +00003547}
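
/* Editor's illustrative sketch: reading a guest word the way a debugger
 * stub would use the helper above (function name hypothetical). */
static int __attribute__((unused))
example_peek_u32(CPUState *env, target_ulong addr, uint32_t *val)
{
    return cpu_memory_rw_debug(env, addr, (uint8_t *)val, sizeof(*val),
                               0 /* is_write */);
}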
bellard8df1cd02005-01-28 22:37:22 +00003548
bellard13eb76e2004-01-24 15:23:36 +00003549#else
Anthony Liguoric227f092009-10-01 16:12:16 -05003550void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
bellard13eb76e2004-01-24 15:23:36 +00003551 int len, int is_write)
3552{
3553 int l, io_index;
3554 uint8_t *ptr;
3555 uint32_t val;
Anthony Liguoric227f092009-10-01 16:12:16 -05003556 target_phys_addr_t page;
Anthony PERARD8ca56922011-07-15 04:32:53 +00003557 ram_addr_t pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003558 PhysPageDesc p;
ths3b46e622007-09-17 08:09:54 +00003559
bellard13eb76e2004-01-24 15:23:36 +00003560 while (len > 0) {
3561 page = addr & TARGET_PAGE_MASK;
3562 l = (page + TARGET_PAGE_SIZE) - addr;
3563 if (l > len)
3564 l = len;
bellard92e873b2004-05-21 14:52:29 +00003565 p = phys_page_find(page >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003566 pd = p.phys_offset;
ths3b46e622007-09-17 08:09:54 +00003567
bellard13eb76e2004-01-24 15:23:36 +00003568 if (is_write) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003569 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003570 target_phys_addr_t addr1;
bellard13eb76e2004-01-24 15:23:36 +00003571 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003572 addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
bellard6a00d602005-11-21 23:25:50 +00003573 /* XXX: could force cpu_single_env to NULL to avoid
3574 potential bugs */
aurel326c2934d2009-02-18 21:37:17 +00003575 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003576 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003577 val = ldl_p(buf);
Avi Kivityacbbec52011-11-21 12:27:03 +02003578 io_mem_write(io_index, addr1, val, 4);
bellard13eb76e2004-01-24 15:23:36 +00003579 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003580 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003581 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003582 val = lduw_p(buf);
Avi Kivityacbbec52011-11-21 12:27:03 +02003583 io_mem_write(io_index, addr1, val, 2);
bellard13eb76e2004-01-24 15:23:36 +00003584 l = 2;
3585 } else {
bellard1c213d12005-09-03 10:49:04 +00003586 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003587 val = ldub_p(buf);
Avi Kivityacbbec52011-11-21 12:27:03 +02003588 io_mem_write(io_index, addr1, val, 1);
bellard13eb76e2004-01-24 15:23:36 +00003589 l = 1;
3590 }
3591 } else {
Anthony PERARD8ca56922011-07-15 04:32:53 +00003592 ram_addr_t addr1;
bellardb448f2f2004-02-25 23:24:04 +00003593 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
bellard13eb76e2004-01-24 15:23:36 +00003594 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003595 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00003596 memcpy(ptr, buf, l);
bellard3a7d9292005-08-21 09:26:42 +00003597 if (!cpu_physical_memory_is_dirty(addr1)) {
3598 /* invalidate code */
3599 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3600 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003601 cpu_physical_memory_set_dirty_flags(
3602 addr1, (0xff & ~CODE_DIRTY_FLAG));
bellard3a7d9292005-08-21 09:26:42 +00003603 }
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003604 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00003605 }
3606 } else {
Avi Kivity1d393fa2012-01-01 21:15:42 +02003607 if (!is_ram_rom_romd(pd)) {
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003608 target_phys_addr_t addr1;
bellard13eb76e2004-01-24 15:23:36 +00003609 /* I/O case */
3610 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003611 addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
aurel326c2934d2009-02-18 21:37:17 +00003612 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003613 /* 32 bit read access */
Avi Kivityacbbec52011-11-21 12:27:03 +02003614 val = io_mem_read(io_index, addr1, 4);
bellardc27004e2005-01-03 23:35:10 +00003615 stl_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003616 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003617 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003618 /* 16 bit read access */
Avi Kivityacbbec52011-11-21 12:27:03 +02003619 val = io_mem_read(io_index, addr1, 2);
bellardc27004e2005-01-03 23:35:10 +00003620 stw_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003621 l = 2;
3622 } else {
bellard1c213d12005-09-03 10:49:04 +00003623 /* 8 bit read access */
Avi Kivityacbbec52011-11-21 12:27:03 +02003624 val = io_mem_read(io_index, addr1, 1);
bellardc27004e2005-01-03 23:35:10 +00003625 stb_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003626 l = 1;
3627 }
3628 } else {
3629 /* RAM case */
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003630 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
3631 memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
3632 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00003633 }
3634 }
3635 len -= l;
3636 buf += l;
3637 addr += l;
3638 }
3639}
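
/* Editor's illustrative sketch: a 4-byte guest-physical write and read-back
 * through the slow path above (function name hypothetical). */
static void __attribute__((unused))
example_physical_poke_peek(target_phys_addr_t paddr)
{
    uint32_t out = 0x12345678, in = 0;

    cpu_physical_memory_rw(paddr, (uint8_t *)&out, sizeof(out), 1 /* write */);
    cpu_physical_memory_rw(paddr, (uint8_t *)&in, sizeof(in), 0 /* read */);
    /* 'in' now holds 0x12345678 unless the page is MMIO with side effects */
}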
bellard8df1cd02005-01-28 22:37:22 +00003640
bellardd0ecd2a2006-04-23 17:14:48 +00003641/* used for ROM loading : can write in RAM and ROM */
Anthony Liguoric227f092009-10-01 16:12:16 -05003642void cpu_physical_memory_write_rom(target_phys_addr_t addr,
bellardd0ecd2a2006-04-23 17:14:48 +00003643 const uint8_t *buf, int len)
3644{
3645 int l;
3646 uint8_t *ptr;
Anthony Liguoric227f092009-10-01 16:12:16 -05003647 target_phys_addr_t page;
bellardd0ecd2a2006-04-23 17:14:48 +00003648 unsigned long pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003649 PhysPageDesc p;
ths3b46e622007-09-17 08:09:54 +00003650
bellardd0ecd2a2006-04-23 17:14:48 +00003651 while (len > 0) {
3652 page = addr & TARGET_PAGE_MASK;
3653 l = (page + TARGET_PAGE_SIZE) - addr;
3654 if (l > len)
3655 l = len;
3656 p = phys_page_find(page >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003657 pd = p.phys_offset;
ths3b46e622007-09-17 08:09:54 +00003658
Avi Kivity1d393fa2012-01-01 21:15:42 +02003659 if (!is_ram_rom_romd(pd)) {
bellardd0ecd2a2006-04-23 17:14:48 +00003660 /* do nothing */
3661 } else {
3662 unsigned long addr1;
3663 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3664 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003665 ptr = qemu_get_ram_ptr(addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00003666 memcpy(ptr, buf, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003667 qemu_put_ram_ptr(ptr);
bellardd0ecd2a2006-04-23 17:14:48 +00003668 }
3669 len -= l;
3670 buf += l;
3671 addr += l;
3672 }
3673}
3674
aliguori6d16c2f2009-01-22 16:59:11 +00003675typedef struct {
3676 void *buffer;
Anthony Liguoric227f092009-10-01 16:12:16 -05003677 target_phys_addr_t addr;
3678 target_phys_addr_t len;
aliguori6d16c2f2009-01-22 16:59:11 +00003679} BounceBuffer;
3680
3681static BounceBuffer bounce;
3682
aliguoriba223c22009-01-22 16:59:16 +00003683typedef struct MapClient {
3684 void *opaque;
3685 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00003686 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00003687} MapClient;
3688
Blue Swirl72cf2d42009-09-12 07:36:22 +00003689static QLIST_HEAD(map_client_list, MapClient) map_client_list
3690 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003691
3692void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3693{
Anthony Liguori7267c092011-08-20 22:09:37 -05003694 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00003695
3696 client->opaque = opaque;
3697 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003698 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00003699 return client;
3700}
3701
3702void cpu_unregister_map_client(void *_client)
3703{
3704 MapClient *client = (MapClient *)_client;
3705
Blue Swirl72cf2d42009-09-12 07:36:22 +00003706 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05003707 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00003708}
3709
3710static void cpu_notify_map_clients(void)
3711{
3712 MapClient *client;
3713
Blue Swirl72cf2d42009-09-12 07:36:22 +00003714 while (!QLIST_EMPTY(&map_client_list)) {
3715 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003716 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09003717 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00003718 }
3719}
3720
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t todo = 0;
    int l;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc p;
    ram_addr_t raddr = RAM_ADDR_MAX;
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        pd = p.phys_offset;

        if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_read(addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        }

        len -= l;
        addr += l;
        todo += l;
    }
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}

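/*
 * Usage sketch (illustrative, not part of the original file): the basic
 * map/access/unmap cycle.  Note that *plen may come back smaller than
 * requested (crossing into non-RAM falls back to the single bounce
 * page), so callers have to loop.
 */
#if 0
static void copy_from_guest(uint8_t *dst, target_phys_addr_t gpa,
                            target_phys_addr_t size)
{
    while (size > 0) {
        target_phys_addr_t plen = size;
        void *host = cpu_physical_memory_map(gpa, &plen, 0);

        if (!host) {
            break;  /* bounce buffer busy; see cpu_register_map_client() */
        }
        memcpy(dst, host, plen);
        cpu_physical_memory_unmap(host, plen, 0, plen);
        gpa += plen;
        dst += plen;
        size -= plen;
    }
}
#endif
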
/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if (!is_ram_rom_romd(pd)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
        val = io_mem_read(io_index, addr, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

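/*
 * Usage sketch (illustrative, not part of the original file): device
 * models use the fixed-endian accessors so guest data is decoded the
 * same way regardless of target endianness; ldl_phys() by contrast is
 * target-native.  The descriptor layout here is invented.
 */
#if 0
static uint32_t desc_read_flags(target_phys_addr_t desc_base)
{
    /* hypothetical 32-bit little-endian "flags" field at offset 4 */
    return ldl_le_phys(desc_base + 4);
}
#endif
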
/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if (!is_ram_rom_romd(pd)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;

        /* XXX This is broken when device endian != cpu endian.
           Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = io_mem_read(io_index, addr, 4) << 32;
        val |= io_mem_read(io_index, addr + 4, 4);
#else
        val = io_mem_read(io_index, addr, 4);
        val |= io_mem_read(io_index, addr + 4, 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

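/*
 * Sketch (illustrative, not part of the original file): one way the
 * XXX above could honour "endian" for the split 64-bit I/O read,
 * mirroring the bswap handling in ldl_phys_internal().  Untested.
 */
#if 0
    /* in the I/O branch of ldq_phys_internal(): */
    uint64_t w0 = io_mem_read(io_index, addr, 4);
    uint64_t w1 = io_mem_read(io_index, addr + 4, 4);

    if (endian == DEVICE_LITTLE_ENDIAN) {
        val = w0 | (w1 << 32);          /* first word is the low half */
    } else if (endian == DEVICE_BIG_ENDIAN) {
        val = (w0 << 32) | w1;          /* first word is the high half */
    } else {
        /* DEVICE_NATIVE_ENDIAN: keep the #ifdef'd behaviour above */
    }
#endif
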
/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
                                          enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if (!is_ram_rom_romd(pd)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
        val = io_mem_read(io_index, addr, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
        io_mem_write(io_index, addr, val, 4);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}

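/*
 * Usage sketch (illustrative, not part of the original file): the
 * _notdirty variant suits target MMU helpers that update
 * accessed/modified flags in a guest PTE, where the physical dirty
 * bitmap should keep reflecting only the guest's own writes.  The PTE
 * bit below is hypothetical.
 */
#if 0
static void pte_set_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    pte |= 0x20;                       /* hypothetical "accessed" bit */
    stl_phys_notdirty(pte_addr, pte);  /* page is NOT marked dirty */
}
#endif
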
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write(io_index, addr, val >> 32, 4);
        io_mem_write(io_index, addr + 4, (uint32_t)val, 4);
#else
        io_mem_write(io_index, addr, (uint32_t)val, 4);
        io_mem_write(io_index, addr + 4, val >> 32, 4);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(io_index, addr, val, 4);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(io_index, addr, val, 2);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

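/*
 * Usage sketch (illustrative, not part of the original file):
 * publishing fields of an always-little-endian, virtio-style ring with
 * the fixed-endian store family.  The offsets are invented.
 */
#if 0
static void ring_publish(target_phys_addr_t ring, uint16_t idx,
                         uint64_t buf_gpa)
{
    stq_le_phys(ring + 8, buf_gpa);   /* descriptor address first ... */
    stw_le_phys(ring + 2, idx);       /* ... then the index guests poll */
}
#endif
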
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif

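/*
 * Usage sketch (illustrative, not part of the original file): a
 * gdbstub-style caller reading guest-virtual memory.  Unlike
 * cpu_physical_memory_rw(), this walks the guest page tables and can
 * also patch ROM, which is what software breakpoints need.
 */
#if 0
static int debugger_peek(CPUState *env, target_ulong vaddr,
                         uint8_t *out, int len)
{
    return cpu_memory_rw_debug(env, vaddr, out, len, 0);
}
#endif
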
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}

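/*
 * Sketch (illustrative, not part of the original file): roughly where
 * cpu_io_recompile() fires.  Under icount, the softmmu I/O helpers
 * check whether the current TB may still perform I/O and punt to a
 * recompile if not; the exact guard lives in softmmu_template.h.
 */
#if 0
    /* schematic shape of the check in an io_read/io_write helper: */
    if (doing_device_io && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);  /* does not return */
    }
#endif
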
#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

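/*
 * Usage sketch (illustrative, not part of the original file): the
 * monitor's "info jit" command reduces to a call like this; any
 * fprintf-compatible sink works.
 */
#if 0
    dump_exec_info(stderr, fprintf);
#endif
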
/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
   is the offset relative to phys_ram_base */
tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    void *p;

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1);
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
        ldub_code(addr);
    }
    pd = env1->tlb_table[mmu_idx][page_index].addr_code & ~TARGET_PAGE_MASK;
    if (pd != io_mem_ram.ram_addr && pd != io_mem_rom.ram_addr
        && !is_romd(pd)) {
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC)
        cpu_unassigned_access(env1, addr, 0, 1, 0, 4);
#else
        cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
#endif
    }
    p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}

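/*
 * Usage sketch (illustrative, not part of the original file): the
 * translator keys TBs by the physical address of the guest PC, which
 * is what get_page_addr_code() provides (modulo the phys_ram_base
 * offset noted above).
 */
#if 0
    /* shape of a tb_find_slow()-style lookup: */
    tb_page_addr_t phys_pc = get_page_addr_code(env, pc);
    tb_page_addr_t phys_page1 = phys_pc & TARGET_PAGE_MASK;
#endif
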
#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif