/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump.  ARM and Sparc64
   have limited branch ranges (possibly also PPC), so place it in a
   section close to the code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define P_L1_BITS_REM \
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

/* Size of the L1 page table.  Avoid silly small sizes.  */
#if P_L1_BITS_REM < 4
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
#else
#define P_L1_BITS  P_L1_BITS_REM
#endif

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
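
/* Worked example of the arithmetic above: with L1_MAP_ADDR_SPACE_BITS == 32,
   TARGET_PAGE_BITS == 12 and L2_BITS == 10, 20 bits of page number remain,
   so V_L1_BITS_REM == 0, V_L1_BITS == 10 and V_L1_SHIFT == 10: a two-level
   tree whose L1 table and leaf PageDesc tables each hold 1024 entries. */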

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to PhysPageDesc.  */
static void *l1_phys_map[P_L1_SIZE];

static void io_mem_init(void);
static void memory_map_init(void);

/* io memory support */
MemoryRegion *io_mem_region[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static MemoryRegion io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;

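/* Make the host address range [addr, addr + size) executable (as well as
   readable and writable), rounding out to host page boundaries. */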
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

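/* Walk the multi-level map for the page at 'index', returning its PageDesc.
   If 'alloc' is set, missing intermediate tables and the final PageDesc
   array are allocated on the way down; otherwise NULL is returned. */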
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
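/* Counterpart of page_find_alloc() for the physical address space: walk
   l1_phys_map and return the PhysPageDesc for 'index', allocating missing
   levels (initialized as unassigned I/O memory) when 'alloc' is set. */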
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            *lp = p = g_malloc0(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        int i;
        int first_index = index & ~(L2_SIZE - 1);

        if (!alloc) {
            return NULL;
        }

        *lp = pd = g_malloc(sizeof(PhysPageDesc) * L2_SIZE);

        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = io_mem_unassigned.ram_addr;
            pd[i].region_offset = (first_index + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc phys_page_find(target_phys_addr_t index)
{
    PhysPageDesc *p = phys_page_find_alloc(index, 0);

    if (p) {
        return *p;
    } else {
        return (PhysPageDesc) {
            .phys_offset = io_mem_unassigned.ram_addr,
            .region_offset = index << TARGET_PAGE_BITS,
        };
    }
}

static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

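/* Allocate the buffer that will hold the translated code: either the
   static buffer, or an executable mapping placed to satisfy the host
   architecture's direct-branch range constraints. */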
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Keep the buffer no bigger than 16MB to branch between blocks */
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

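/* Return the CPUState whose cpu_index equals 'cpu', or NULL if there is
   no such CPU. */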
CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

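/* Register a new CPU: append it to the global CPU list, assign it the
   next free cpu_index and, in system mode, hook it into savevm/vmstate. */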
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

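/* Remove 'tb' from the physical hash table, the page lists and the per-CPU
   jump caches, and unchain any TBs that jump to it, so that it can never
   be found or entered again. */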
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the per-CPU jump caches */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

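/* Set the 'len' bits starting at bit 'start' in the bitmap 'tab'. */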
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

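/* Build a bitmap of which bytes of the page are covered by TBs, so that
   later writes can cheaply check whether they actually touch translated
   code (see tb_invalidate_phys_page_fast). */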
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

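/* Translate one block starting at 'pc' and link it into the page tables,
   flushing the whole translation cache first if the TB pool or the code
   buffer is exhausted. */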
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

bellarda513fe12003-05-27 23:29:48 +00001325/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1326 tb[1].tc_ptr. Return NULL if not found */
1327TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1328{
1329 int m_min, m_max, m;
1330 unsigned long v;
1331 TranslationBlock *tb;
1332
1333 if (nb_tbs <= 0)
1334 return NULL;
1335 if (tc_ptr < (unsigned long)code_gen_buffer ||
1336 tc_ptr >= (unsigned long)code_gen_ptr)
1337 return NULL;
1338 /* binary search (cf Knuth) */
1339 m_min = 0;
1340 m_max = nb_tbs - 1;
1341 while (m_min <= m_max) {
1342 m = (m_min + m_max) >> 1;
1343 tb = &tbs[m];
1344 v = (unsigned long)tb->tc_ptr;
1345 if (v == tc_ptr)
1346 return tb;
1347 else if (tc_ptr < v) {
1348 m_max = m - 1;
1349 } else {
1350 m_min = m + 1;
1351 }
ths5fafdf22007-09-16 21:08:06 +00001352 }
bellarda513fe12003-05-27 23:29:48 +00001353 return &tbs[m_max];
1354}
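/* A usage sketch (illustrative, not part of the original file): the
   typical caller is a signal handler that must map a faulting host PC
   inside the code buffer back to its TranslationBlock; get_signal_pc()
   is a hypothetical helper reading the PC from the signal context:

       unsigned long host_pc = get_signal_pc(uc);  // hypothetical helper
       TranslationBlock *tb = tb_find_pc(host_pc);
       if (tb != NULL) {
           // tb->pc is the guest PC at which this block starts
       }
*/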
bellard75012672003-06-21 13:11:07 +00001355
bellardea041c02003-06-25 16:16:50 +00001356static void tb_reset_jump_recursive(TranslationBlock *tb);
1357
1358static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1359{
1360 TranslationBlock *tb1, *tb_next, **ptb;
1361 unsigned int n1;
1362
1363 tb1 = tb->jmp_next[n];
1364 if (tb1 != NULL) {
1365 /* find head of list */
1366 for(;;) {
1367 n1 = (long)tb1 & 3;
1368 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1369 if (n1 == 2)
1370 break;
1371 tb1 = tb1->jmp_next[n1];
1372 }
1373 /* we are now sure that tb jumps to tb1 */
1374 tb_next = tb1;
1375
1376 /* remove tb from the jmp_first list */
1377 ptb = &tb_next->jmp_first;
1378 for(;;) {
1379 tb1 = *ptb;
1380 n1 = (long)tb1 & 3;
1381 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1382 if (n1 == n && tb1 == tb)
1383 break;
1384 ptb = &tb1->jmp_next[n1];
1385 }
1386 *ptb = tb->jmp_next[n];
1387 tb->jmp_next[n] = NULL;
ths3b46e622007-09-17 08:09:54 +00001388
bellardea041c02003-06-25 16:16:50 +00001389 /* suppress the jump to next tb in generated code */
1390 tb_reset_jump(tb, n);
1391
bellard01243112004-01-04 15:48:17 +00001392 /* suppress jumps in the tb on which we could have jumped */
bellardea041c02003-06-25 16:16:50 +00001393 tb_reset_jump_recursive(tb_next);
1394 }
1395}
1396
1397static void tb_reset_jump_recursive(TranslationBlock *tb)
1398{
1399 tb_reset_jump_recursive2(tb, 0);
1400 tb_reset_jump_recursive2(tb, 1);
1401}
1402
bellard1fddef42005-04-17 19:16:13 +00001403#if defined(TARGET_HAS_ICE)
Paul Brook94df27f2010-02-28 23:47:45 +00001404#if defined(CONFIG_USER_ONLY)
1405static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1406{
1407 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1408}
1409#else
bellardd720b932004-04-25 17:57:43 +00001410static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1411{
Anthony Liguoric227f092009-10-01 16:12:16 -05001412 target_phys_addr_t addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00001413 target_ulong pd;
Anthony Liguoric227f092009-10-01 16:12:16 -05001414 ram_addr_t ram_addr;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02001415 PhysPageDesc p;
bellardd720b932004-04-25 17:57:43 +00001416
pbrookc2f07f82006-04-08 17:14:56 +00001417 addr = cpu_get_phys_page_debug(env, pc);
1418 p = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02001419 pd = p.phys_offset;
pbrookc2f07f82006-04-08 17:14:56 +00001420 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
pbrook706cd4b2006-04-08 17:36:21 +00001421 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
bellardd720b932004-04-25 17:57:43 +00001422}
bellardc27004e2005-01-03 23:35:10 +00001423#endif
Paul Brook94df27f2010-02-28 23:47:45 +00001424#endif /* TARGET_HAS_ICE */
bellardd720b932004-04-25 17:57:43 +00001425
Paul Brookc527ee82010-03-01 03:31:14 +00001426#if defined(CONFIG_USER_ONLY)
1427void cpu_watchpoint_remove_all(CPUState *env, int mask)
1429{
1430}
1431
1432int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1433 int flags, CPUWatchpoint **watchpoint)
1434{
1435 return -ENOSYS;
1436}
1437#else
pbrook6658ffb2007-03-16 23:58:11 +00001438/* Add a watchpoint. */
aliguoria1d1bb32008-11-18 20:07:32 +00001439int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1440 int flags, CPUWatchpoint **watchpoint)
pbrook6658ffb2007-03-16 23:58:11 +00001441{
aliguorib4051332008-11-18 20:14:20 +00001442 target_ulong len_mask = ~(len - 1);
aliguoric0ce9982008-11-25 22:13:57 +00001443 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001444
aliguorib4051332008-11-18 20:14:20 +00001445 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1446 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1447 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1448 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1449 return -EINVAL;
1450 }
Anthony Liguori7267c092011-08-20 22:09:37 -05001451 wp = g_malloc(sizeof(*wp));
pbrook6658ffb2007-03-16 23:58:11 +00001452
aliguoria1d1bb32008-11-18 20:07:32 +00001453 wp->vaddr = addr;
aliguorib4051332008-11-18 20:14:20 +00001454 wp->len_mask = len_mask;
aliguoria1d1bb32008-11-18 20:07:32 +00001455 wp->flags = flags;
1456
aliguori2dc9f412008-11-18 20:56:59 +00001457 /* keep all GDB-injected watchpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001458 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001459 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001460 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001461 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001462
pbrook6658ffb2007-03-16 23:58:11 +00001463 tlb_flush_page(env, addr);
aliguoria1d1bb32008-11-18 20:07:32 +00001464
1465 if (watchpoint)
1466 *watchpoint = wp;
1467 return 0;
pbrook6658ffb2007-03-16 23:58:11 +00001468}
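/* Worked example (a sketch, not part of the original file): for len = 4,
   len_mask = ~(len - 1) = ~3, so "addr & ~len_mask" tests the low two
   address bits and rejects a 4-byte watchpoint that is not 4-byte
   aligned:

       CPUWatchpoint *wp;
       // accepted: power-of-2 length, aligned address
       cpu_watchpoint_insert(env, 0x1000, 4, BP_MEM_WRITE | BP_GDB, &wp);
       // rejected with -EINVAL: len 3 is not a power of two
       cpu_watchpoint_insert(env, 0x1000, 3, BP_MEM_WRITE, NULL);
*/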
1469
aliguoria1d1bb32008-11-18 20:07:32 +00001470/* Remove a specific watchpoint. */
1471int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1472 int flags)
pbrook6658ffb2007-03-16 23:58:11 +00001473{
aliguorib4051332008-11-18 20:14:20 +00001474 target_ulong len_mask = ~(len - 1);
aliguoria1d1bb32008-11-18 20:07:32 +00001475 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001476
Blue Swirl72cf2d42009-09-12 07:36:22 +00001477 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001478 if (addr == wp->vaddr && len_mask == wp->len_mask
aliguori6e140f22008-11-18 20:37:55 +00001479 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
aliguoria1d1bb32008-11-18 20:07:32 +00001480 cpu_watchpoint_remove_by_ref(env, wp);
pbrook6658ffb2007-03-16 23:58:11 +00001481 return 0;
1482 }
1483 }
aliguoria1d1bb32008-11-18 20:07:32 +00001484 return -ENOENT;
pbrook6658ffb2007-03-16 23:58:11 +00001485}
1486
aliguoria1d1bb32008-11-18 20:07:32 +00001487/* Remove a specific watchpoint by reference. */
1488void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1489{
Blue Swirl72cf2d42009-09-12 07:36:22 +00001490 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
edgar_igl7d03f822008-05-17 18:58:29 +00001491
aliguoria1d1bb32008-11-18 20:07:32 +00001492 tlb_flush_page(env, watchpoint->vaddr);
1493
Anthony Liguori7267c092011-08-20 22:09:37 -05001494 g_free(watchpoint);
edgar_igl7d03f822008-05-17 18:58:29 +00001495}
1496
aliguoria1d1bb32008-11-18 20:07:32 +00001497/* Remove all matching watchpoints. */
1498void cpu_watchpoint_remove_all(CPUState *env, int mask)
1499{
aliguoric0ce9982008-11-25 22:13:57 +00001500 CPUWatchpoint *wp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001501
Blue Swirl72cf2d42009-09-12 07:36:22 +00001502 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001503 if (wp->flags & mask)
1504 cpu_watchpoint_remove_by_ref(env, wp);
aliguoric0ce9982008-11-25 22:13:57 +00001505 }
aliguoria1d1bb32008-11-18 20:07:32 +00001506}
Paul Brookc527ee82010-03-01 03:31:14 +00001507#endif
aliguoria1d1bb32008-11-18 20:07:32 +00001508
1509/* Add a breakpoint. */
1510int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1511 CPUBreakpoint **breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001512{
bellard1fddef42005-04-17 19:16:13 +00001513#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001514 CPUBreakpoint *bp;
ths3b46e622007-09-17 08:09:54 +00001515
Anthony Liguori7267c092011-08-20 22:09:37 -05001516 bp = g_malloc(sizeof(*bp));
aliguoria1d1bb32008-11-18 20:07:32 +00001517
1518 bp->pc = pc;
1519 bp->flags = flags;
1520
aliguori2dc9f412008-11-18 20:56:59 +00001521 /* keep all GDB-injected breakpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001522 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001523 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001524 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001525 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001526
1527 breakpoint_invalidate(env, pc);
1528
1529 if (breakpoint)
1530 *breakpoint = bp;
1531 return 0;
1532#else
1533 return -ENOSYS;
1534#endif
1535}
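/* Usage sketch (assumed caller, e.g. a gdbstub command handler; not part
   of the original file):

       CPUBreakpoint *bp;
       target_ulong guest_pc = 0x8000;   // hypothetical guest address
       if (cpu_breakpoint_insert(env, guest_pc, BP_GDB, &bp) == 0) {
           // breakpoint_invalidate() has already dropped the TB for this
           // PC, so it will be retranslated with a debug check
       }
*/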
1536
1537/* Remove a specific breakpoint. */
1538int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1539{
1540#if defined(TARGET_HAS_ICE)
1541 CPUBreakpoint *bp;
1542
Blue Swirl72cf2d42009-09-12 07:36:22 +00001543 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00001544 if (bp->pc == pc && bp->flags == flags) {
1545 cpu_breakpoint_remove_by_ref(env, bp);
bellard4c3a88a2003-07-26 12:06:08 +00001546 return 0;
aliguoria1d1bb32008-11-18 20:07:32 +00001547 }
bellard4c3a88a2003-07-26 12:06:08 +00001548 }
aliguoria1d1bb32008-11-18 20:07:32 +00001549 return -ENOENT;
bellard4c3a88a2003-07-26 12:06:08 +00001550#else
aliguoria1d1bb32008-11-18 20:07:32 +00001551 return -ENOSYS;
bellard4c3a88a2003-07-26 12:06:08 +00001552#endif
1553}
1554
aliguoria1d1bb32008-11-18 20:07:32 +00001555/* Remove a specific breakpoint by reference. */
1556void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001557{
bellard1fddef42005-04-17 19:16:13 +00001558#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001559 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
bellardd720b932004-04-25 17:57:43 +00001560
aliguoria1d1bb32008-11-18 20:07:32 +00001561 breakpoint_invalidate(env, breakpoint->pc);
1562
Anthony Liguori7267c092011-08-20 22:09:37 -05001563 g_free(breakpoint);
aliguoria1d1bb32008-11-18 20:07:32 +00001564#endif
1565}
1566
1567/* Remove all matching breakpoints. */
1568void cpu_breakpoint_remove_all(CPUState *env, int mask)
1569{
1570#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001571 CPUBreakpoint *bp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001572
Blue Swirl72cf2d42009-09-12 07:36:22 +00001573 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001574 if (bp->flags & mask)
1575 cpu_breakpoint_remove_by_ref(env, bp);
aliguoric0ce9982008-11-25 22:13:57 +00001576 }
bellard4c3a88a2003-07-26 12:06:08 +00001577#endif
1578}
1579
bellardc33a3462003-07-29 20:50:33 +00001580/* enable or disable single-step mode. EXCP_DEBUG is returned by the
1581 CPU loop after each instruction */
1582void cpu_single_step(CPUState *env, int enabled)
1583{
bellard1fddef42005-04-17 19:16:13 +00001584#if defined(TARGET_HAS_ICE)
bellardc33a3462003-07-29 20:50:33 +00001585 if (env->singlestep_enabled != enabled) {
1586 env->singlestep_enabled = enabled;
aliguorie22a25c2009-03-12 20:12:48 +00001587 if (kvm_enabled())
1588 kvm_update_guest_debug(env, 0);
1589 else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01001590 /* must flush all the translated code to avoid inconsistencies */
aliguorie22a25c2009-03-12 20:12:48 +00001591 /* XXX: only flush what is necessary */
1592 tb_flush(env);
1593 }
bellardc33a3462003-07-29 20:50:33 +00001594 }
1595#endif
1596}
1597
bellard34865132003-10-05 14:28:56 +00001598/* enable or disable low-level logging */
1599void cpu_set_log(int log_flags)
1600{
1601 loglevel = log_flags;
1602 if (loglevel && !logfile) {
pbrook11fcfab2007-07-01 18:21:11 +00001603 logfile = fopen(logfilename, log_append ? "a" : "w");
bellard34865132003-10-05 14:28:56 +00001604 if (!logfile) {
1605 perror(logfilename);
1606 _exit(1);
1607 }
bellard9fa3e852004-01-04 18:06:42 +00001608#if !defined(CONFIG_SOFTMMU)
1609 /* must avoid glibc's use of mmap() by setting a buffer "by hand" */
1610 {
blueswir1b55266b2008-09-20 08:07:15 +00001611 static char logfile_buf[4096];
bellard9fa3e852004-01-04 18:06:42 +00001612 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1613 }
Stefan Weildaf767b2011-12-03 22:32:37 +01001614#elif defined(_WIN32)
1615 /* Win32 doesn't support line-buffering, so use unbuffered output. */
1616 setvbuf(logfile, NULL, _IONBF, 0);
1617#else
bellard34865132003-10-05 14:28:56 +00001618 setvbuf(logfile, NULL, _IOLBF, 0);
bellard9fa3e852004-01-04 18:06:42 +00001619#endif
pbrooke735b912007-06-30 13:53:24 +00001620 log_append = 1;
1621 }
1622 if (!loglevel && logfile) {
1623 fclose(logfile);
1624 logfile = NULL;
bellard34865132003-10-05 14:28:56 +00001625 }
1626}
1627
1628void cpu_set_log_filename(const char *filename)
1629{
1630 logfilename = strdup(filename);
pbrooke735b912007-06-30 13:53:24 +00001631 if (logfile) {
1632 fclose(logfile);
1633 logfile = NULL;
1634 }
1635 cpu_set_log(loglevel);
bellard34865132003-10-05 14:28:56 +00001636}
bellardc33a3462003-07-29 20:50:33 +00001637
aurel323098dba2009-03-07 21:28:24 +00001638static void cpu_unlink_tb(CPUState *env)
bellardea041c02003-06-25 16:16:50 +00001639{
pbrookd5975362008-06-07 20:50:51 +00001640 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1641 problem and hope the cpu will stop of its own accord. For userspace
1642 emulation this often isn't actually as bad as it sounds. Often
1643 signals are used primarily to interrupt blocking syscalls. */
aurel323098dba2009-03-07 21:28:24 +00001644 TranslationBlock *tb;
Anthony Liguoric227f092009-10-01 16:12:16 -05001645 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
aurel323098dba2009-03-07 21:28:24 +00001646
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001647 spin_lock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001648 tb = env->current_tb;
1649 /* if the cpu is currently executing code, we must unlink it and
1650 all the potentially executing TBs */
Riku Voipiof76cfe52009-12-04 15:16:30 +02001651 if (tb) {
aurel323098dba2009-03-07 21:28:24 +00001652 env->current_tb = NULL;
1653 tb_reset_jump_recursive(tb);
aurel323098dba2009-03-07 21:28:24 +00001654 }
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001655 spin_unlock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001656}
1657
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001658#ifndef CONFIG_USER_ONLY
aurel323098dba2009-03-07 21:28:24 +00001659/* mask must never be zero, except for A20 change call */
Jan Kiszkaec6959d2011-04-13 01:32:56 +02001660static void tcg_handle_interrupt(CPUState *env, int mask)
aurel323098dba2009-03-07 21:28:24 +00001661{
1662 int old_mask;
1663
1664 old_mask = env->interrupt_request;
1665 env->interrupt_request |= mask;
1666
aliguori8edac962009-04-24 18:03:45 +00001667 /*
1668 * If called from the iothread context, wake the target cpu in
1669 * case it's halted.
1670 */
Jan Kiszkab7680cb2011-03-12 17:43:51 +01001671 if (!qemu_cpu_is_self(env)) {
aliguori8edac962009-04-24 18:03:45 +00001672 qemu_cpu_kick(env);
1673 return;
1674 }
aliguori8edac962009-04-24 18:03:45 +00001675
pbrook2e70f6e2008-06-29 01:03:05 +00001676 if (use_icount) {
pbrook266910c2008-07-09 15:31:50 +00001677 env->icount_decr.u16.high = 0xffff;
pbrook2e70f6e2008-06-29 01:03:05 +00001678 if (!can_do_io(env)
aurel32be214e62009-03-06 21:48:00 +00001679 && (mask & ~old_mask) != 0) {
pbrook2e70f6e2008-06-29 01:03:05 +00001680 cpu_abort(env, "Raised interrupt while not in I/O function");
1681 }
pbrook2e70f6e2008-06-29 01:03:05 +00001682 } else {
aurel323098dba2009-03-07 21:28:24 +00001683 cpu_unlink_tb(env);
bellardea041c02003-06-25 16:16:50 +00001684 }
1685}
1686
Jan Kiszkaec6959d2011-04-13 01:32:56 +02001687CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1688
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001689#else /* CONFIG_USER_ONLY */
1690
1691void cpu_interrupt(CPUState *env, int mask)
1692{
1693 env->interrupt_request |= mask;
1694 cpu_unlink_tb(env);
1695}
1696#endif /* CONFIG_USER_ONLY */
1697
bellardb54ad042004-05-20 13:42:52 +00001698void cpu_reset_interrupt(CPUState *env, int mask)
1699{
1700 env->interrupt_request &= ~mask;
1701}
1702
aurel323098dba2009-03-07 21:28:24 +00001703void cpu_exit(CPUState *env)
1704{
1705 env->exit_request = 1;
1706 cpu_unlink_tb(env);
1707}
1708
blueswir1c7cd6a32008-10-02 18:27:46 +00001709const CPULogItem cpu_log_items[] = {
ths5fafdf22007-09-16 21:08:06 +00001710 { CPU_LOG_TB_OUT_ASM, "out_asm",
bellardf193c792004-03-21 17:06:25 +00001711 "show generated host assembly code for each compiled TB" },
1712 { CPU_LOG_TB_IN_ASM, "in_asm",
1713 "show target assembly code for each compiled TB" },
ths5fafdf22007-09-16 21:08:06 +00001714 { CPU_LOG_TB_OP, "op",
bellard57fec1f2008-02-01 10:50:11 +00001715 "show micro ops for each compiled TB" },
bellardf193c792004-03-21 17:06:25 +00001716 { CPU_LOG_TB_OP_OPT, "op_opt",
blueswir1e01a1152008-03-14 17:37:11 +00001717 "show micro ops "
1718#ifdef TARGET_I386
1719 "before eflags optimization and "
bellardf193c792004-03-21 17:06:25 +00001720#endif
blueswir1e01a1152008-03-14 17:37:11 +00001721 "after liveness analysis" },
bellardf193c792004-03-21 17:06:25 +00001722 { CPU_LOG_INT, "int",
1723 "show interrupts/exceptions in short format" },
1724 { CPU_LOG_EXEC, "exec",
1725 "show trace before each executed TB (lots of logs)" },
bellard9fddaa02004-05-21 12:59:32 +00001726 { CPU_LOG_TB_CPU, "cpu",
thse91c8a72007-06-03 13:35:16 +00001727 "show CPU state before block translation" },
bellardf193c792004-03-21 17:06:25 +00001728#ifdef TARGET_I386
1729 { CPU_LOG_PCALL, "pcall",
1730 "show protected mode far calls/returns/exceptions" },
aliguorieca1bdf2009-01-26 19:54:31 +00001731 { CPU_LOG_RESET, "cpu_reset",
1732 "show CPU state before CPU resets" },
bellardf193c792004-03-21 17:06:25 +00001733#endif
bellard8e3a9fd2004-10-09 17:32:58 +00001734#ifdef DEBUG_IOPORT
bellardfd872592004-05-12 19:11:15 +00001735 { CPU_LOG_IOPORT, "ioport",
1736 "show all i/o ports accesses" },
bellard8e3a9fd2004-10-09 17:32:58 +00001737#endif
bellardf193c792004-03-21 17:06:25 +00001738 { 0, NULL, NULL },
1739};
1740
1741static int cmp1(const char *s1, int n, const char *s2)
1742{
1743 if (strlen(s2) != n)
1744 return 0;
1745 return memcmp(s1, s2, n) == 0;
1746}
ths3b46e622007-09-17 08:09:54 +00001747
bellardf193c792004-03-21 17:06:25 +00001748/* takes a comma-separated list of log masks. Returns 0 on error. */
1749int cpu_str_to_log_mask(const char *str)
1750{
blueswir1c7cd6a32008-10-02 18:27:46 +00001751 const CPULogItem *item;
bellardf193c792004-03-21 17:06:25 +00001752 int mask;
1753 const char *p, *p1;
1754
1755 p = str;
1756 mask = 0;
1757 for(;;) {
1758 p1 = strchr(p, ',');
1759 if (!p1)
1760 p1 = p + strlen(p);
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001761 if (cmp1(p, p1 - p, "all")) {
1762 for(item = cpu_log_items; item->mask != 0; item++) {
1763 mask |= item->mask;
1764 }
1765 } else {
1766 for(item = cpu_log_items; item->mask != 0; item++) {
1767 if (cmp1(p, p1 - p, item->name))
1768 goto found;
1769 }
1770 return 0;
bellardf193c792004-03-21 17:06:25 +00001771 }
bellardf193c792004-03-21 17:06:25 +00001772 found:
1773 mask |= item->mask;
1774 if (*p1 != ',')
1775 break;
1776 p = p1 + 1;
1777 }
1778 return mask;
1779}
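/* Usage sketch (not part of the original file): this is how a "-d"
   style command line option is typically wired up:

       int mask = cpu_str_to_log_mask("in_asm,exec");
       if (mask == 0) {
           // unknown log item name in the list
       } else {
           cpu_set_log(mask);
       }
*/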
bellardea041c02003-06-25 16:16:50 +00001780
bellard75012672003-06-21 13:11:07 +00001781void cpu_abort(CPUState *env, const char *fmt, ...)
1782{
1783 va_list ap;
pbrook493ae1f2007-11-23 16:53:59 +00001784 va_list ap2;
bellard75012672003-06-21 13:11:07 +00001785
1786 va_start(ap, fmt);
pbrook493ae1f2007-11-23 16:53:59 +00001787 va_copy(ap2, ap);
bellard75012672003-06-21 13:11:07 +00001788 fprintf(stderr, "qemu: fatal: ");
1789 vfprintf(stderr, fmt, ap);
1790 fprintf(stderr, "\n");
1791#ifdef TARGET_I386
bellard7fe48482004-10-09 18:08:01 +00001792 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1793#else
1794 cpu_dump_state(env, stderr, fprintf, 0);
bellard75012672003-06-21 13:11:07 +00001795#endif
aliguori93fcfe32009-01-15 22:34:14 +00001796 if (qemu_log_enabled()) {
1797 qemu_log("qemu: fatal: ");
1798 qemu_log_vprintf(fmt, ap2);
1799 qemu_log("\n");
j_mayerf9373292007-09-29 12:18:20 +00001800#ifdef TARGET_I386
aliguori93fcfe32009-01-15 22:34:14 +00001801 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
j_mayerf9373292007-09-29 12:18:20 +00001802#else
aliguori93fcfe32009-01-15 22:34:14 +00001803 log_cpu_state(env, 0);
j_mayerf9373292007-09-29 12:18:20 +00001804#endif
aliguori31b1a7b2009-01-15 22:35:09 +00001805 qemu_log_flush();
aliguori93fcfe32009-01-15 22:34:14 +00001806 qemu_log_close();
balrog924edca2007-06-10 14:07:13 +00001807 }
pbrook493ae1f2007-11-23 16:53:59 +00001808 va_end(ap2);
j_mayerf9373292007-09-29 12:18:20 +00001809 va_end(ap);
Riku Voipiofd052bf2010-01-25 14:30:49 +02001810#if defined(CONFIG_USER_ONLY)
1811 {
1812 struct sigaction act;
1813 sigfillset(&act.sa_mask);
1814 act.sa_handler = SIG_DFL;
1815 sigaction(SIGABRT, &act, NULL);
1816 }
1817#endif
bellard75012672003-06-21 13:11:07 +00001818 abort();
1819}
1820
thsc5be9f02007-02-28 20:20:53 +00001821CPUState *cpu_copy(CPUState *env)
1822{
ths01ba9812007-12-09 02:22:57 +00001823 CPUState *new_env = cpu_init(env->cpu_model_str);
thsc5be9f02007-02-28 20:20:53 +00001824 CPUState *next_cpu = new_env->next_cpu;
1825 int cpu_index = new_env->cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001826#if defined(TARGET_HAS_ICE)
1827 CPUBreakpoint *bp;
1828 CPUWatchpoint *wp;
1829#endif
1830
thsc5be9f02007-02-28 20:20:53 +00001831 memcpy(new_env, env, sizeof(CPUState));
aliguori5a38f082009-01-15 20:16:51 +00001832
1833 /* Preserve chaining and index. */
thsc5be9f02007-02-28 20:20:53 +00001834 new_env->next_cpu = next_cpu;
1835 new_env->cpu_index = cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001836
1837 /* Clone all break/watchpoints.
1838 Note: Once we support ptrace with hw-debug register access, make sure
1839 BP_CPU break/watchpoints are handled correctly on clone. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00001840 QTAILQ_INIT(&env->breakpoints);
1841 QTAILQ_INIT(&env->watchpoints);
aliguori5a38f082009-01-15 20:16:51 +00001842#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001843 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001844 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1845 }
Blue Swirl72cf2d42009-09-12 07:36:22 +00001846 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001847 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1848 wp->flags, NULL);
1849 }
1850#endif
1851
thsc5be9f02007-02-28 20:20:53 +00001852 return new_env;
1853}
1854
bellard01243112004-01-04 15:48:17 +00001855#if !defined(CONFIG_USER_ONLY)
1856
edgar_igl5c751e92008-05-06 08:44:21 +00001857static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1858{
1859 unsigned int i;
1860
1861 /* Discard jump cache entries for any tb which might potentially
1862 overlap the flushed page. */
1863 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1864 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001865 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001866
1867 i = tb_jmp_cache_hash_page(addr);
1868 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001869 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001870}
1871
Igor Kovalenko08738982009-07-12 02:15:40 +04001872static CPUTLBEntry s_cputlb_empty_entry = {
1873 .addr_read = -1,
1874 .addr_write = -1,
1875 .addr_code = -1,
1876 .addend = -1,
1877};
1878
bellardee8b7022004-02-03 23:35:10 +00001879/* NOTE: if flush_global is true, also flush global entries (not
1880 implemented yet) */
1881void tlb_flush(CPUState *env, int flush_global)
bellard33417e72003-08-10 21:47:01 +00001882{
bellard33417e72003-08-10 21:47:01 +00001883 int i;
bellard01243112004-01-04 15:48:17 +00001884
bellard9fa3e852004-01-04 18:06:42 +00001885#if defined(DEBUG_TLB)
1886 printf("tlb_flush:\n");
1887#endif
bellard01243112004-01-04 15:48:17 +00001888 /* must reset current TB so that interrupts cannot modify the
1889 links while we are modifying them */
1890 env->current_tb = NULL;
1891
bellard33417e72003-08-10 21:47:01 +00001892 for(i = 0; i < CPU_TLB_SIZE; i++) {
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001893 int mmu_idx;
1894 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
Igor Kovalenko08738982009-07-12 02:15:40 +04001895 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001896 }
bellard33417e72003-08-10 21:47:01 +00001897 }
bellard9fa3e852004-01-04 18:06:42 +00001898
bellard8a40a182005-11-20 10:35:40 +00001899 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
bellard9fa3e852004-01-04 18:06:42 +00001900
Paul Brookd4c430a2010-03-17 02:14:28 +00001901 env->tlb_flush_addr = -1;
1902 env->tlb_flush_mask = 0;
bellarde3db7222005-01-26 22:00:47 +00001903 tlb_flush_count++;
bellard33417e72003-08-10 21:47:01 +00001904}
1905
bellard274da6b2004-05-20 21:56:27 +00001906static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
bellard61382a52003-10-27 21:22:23 +00001907{
ths5fafdf22007-09-16 21:08:06 +00001908 if (addr == (tlb_entry->addr_read &
bellard84b7b8e2005-11-28 21:19:04 +00001909 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00001910 addr == (tlb_entry->addr_write &
bellard84b7b8e2005-11-28 21:19:04 +00001911 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00001912 addr == (tlb_entry->addr_code &
bellard84b7b8e2005-11-28 21:19:04 +00001913 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
Igor Kovalenko08738982009-07-12 02:15:40 +04001914 *tlb_entry = s_cputlb_empty_entry;
bellard84b7b8e2005-11-28 21:19:04 +00001915 }
bellard61382a52003-10-27 21:22:23 +00001916}
1917
bellard2e126692004-04-25 21:28:44 +00001918void tlb_flush_page(CPUState *env, target_ulong addr)
bellard33417e72003-08-10 21:47:01 +00001919{
bellard8a40a182005-11-20 10:35:40 +00001920 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001921 int mmu_idx;
bellard01243112004-01-04 15:48:17 +00001922
bellard9fa3e852004-01-04 18:06:42 +00001923#if defined(DEBUG_TLB)
bellard108c49b2005-07-24 12:55:09 +00001924 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
bellard9fa3e852004-01-04 18:06:42 +00001925#endif
Paul Brookd4c430a2010-03-17 02:14:28 +00001926 /* Check if we need to flush due to large pages. */
1927 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
1928#if defined(DEBUG_TLB)
1929 printf("tlb_flush_page: forced full flush ("
1930 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
1931 env->tlb_flush_addr, env->tlb_flush_mask);
1932#endif
1933 tlb_flush(env, 1);
1934 return;
1935 }
bellard01243112004-01-04 15:48:17 +00001936 /* must reset current TB so that interrupts cannot modify the
1937 links while we are modifying them */
1938 env->current_tb = NULL;
bellard33417e72003-08-10 21:47:01 +00001939
bellard61382a52003-10-27 21:22:23 +00001940 addr &= TARGET_PAGE_MASK;
bellard33417e72003-08-10 21:47:01 +00001941 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001942 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1943 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
bellard01243112004-01-04 15:48:17 +00001944
edgar_igl5c751e92008-05-06 08:44:21 +00001945 tlb_flush_jmp_cache(env, addr);
bellard9fa3e852004-01-04 18:06:42 +00001946}
1947
bellard9fa3e852004-01-04 18:06:42 +00001948/* update the TLBs so that writes to code in the virtual page 'addr'
1949 can be detected */
Anthony Liguoric227f092009-10-01 16:12:16 -05001950static void tlb_protect_code(ram_addr_t ram_addr)
bellard61382a52003-10-27 21:22:23 +00001951{
ths5fafdf22007-09-16 21:08:06 +00001952 cpu_physical_memory_reset_dirty(ram_addr,
bellard6a00d602005-11-21 23:25:50 +00001953 ram_addr + TARGET_PAGE_SIZE,
1954 CODE_DIRTY_FLAG);
bellard9fa3e852004-01-04 18:06:42 +00001955}
1956
bellard9fa3e852004-01-04 18:06:42 +00001957/* update the TLB so that writes in physical page 'phys_addr' are no longer
bellard3a7d9292005-08-21 09:26:42 +00001958 tested for self-modifying code */
Anthony Liguoric227f092009-10-01 16:12:16 -05001959static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
bellard3a7d9292005-08-21 09:26:42 +00001960 target_ulong vaddr)
bellard9fa3e852004-01-04 18:06:42 +00001961{
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001962 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
bellard1ccde1c2004-02-06 19:46:14 +00001963}
1964
ths5fafdf22007-09-16 21:08:06 +00001965static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
bellard1ccde1c2004-02-06 19:46:14 +00001966 unsigned long start, unsigned long length)
1967{
1968 unsigned long addr;
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001969 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
bellard84b7b8e2005-11-28 21:19:04 +00001970 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
bellard1ccde1c2004-02-06 19:46:14 +00001971 if ((addr - start) < length) {
pbrook0f459d12008-06-09 00:20:13 +00001972 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
bellard1ccde1c2004-02-06 19:46:14 +00001973 }
1974 }
1975}
1976
pbrook5579c7f2009-04-11 14:47:08 +00001977/* Note: start and end must be within the same ram block. */
Anthony Liguoric227f092009-10-01 16:12:16 -05001978void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
bellard0a962c02005-02-10 22:00:27 +00001979 int dirty_flags)
bellard1ccde1c2004-02-06 19:46:14 +00001980{
1981 CPUState *env;
bellard4f2ac232004-04-26 19:44:02 +00001982 unsigned long length, start1;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001983 int i;
bellard1ccde1c2004-02-06 19:46:14 +00001984
1985 start &= TARGET_PAGE_MASK;
1986 end = TARGET_PAGE_ALIGN(end);
1987
1988 length = end - start;
1989 if (length == 0)
1990 return;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001991 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00001992
bellard1ccde1c2004-02-06 19:46:14 +00001993 /* we modify the TLB cache so that the dirty bit will be set again
1994 when accessing the range */
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001995 start1 = (unsigned long)qemu_safe_ram_ptr(start);
Stefan Weila57d23e2011-04-30 22:49:26 +02001996 /* Check that we don't span multiple blocks - this breaks the
pbrook5579c7f2009-04-11 14:47:08 +00001997 address comparisons below. */
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001998 if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
pbrook5579c7f2009-04-11 14:47:08 +00001999 != (end - 1) - start) {
2000 abort();
2001 }
2002
bellard6a00d602005-11-21 23:25:50 +00002003 for(env = first_cpu; env != NULL; env = env->next_cpu) {
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002004 int mmu_idx;
2005 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2006 for(i = 0; i < CPU_TLB_SIZE; i++)
2007 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2008 start1, length);
2009 }
bellard6a00d602005-11-21 23:25:50 +00002010 }
bellard1ccde1c2004-02-06 19:46:14 +00002011}
2012
aliguori74576192008-10-06 14:02:03 +00002013int cpu_physical_memory_set_dirty_tracking(int enable)
2014{
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002015 int ret = 0;
aliguori74576192008-10-06 14:02:03 +00002016 in_migration = enable;
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002017 return ret;
aliguori74576192008-10-06 14:02:03 +00002018}
2019
bellard3a7d9292005-08-21 09:26:42 +00002020static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2021{
Anthony Liguoric227f092009-10-01 16:12:16 -05002022 ram_addr_t ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00002023 void *p;
bellard3a7d9292005-08-21 09:26:42 +00002024
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002025 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
pbrook5579c7f2009-04-11 14:47:08 +00002026 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2027 + tlb_entry->addend);
Marcelo Tosattie8902612010-10-11 15:31:19 -03002028 ram_addr = qemu_ram_addr_from_host_nofail(p);
bellard3a7d9292005-08-21 09:26:42 +00002029 if (!cpu_physical_memory_is_dirty(ram_addr)) {
pbrook0f459d12008-06-09 00:20:13 +00002030 tlb_entry->addr_write |= TLB_NOTDIRTY;
bellard3a7d9292005-08-21 09:26:42 +00002031 }
2032 }
2033}
2034
2035/* update the TLB according to the current state of the dirty bits */
2036void cpu_tlb_update_dirty(CPUState *env)
2037{
2038 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002039 int mmu_idx;
2040 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2041 for(i = 0; i < CPU_TLB_SIZE; i++)
2042 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2043 }
bellard3a7d9292005-08-21 09:26:42 +00002044}
2045
pbrook0f459d12008-06-09 00:20:13 +00002046static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002047{
pbrook0f459d12008-06-09 00:20:13 +00002048 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2049 tlb_entry->addr_write = vaddr;
bellard1ccde1c2004-02-06 19:46:14 +00002050}
2051
pbrook0f459d12008-06-09 00:20:13 +00002052/* update the TLB corresponding to virtual page vaddr
2053 so that it is no longer dirty */
2054static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002055{
bellard1ccde1c2004-02-06 19:46:14 +00002056 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002057 int mmu_idx;
bellard1ccde1c2004-02-06 19:46:14 +00002058
pbrook0f459d12008-06-09 00:20:13 +00002059 vaddr &= TARGET_PAGE_MASK;
bellard1ccde1c2004-02-06 19:46:14 +00002060 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002061 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2062 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
bellard9fa3e852004-01-04 18:06:42 +00002063}
2064
Paul Brookd4c430a2010-03-17 02:14:28 +00002065/* Our TLB does not support large pages, so remember the area covered by
2066 large pages and trigger a full TLB flush if these are invalidated. */
2067static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2068 target_ulong size)
2069{
2070 target_ulong mask = ~(size - 1);
2071
2072 if (env->tlb_flush_addr == (target_ulong)-1) {
2073 env->tlb_flush_addr = vaddr & mask;
2074 env->tlb_flush_mask = mask;
2075 return;
2076 }
2077 /* Extend the existing region to include the new page.
2078 This is a compromise between unnecessary flushes and the cost
2079 of maintaining a full variable size TLB. */
2080 mask &= env->tlb_flush_mask;
2081 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2082 mask <<= 1;
2083 }
2084 env->tlb_flush_addr &= mask;
2085 env->tlb_flush_mask = mask;
2086}
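/* Worked example (a sketch assuming a 32-bit target_ulong and 1 MB large
   pages): a first call with vaddr = 0x00500000 records tlb_flush_addr =
   0x00500000 and tlb_flush_mask = 0xfff00000. A second call with vaddr =
   0x00900000 widens the mask one bit at a time until
   ((0x00500000 ^ 0x00900000) & mask) == 0, leaving tlb_flush_mask =
   0xff000000 and tlb_flush_addr = 0x00000000, i.e. one region covering
   both pages at the cost of occasionally flushing too much. */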
2087
Avi Kivity1d393fa2012-01-01 21:15:42 +02002088static bool is_ram_rom(ram_addr_t pd)
2089{
2090 pd &= ~TARGET_PAGE_MASK;
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002091 return pd == io_mem_ram.ram_addr || pd == io_mem_rom.ram_addr;
Avi Kivity1d393fa2012-01-01 21:15:42 +02002092}
2093
2094static bool is_ram_rom_romd(ram_addr_t pd)
2095{
2096 return is_ram_rom(pd) || (pd & IO_MEM_ROMD);
2097}
2098
Paul Brookd4c430a2010-03-17 02:14:28 +00002099/* Add a new TLB entry. At most one entry for a given virtual address
2100 is permitted. Only a single TARGET_PAGE_SIZE region is mapped; the
2101 supplied size is only used by tlb_flush_page. */
2102void tlb_set_page(CPUState *env, target_ulong vaddr,
2103 target_phys_addr_t paddr, int prot,
2104 int mmu_idx, target_ulong size)
bellard9fa3e852004-01-04 18:06:42 +00002105{
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02002106 PhysPageDesc p;
bellard4f2ac232004-04-26 19:44:02 +00002107 unsigned long pd;
bellard9fa3e852004-01-04 18:06:42 +00002108 unsigned int index;
bellard4f2ac232004-04-26 19:44:02 +00002109 target_ulong address;
pbrook0f459d12008-06-09 00:20:13 +00002110 target_ulong code_address;
Paul Brook355b1942010-04-05 00:28:53 +01002111 unsigned long addend;
bellard84b7b8e2005-11-28 21:19:04 +00002112 CPUTLBEntry *te;
aliguoria1d1bb32008-11-18 20:07:32 +00002113 CPUWatchpoint *wp;
Anthony Liguoric227f092009-10-01 16:12:16 -05002114 target_phys_addr_t iotlb;
bellard9fa3e852004-01-04 18:06:42 +00002115
Paul Brookd4c430a2010-03-17 02:14:28 +00002116 assert(size >= TARGET_PAGE_SIZE);
2117 if (size != TARGET_PAGE_SIZE) {
2118 tlb_add_large_page(env, vaddr, size);
2119 }
bellard92e873b2004-05-21 14:52:29 +00002120 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02002121 pd = p.phys_offset;
bellard9fa3e852004-01-04 18:06:42 +00002122#if defined(DEBUG_TLB)
Stefan Weil7fd3f492010-09-30 22:39:51 +02002123 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
2124 " prot=%x idx=%d pd=0x%08lx\n",
2125 vaddr, paddr, prot, mmu_idx, pd);
bellard9fa3e852004-01-04 18:06:42 +00002126#endif
2127
pbrook0f459d12008-06-09 00:20:13 +00002128 address = vaddr;
Avi Kivity1d393fa2012-01-01 21:15:42 +02002129 if (!is_ram_rom_romd(pd)) {
pbrook0f459d12008-06-09 00:20:13 +00002130 /* IO memory case (romd handled later) */
2131 address |= TLB_MMIO;
2132 }
pbrook5579c7f2009-04-11 14:47:08 +00002133 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
Avi Kivity1d393fa2012-01-01 21:15:42 +02002134 if (is_ram_rom(pd)) {
pbrook0f459d12008-06-09 00:20:13 +00002135 /* Normal RAM. */
2136 iotlb = pd & TARGET_PAGE_MASK;
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002137 if ((pd & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr)
2138 iotlb |= io_mem_notdirty.ram_addr;
pbrook0f459d12008-06-09 00:20:13 +00002139 else
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002140 iotlb |= io_mem_rom.ram_addr;
pbrook0f459d12008-06-09 00:20:13 +00002141 } else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002142 /* IO handlers are currently passed a physical address.
pbrook0f459d12008-06-09 00:20:13 +00002143 It would be nice to pass an offset from the base address
2144 of that region. This would avoid having to special case RAM,
2145 and avoid full address decoding in every device.
2146 We can't use the high bits of pd for this because
2147 IO_MEM_ROMD uses these as a ram address. */
pbrook8da3ff12008-12-01 18:59:50 +00002148 iotlb = (pd & ~TARGET_PAGE_MASK);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02002149 iotlb += p.region_offset;
pbrook0f459d12008-06-09 00:20:13 +00002150 }
pbrook6658ffb2007-03-16 23:58:11 +00002151
pbrook0f459d12008-06-09 00:20:13 +00002152 code_address = address;
2153 /* Make accesses to pages with watchpoints go via the
2154 watchpoint trap routines. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00002155 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00002156 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
Jun Koibf298f82010-05-06 14:36:59 +09002157 /* Avoid trapping reads of pages with a write breakpoint. */
2158 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
Avi Kivity1ec9b902012-01-02 12:47:48 +02002159 iotlb = io_mem_watch.ram_addr + paddr;
Jun Koibf298f82010-05-06 14:36:59 +09002160 address |= TLB_MMIO;
2161 break;
2162 }
pbrook6658ffb2007-03-16 23:58:11 +00002163 }
pbrook0f459d12008-06-09 00:20:13 +00002164 }
balrogd79acba2007-06-26 20:01:13 +00002165
pbrook0f459d12008-06-09 00:20:13 +00002166 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2167 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2168 te = &env->tlb_table[mmu_idx][index];
2169 te->addend = addend - vaddr;
2170 if (prot & PAGE_READ) {
2171 te->addr_read = address;
2172 } else {
2173 te->addr_read = -1;
2174 }
edgar_igl5c751e92008-05-06 08:44:21 +00002175
pbrook0f459d12008-06-09 00:20:13 +00002176 if (prot & PAGE_EXEC) {
2177 te->addr_code = code_address;
2178 } else {
2179 te->addr_code = -1;
2180 }
2181 if (prot & PAGE_WRITE) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002182 if ((pd & ~TARGET_PAGE_MASK) == io_mem_rom.ram_addr ||
pbrook0f459d12008-06-09 00:20:13 +00002183 (pd & IO_MEM_ROMD)) {
2184 /* Write access calls the I/O callback. */
2185 te->addr_write = address | TLB_MMIO;
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002186 } else if ((pd & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr &&
pbrook0f459d12008-06-09 00:20:13 +00002187 !cpu_physical_memory_is_dirty(pd)) {
2188 te->addr_write = address | TLB_NOTDIRTY;
bellard84b7b8e2005-11-28 21:19:04 +00002189 } else {
pbrook0f459d12008-06-09 00:20:13 +00002190 te->addr_write = address;
bellard9fa3e852004-01-04 18:06:42 +00002191 }
pbrook0f459d12008-06-09 00:20:13 +00002192 } else {
2193 te->addr_write = -1;
bellard9fa3e852004-01-04 18:06:42 +00002194 }
bellard9fa3e852004-01-04 18:06:42 +00002195}
2196
bellard01243112004-01-04 15:48:17 +00002197#else
2198
bellardee8b7022004-02-03 23:35:10 +00002199void tlb_flush(CPUState *env, int flush_global)
bellard01243112004-01-04 15:48:17 +00002200{
2201}
2202
bellard2e126692004-04-25 21:28:44 +00002203void tlb_flush_page(CPUState *env, target_ulong addr)
bellard01243112004-01-04 15:48:17 +00002204{
2205}
2206
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002207/*
2208 * Walks guest process memory "regions" one by one
2209 * and calls the callback function 'fn' for each region.
2210 */
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002211
2212struct walk_memory_regions_data
bellard9fa3e852004-01-04 18:06:42 +00002213{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002214 walk_memory_regions_fn fn;
2215 void *priv;
2216 unsigned long start;
2217 int prot;
2218};
bellard9fa3e852004-01-04 18:06:42 +00002219
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002220static int walk_memory_regions_end(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00002221 abi_ulong end, int new_prot)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002222{
2223 if (data->start != -1ul) {
2224 int rc = data->fn(data->priv, data->start, end, data->prot);
2225 if (rc != 0) {
2226 return rc;
bellard9fa3e852004-01-04 18:06:42 +00002227 }
bellard33417e72003-08-10 21:47:01 +00002228 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002229
2230 data->start = (new_prot ? end : -1ul);
2231 data->prot = new_prot;
2232
2233 return 0;
2234}
2235
2236static int walk_memory_regions_1(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00002237 abi_ulong base, int level, void **lp)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002238{
Paul Brookb480d9b2010-03-12 23:23:29 +00002239 abi_ulong pa;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002240 int i, rc;
2241
2242 if (*lp == NULL) {
2243 return walk_memory_regions_end(data, base, 0);
2244 }
2245
2246 if (level == 0) {
2247 PageDesc *pd = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00002248 for (i = 0; i < L2_SIZE; ++i) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002249 int prot = pd[i].flags;
2250
2251 pa = base | (i << TARGET_PAGE_BITS);
2252 if (prot != data->prot) {
2253 rc = walk_memory_regions_end(data, pa, prot);
2254 if (rc != 0) {
2255 return rc;
2256 }
2257 }
2258 }
2259 } else {
2260 void **pp = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00002261 for (i = 0; i < L2_SIZE; ++i) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002262 pa = base | ((abi_ulong)i <<
2263 (TARGET_PAGE_BITS + L2_BITS * level));
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002264 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2265 if (rc != 0) {
2266 return rc;
2267 }
2268 }
2269 }
2270
2271 return 0;
2272}
2273
2274int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2275{
2276 struct walk_memory_regions_data data;
2277 unsigned long i;
2278
2279 data.fn = fn;
2280 data.priv = priv;
2281 data.start = -1ul;
2282 data.prot = 0;
2283
2284 for (i = 0; i < V_L1_SIZE; i++) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002285 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002286 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2287 if (rc != 0) {
2288 return rc;
2289 }
2290 }
2291
2292 return walk_memory_regions_end(&data, 0, 0);
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002293}
2294
Paul Brookb480d9b2010-03-12 23:23:29 +00002295static int dump_region(void *priv, abi_ulong start,
2296 abi_ulong end, unsigned long prot)
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002297{
2298 FILE *f = (FILE *)priv;
2299
Paul Brookb480d9b2010-03-12 23:23:29 +00002300 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2301 " "TARGET_ABI_FMT_lx" %c%c%c\n",
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002302 start, end, end - start,
2303 ((prot & PAGE_READ) ? 'r' : '-'),
2304 ((prot & PAGE_WRITE) ? 'w' : '-'),
2305 ((prot & PAGE_EXEC) ? 'x' : '-'));
2306
2307 return (0);
2308}
2309
2310/* dump memory mappings */
2311void page_dump(FILE *f)
2312{
2313 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2314 "start", "end", "size", "prot");
2315 walk_memory_regions(f, dump_region);
bellard33417e72003-08-10 21:47:01 +00002316}
2317
pbrook53a59602006-03-25 19:31:22 +00002318int page_get_flags(target_ulong address)
bellard33417e72003-08-10 21:47:01 +00002319{
bellard9fa3e852004-01-04 18:06:42 +00002320 PageDesc *p;
2321
2322 p = page_find(address >> TARGET_PAGE_BITS);
bellard33417e72003-08-10 21:47:01 +00002323 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00002324 return 0;
2325 return p->flags;
bellard33417e72003-08-10 21:47:01 +00002326}
2327
Richard Henderson376a7902010-03-10 15:57:04 -08002328/* Modify the flags of a page and invalidate the code if necessary.
2329 The flag PAGE_WRITE_ORG is set automatically depending
2330 on PAGE_WRITE. The mmap_lock should already be held. */
pbrook53a59602006-03-25 19:31:22 +00002331void page_set_flags(target_ulong start, target_ulong end, int flags)
bellard9fa3e852004-01-04 18:06:42 +00002332{
Richard Henderson376a7902010-03-10 15:57:04 -08002333 target_ulong addr, len;
bellard9fa3e852004-01-04 18:06:42 +00002334
Richard Henderson376a7902010-03-10 15:57:04 -08002335 /* This function should never be called with addresses outside the
2336 guest address space. If this assert fires, it probably indicates
2337 a missing call to h2g_valid. */
Paul Brookb480d9b2010-03-12 23:23:29 +00002338#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2339 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002340#endif
2341 assert(start < end);
2342
bellard9fa3e852004-01-04 18:06:42 +00002343 start = start & TARGET_PAGE_MASK;
2344 end = TARGET_PAGE_ALIGN(end);
Richard Henderson376a7902010-03-10 15:57:04 -08002345
2346 if (flags & PAGE_WRITE) {
bellard9fa3e852004-01-04 18:06:42 +00002347 flags |= PAGE_WRITE_ORG;
Richard Henderson376a7902010-03-10 15:57:04 -08002348 }
2349
2350 for (addr = start, len = end - start;
2351 len != 0;
2352 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2353 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2354
2355 /* If the write protection bit is set, then we invalidate
2356 the code inside. */
ths5fafdf22007-09-16 21:08:06 +00002357 if (!(p->flags & PAGE_WRITE) &&
bellard9fa3e852004-01-04 18:06:42 +00002358 (flags & PAGE_WRITE) &&
2359 p->first_tb) {
bellardd720b932004-04-25 17:57:43 +00002360 tb_invalidate_phys_page(addr, 0, NULL);
bellard9fa3e852004-01-04 18:06:42 +00002361 }
2362 p->flags = flags;
2363 }
bellard9fa3e852004-01-04 18:06:42 +00002364}
2365
ths3d97b402007-11-02 19:02:07 +00002366int page_check_range(target_ulong start, target_ulong len, int flags)
2367{
2368 PageDesc *p;
2369 target_ulong end;
2370 target_ulong addr;
2371
Richard Henderson376a7902010-03-10 15:57:04 -08002372 /* This function should never be called with addresses outside the
2373 guest address space. If this assert fires, it probably indicates
2374 a missing call to h2g_valid. */
Blue Swirl338e9e62010-03-13 09:48:08 +00002375#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2376 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002377#endif
2378
Richard Henderson3e0650a2010-03-29 10:54:42 -07002379 if (len == 0) {
2380 return 0;
2381 }
Richard Henderson376a7902010-03-10 15:57:04 -08002382 if (start + len - 1 < start) {
2383 /* We've wrapped around. */
balrog55f280c2008-10-28 10:24:11 +00002384 return -1;
Richard Henderson376a7902010-03-10 15:57:04 -08002385 }
balrog55f280c2008-10-28 10:24:11 +00002386
ths3d97b402007-11-02 19:02:07 +00002387 end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
2388 start = start & TARGET_PAGE_MASK;
2389
Richard Henderson376a7902010-03-10 15:57:04 -08002390 for (addr = start, len = end - start;
2391 len != 0;
2392 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
ths3d97b402007-11-02 19:02:07 +00002393 p = page_find(addr >> TARGET_PAGE_BITS);
2394 if (!p)
2395 return -1;
2396 if (!(p->flags & PAGE_VALID))
2397 return -1;
2398
bellarddae32702007-11-14 10:51:00 +00002399 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
ths3d97b402007-11-02 19:02:07 +00002400 return -1;
bellarddae32702007-11-14 10:51:00 +00002401 if (flags & PAGE_WRITE) {
2402 if (!(p->flags & PAGE_WRITE_ORG))
2403 return -1;
2404 /* unprotect the page if it was put read-only because it
2405 contains translated code */
2406 if (!(p->flags & PAGE_WRITE)) {
2407 if (!page_unprotect(addr, 0, NULL))
2408 return -1;
2409 }
2410 /* keep iterating so that every page in the range is checked */
2411 }
ths3d97b402007-11-02 19:02:07 +00002412 }
2413 return 0;
2414}
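/* Usage sketch (not part of the original file): a syscall emulation path
   can validate a guest buffer before touching it; TARGET_EFAULT is the
   assumed error convention of the caller:

       if (page_check_range(guest_addr, size, PAGE_READ | PAGE_WRITE) < 0) {
           return -TARGET_EFAULT;
       }
*/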
2415
bellard9fa3e852004-01-04 18:06:42 +00002416/* called from signal handler: invalidate the code and unprotect the
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002417 page. Return TRUE if the fault was successfully handled. */
pbrook53a59602006-03-25 19:31:22 +00002418int page_unprotect(target_ulong address, unsigned long pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00002419{
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002420 unsigned int prot;
2421 PageDesc *p;
pbrook53a59602006-03-25 19:31:22 +00002422 target_ulong host_start, host_end, addr;
bellard9fa3e852004-01-04 18:06:42 +00002423
pbrookc8a706f2008-06-02 16:16:42 +00002424 /* Technically this isn't safe inside a signal handler. However we
2425 know this only ever happens in a synchronous SEGV handler, so in
2426 practice it seems to be ok. */
2427 mmap_lock();
2428
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002429 p = page_find(address >> TARGET_PAGE_BITS);
2430 if (!p) {
pbrookc8a706f2008-06-02 16:16:42 +00002431 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002432 return 0;
pbrookc8a706f2008-06-02 16:16:42 +00002433 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002434
bellard9fa3e852004-01-04 18:06:42 +00002435 /* if the page was really writable, then we change its
2436 protection back to writable */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002437 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2438 host_start = address & qemu_host_page_mask;
2439 host_end = host_start + qemu_host_page_size;
2440
2441 prot = 0;
2442 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2443 p = page_find(addr >> TARGET_PAGE_BITS);
2444 p->flags |= PAGE_WRITE;
2445 prot |= p->flags;
2446
bellard9fa3e852004-01-04 18:06:42 +00002447 /* and since the content will be modified, we must invalidate
2448 the corresponding translated code. */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002449 tb_invalidate_phys_page(addr, pc, puc);
bellard9fa3e852004-01-04 18:06:42 +00002450#ifdef DEBUG_TB_CHECK
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002451 tb_invalidate_check(addr);
bellard9fa3e852004-01-04 18:06:42 +00002452#endif
bellard9fa3e852004-01-04 18:06:42 +00002453 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002454 mprotect((void *)g2h(host_start), qemu_host_page_size,
2455 prot & PAGE_BITS);
2456
2457 mmap_unlock();
2458 return 1;
bellard9fa3e852004-01-04 18:06:42 +00002459 }
pbrookc8a706f2008-06-02 16:16:42 +00002460 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002461 return 0;
2462}
2463
bellard6a00d602005-11-21 23:25:50 +00002464static inline void tlb_set_dirty(CPUState *env,
2465 unsigned long addr, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002466{
2467}
bellard9fa3e852004-01-04 18:06:42 +00002468#endif /* defined(CONFIG_USER_ONLY) */
2469
pbrooke2eef172008-06-08 01:09:01 +00002470#if !defined(CONFIG_USER_ONLY)
pbrook8da3ff12008-12-01 18:59:50 +00002471
Paul Brookc04b2b72010-03-01 03:31:14 +00002472#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2473typedef struct subpage_t {
Avi Kivity70c68e42012-01-02 12:32:48 +02002474 MemoryRegion iomem;
Paul Brookc04b2b72010-03-01 03:31:14 +00002475 target_phys_addr_t base;
Richard Hendersonf6405242010-04-22 16:47:31 -07002476 ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2477 ram_addr_t region_offset[TARGET_PAGE_SIZE];
Paul Brookc04b2b72010-03-01 03:31:14 +00002478} subpage_t;
2479
Anthony Liguoric227f092009-10-01 16:12:16 -05002480static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2481 ram_addr_t memory, ram_addr_t region_offset);
Richard Hendersonf6405242010-04-22 16:47:31 -07002482static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2483 ram_addr_t orig_memory,
2484 ram_addr_t region_offset);
blueswir1db7b5422007-05-26 17:36:03 +00002485#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2486 need_subpage) \
2487 do { \
2488 if (addr > start_addr) \
2489 start_addr2 = 0; \
2490 else { \
2491 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2492 if (start_addr2 > 0) \
2493 need_subpage = 1; \
2494 } \
2495 \
blueswir149e9fba2007-05-30 17:25:06 +00002496 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
blueswir1db7b5422007-05-26 17:36:03 +00002497 end_addr2 = TARGET_PAGE_SIZE - 1; \
2498 else { \
2499 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2500 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2501 need_subpage = 1; \
2502 } \
2503 } while (0)
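/* Worked example (a sketch assuming 4 KB target pages): registering a
   region with start_addr = 0x1800 and orig_size = 0x400, evaluated for
   the page at addr = 0x1000, yields start_addr2 = 0x800 and end_addr2 =
   0xbff with need_subpage = 1: only the middle of that page is covered,
   so a subpage_t must mediate accesses to it. */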
2504
Michael S. Tsirkin8f2498f2009-09-29 18:53:16 +02002505/* register physical memory.
2506 For RAM, 'size' must be a multiple of the target page size.
2507 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
pbrook8da3ff12008-12-01 18:59:50 +00002508 io memory page. The address used when calling the IO function is
2509 the offset from the start of the region, plus region_offset. Both
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002510 start_addr and region_offset are rounded down to a page boundary
pbrook8da3ff12008-12-01 18:59:50 +00002511 before calculating this offset. This should not be a problem unless
2512 the low bits of start_addr and region_offset differ. */
Avi Kivitydd811242012-01-02 12:17:03 +02002513void cpu_register_physical_memory_log(MemoryRegionSection *section,
2514 bool readable, bool readonly)
bellard33417e72003-08-10 21:47:01 +00002515{
Avi Kivitydd811242012-01-02 12:17:03 +02002516 target_phys_addr_t start_addr = section->offset_within_address_space;
2517 ram_addr_t size = section->size;
2518 ram_addr_t phys_offset = section->mr->ram_addr;
2519 ram_addr_t region_offset = section->offset_within_region;
Anthony Liguoric227f092009-10-01 16:12:16 -05002520 target_phys_addr_t addr, end_addr;
bellard92e873b2004-05-21 14:52:29 +00002521 PhysPageDesc *p;
bellard9d420372006-06-25 22:25:22 +00002522 CPUState *env;
Anthony Liguoric227f092009-10-01 16:12:16 -05002523 ram_addr_t orig_size = size;
Richard Hendersonf6405242010-04-22 16:47:31 -07002524 subpage_t *subpage;
bellard33417e72003-08-10 21:47:01 +00002525
Avi Kivitydd811242012-01-02 12:17:03 +02002526 if (memory_region_is_ram(section->mr)) {
2527 phys_offset += region_offset;
2528 region_offset = 0;
2529 }
2530
2531 if (!readable) {
2532 phys_offset &= ~TARGET_PAGE_MASK & ~IO_MEM_ROMD;
2533 }
2534
2535 if (readonly) {
2536 phys_offset |= io_mem_rom.ram_addr;
2537 }
2538
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002539 assert(size);
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002540
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002541 if (phys_offset == io_mem_unassigned.ram_addr) {
pbrook67c4d232009-02-23 13:16:07 +00002542 region_offset = start_addr;
2543 }
pbrook8da3ff12008-12-01 18:59:50 +00002544 region_offset &= TARGET_PAGE_MASK;
bellard5fd386f2004-05-23 21:11:22 +00002545 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
Anthony Liguoric227f092009-10-01 16:12:16 -05002546 end_addr = start_addr + (target_phys_addr_t)size;
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002547
2548 addr = start_addr;
2549 do {
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02002550 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 0);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002551 if (p && p->phys_offset != io_mem_unassigned.ram_addr) {
Anthony Liguoric227f092009-10-01 16:12:16 -05002552 ram_addr_t orig_memory = p->phys_offset;
2553 target_phys_addr_t start_addr2, end_addr2;
blueswir1db7b5422007-05-26 17:36:03 +00002554 int need_subpage = 0;
2555
2556 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2557 need_subpage);
Richard Hendersonf6405242010-04-22 16:47:31 -07002558 if (need_subpage) {
blueswir1db7b5422007-05-26 17:36:03 +00002559 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2560 subpage = subpage_init((addr & TARGET_PAGE_MASK),
pbrook8da3ff12008-12-01 18:59:50 +00002561 &p->phys_offset, orig_memory,
2562 p->region_offset);
blueswir1db7b5422007-05-26 17:36:03 +00002563 } else {
Avi Kivitya621f382012-01-02 13:12:08 +02002564 MemoryRegion *mr
2565 = io_mem_region[(orig_memory & ~TARGET_PAGE_MASK)
2566 >> IO_MEM_SHIFT];
2567 subpage = container_of(mr, subpage_t, iomem);
blueswir1db7b5422007-05-26 17:36:03 +00002568 }
pbrook8da3ff12008-12-01 18:59:50 +00002569 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2570 region_offset);
2571 p->region_offset = 0;
blueswir1db7b5422007-05-26 17:36:03 +00002572 } else {
2573 p->phys_offset = phys_offset;
Avi Kivity2774c6d2012-01-01 18:24:24 +02002574 p->region_offset = region_offset;
Avi Kivity1d393fa2012-01-01 21:15:42 +02002575 if (is_ram_rom_romd(phys_offset))
blueswir1db7b5422007-05-26 17:36:03 +00002576 phys_offset += TARGET_PAGE_SIZE;
2577 }
2578 } else {
2579 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2580 p->phys_offset = phys_offset;
pbrook8da3ff12008-12-01 18:59:50 +00002581 p->region_offset = region_offset;
Avi Kivity1d393fa2012-01-01 21:15:42 +02002582 if (is_ram_rom_romd(phys_offset)) {
blueswir1db7b5422007-05-26 17:36:03 +00002583 phys_offset += TARGET_PAGE_SIZE;
pbrook0e8f0962008-12-02 09:02:15 +00002584 } else {
Anthony Liguoric227f092009-10-01 16:12:16 -05002585 target_phys_addr_t start_addr2, end_addr2;
blueswir1db7b5422007-05-26 17:36:03 +00002586 int need_subpage = 0;
2587
2588 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2589 end_addr2, need_subpage);
2590
Richard Hendersonf6405242010-04-22 16:47:31 -07002591 if (need_subpage) {
blueswir1db7b5422007-05-26 17:36:03 +00002592 subpage = subpage_init((addr & TARGET_PAGE_MASK),
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002593 &p->phys_offset,
2594 io_mem_unassigned.ram_addr,
pbrook67c4d232009-02-23 13:16:07 +00002595 addr & TARGET_PAGE_MASK);
blueswir1db7b5422007-05-26 17:36:03 +00002596 subpage_register(subpage, start_addr2, end_addr2,
pbrook8da3ff12008-12-01 18:59:50 +00002597 phys_offset, region_offset);
2598 p->region_offset = 0;
blueswir1db7b5422007-05-26 17:36:03 +00002599 }
2600 }
2601 }
pbrook8da3ff12008-12-01 18:59:50 +00002602 region_offset += TARGET_PAGE_SIZE;
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002603 addr += TARGET_PAGE_SIZE;
2604 } while (addr != end_addr);
ths3b46e622007-09-17 08:09:54 +00002605
bellard9d420372006-06-25 22:25:22 +00002606 /* since each CPU stores ram addresses in its TLB cache, we must
2607 reset the modified entries */
2608 /* XXX: slow ! */
2609 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2610 tlb_flush(env, 1);
2611 }
bellard33417e72003-08-10 21:47:01 +00002612}
2613
Anthony Liguoric227f092009-10-01 16:12:16 -05002614void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002615{
2616 if (kvm_enabled())
2617 kvm_coalesce_mmio_region(addr, size);
2618}
2619
Anthony Liguoric227f092009-10-01 16:12:16 -05002620void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002621{
2622 if (kvm_enabled())
2623 kvm_uncoalesce_mmio_region(addr, size);
2624}
2625
Sheng Yang62a27442010-01-26 19:21:16 +08002626void qemu_flush_coalesced_mmio_buffer(void)
2627{
2628 if (kvm_enabled())
2629 kvm_flush_coalesced_mmio_buffer();
2630}
2631
Marcelo Tosattic9027602010-03-01 20:25:08 -03002632#if defined(__linux__) && !defined(TARGET_S390X)
2633
2634#include <sys/vfs.h>
2635
2636#define HUGETLBFS_MAGIC 0x958458f6
2637
2638static long gethugepagesize(const char *path)
2639{
2640 struct statfs fs;
2641 int ret;
2642
2643 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002644 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002645 } while (ret != 0 && errno == EINTR);
2646
2647 if (ret != 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002648 perror(path);
2649 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002650 }
2651
2652 if (fs.f_type != HUGETLBFS_MAGIC)
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002653 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002654
2655 return fs.f_bsize;
2656}
2657
Alex Williamson04b16652010-07-02 11:13:17 -06002658static void *file_ram_alloc(RAMBlock *block,
2659 ram_addr_t memory,
2660 const char *path)
Marcelo Tosattic9027602010-03-01 20:25:08 -03002661{
2662 char *filename;
2663 void *area;
2664 int fd;
2665#ifdef MAP_POPULATE
2666 int flags;
2667#endif
2668 unsigned long hpagesize;
2669
2670 hpagesize = gethugepagesize(path);
2671 if (!hpagesize) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002672 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002673 }
2674
2675 if (memory < hpagesize) {
2676 return NULL;
2677 }
2678
2679 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2680 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2681 return NULL;
2682 }
2683
2684 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002685 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002686 }
2687
2688 fd = mkstemp(filename);
2689 if (fd < 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002690 perror("unable to create backing store for hugepages");
2691 free(filename);
2692 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002693 }
2694 unlink(filename);
2695 free(filename);
2696
2697 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2698
2699 /*
2700 * ftruncate is not supported by hugetlbfs in older
2701 * hosts, so don't bother bailing out on errors.
2702 * If anything goes wrong with it under other filesystems,
2703 * mmap will fail.
2704 */
2705 if (ftruncate(fd, memory))
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002706 perror("ftruncate");
Marcelo Tosattic9027602010-03-01 20:25:08 -03002707
2708#ifdef MAP_POPULATE
2709 /* NB: MAP_POPULATE won't exhaustively allocate all physical pages when
2710 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2711 * to sidestep this quirk.
2712 */
2713 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2714 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2715#else
2716 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2717#endif
2718 if (area == MAP_FAILED) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002719 perror("file_ram_alloc: can't mmap RAM pages");
2720 close(fd);
2721 return (NULL);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002722 }
Alex Williamson04b16652010-07-02 11:13:17 -06002723 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002724 return area;
2725}
2726#endif
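
/* How this path is typically reached (hypothetical invocation): starting
 * QEMU with "-mem-path /dev/hugepages" points mem_path at a hugetlbfs
 * mount, so qemu_ram_alloc_from_ptr() below routes RAM allocations through
 * file_ram_alloc() instead of qemu_vmalloc(). */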
2727
Alex Williamsond17b5282010-06-25 11:08:38 -06002728static ram_addr_t find_ram_offset(ram_addr_t size)
2729{
Alex Williamson04b16652010-07-02 11:13:17 -06002730 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06002731 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002732
2733 if (QLIST_EMPTY(&ram_list.blocks))
2734 return 0;
2735
2736 QLIST_FOREACH(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002737 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002738
2739 end = block->offset + block->length;
2740
2741 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2742 if (next_block->offset >= end) {
2743 next = MIN(next, next_block->offset);
2744 }
2745 }
2746 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06002747 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06002748 mingap = next - end;
2749 }
2750 }
Alex Williamson3e837b22011-10-31 08:54:09 -06002751
2752 if (offset == RAM_ADDR_MAX) {
2753 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2754 (uint64_t)size);
2755 abort();
2756 }
2757
Alex Williamson04b16652010-07-02 11:13:17 -06002758 return offset;
2759}
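
/* Illustration (made-up layout): with existing blocks covering
 * [0x00000000, 0x08000000) and [0x10000000, 0x18000000), a request for
 * 0x04000000 bytes returns 0x08000000 -- the end of a block whose
 * following gap is the smallest one still big enough for the request. */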
2760
2761static ram_addr_t last_ram_offset(void)
2762{
Alex Williamsond17b5282010-06-25 11:08:38 -06002763 RAMBlock *block;
2764 ram_addr_t last = 0;
2765
2766 QLIST_FOREACH(block, &ram_list.blocks, next)
2767 last = MAX(last, block->offset + block->length);
2768
2769 return last;
2770}
2771
Avi Kivityc5705a72011-12-20 15:59:12 +02002772void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
Cam Macdonell84b89d72010-07-26 18:10:57 -06002773{
2774 RAMBlock *new_block, *block;
2775
Avi Kivityc5705a72011-12-20 15:59:12 +02002776 new_block = NULL;
2777 QLIST_FOREACH(block, &ram_list.blocks, next) {
2778 if (block->offset == addr) {
2779 new_block = block;
2780 break;
2781 }
2782 }
2783 assert(new_block);
2784 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002785
2786 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2787 char *id = dev->parent_bus->info->get_dev_path(dev);
2788 if (id) {
2789 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05002790 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002791 }
2792 }
2793 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2794
2795 QLIST_FOREACH(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02002796 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06002797 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2798 new_block->idstr);
2799 abort();
2800 }
2801 }
Avi Kivityc5705a72011-12-20 15:59:12 +02002802}
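
/* For illustration (hypothetical names): a block registered by a PCI VGA
 * device whose qdev path resolves to "0000:00:02.0", with name "vga.vram",
 * ends up with the idstr "0000:00:02.0/vga.vram"; without a qdev path the
 * idstr is just "vga.vram". */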
2803
2804ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2805 MemoryRegion *mr)
2806{
2807 RAMBlock *new_block;
2808
2809 size = TARGET_PAGE_ALIGN(size);
2810 new_block = g_malloc0(sizeof(*new_block));
Cam Macdonell84b89d72010-07-26 18:10:57 -06002811
Avi Kivity7c637362011-12-21 13:09:49 +02002812 new_block->mr = mr;
Jun Nakajima432d2682010-08-31 16:41:25 +01002813 new_block->offset = find_ram_offset(size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002814 if (host) {
2815 new_block->host = host;
Huang Yingcd19cfa2011-03-02 08:56:19 +01002816 new_block->flags |= RAM_PREALLOC_MASK;
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002817 } else {
2818 if (mem_path) {
2819#if defined (__linux__) && !defined(TARGET_S390X)
2820 new_block->host = file_ram_alloc(new_block, size, mem_path);
2821 if (!new_block->host) {
2822 new_block->host = qemu_vmalloc(size);
Andreas Färbere78815a2010-09-25 11:26:05 +00002823 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002824 }
2825#else
2826 fprintf(stderr, "-mem-path option unsupported\n");
2827 exit(1);
2828#endif
2829 } else {
2830#if defined(TARGET_S390X) && defined(CONFIG_KVM)
Christian Borntraegerff836782011-05-10 14:49:10 +02002831 /* S390 KVM requires the topmost vma of the RAM to be smaller than
2832 a system-defined value, which is at least 256GB. Larger systems
2833 have larger values. We put the guest between the end of data
2834 segment (system break) and this value. We use 32GB as a base to
2835 have enough room for the system break to grow. */
2836 new_block->host = mmap((void*)0x800000000, size,
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002837 PROT_EXEC|PROT_READ|PROT_WRITE,
Christian Borntraegerff836782011-05-10 14:49:10 +02002838 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
Alexander Graffb8b2732011-05-20 17:33:28 +02002839 if (new_block->host == MAP_FAILED) {
2840 fprintf(stderr, "Allocating RAM failed\n");
2841 abort();
2842 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002843#else
Jan Kiszka868bb332011-06-21 22:59:09 +02002844 if (xen_enabled()) {
Avi Kivityfce537d2011-12-18 15:48:55 +02002845 xen_ram_alloc(new_block->offset, size, mr);
Jun Nakajima432d2682010-08-31 16:41:25 +01002846 } else {
2847 new_block->host = qemu_vmalloc(size);
2848 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002849#endif
Andreas Färbere78815a2010-09-25 11:26:05 +00002850 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002851 }
2852 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06002853 new_block->length = size;
2854
2855 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2856
Anthony Liguori7267c092011-08-20 22:09:37 -05002857 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
Cam Macdonell84b89d72010-07-26 18:10:57 -06002858 last_ram_offset() >> TARGET_PAGE_BITS);
2859 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2860 0xff, size >> TARGET_PAGE_BITS);
2861
2862 if (kvm_enabled())
2863 kvm_setup_guest_memory(new_block->host, size);
2864
2865 return new_block->offset;
2866}
2867
Avi Kivityc5705a72011-12-20 15:59:12 +02002868ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
pbrook94a6b542009-04-11 17:15:54 +00002869{
Avi Kivityc5705a72011-12-20 15:59:12 +02002870 return qemu_ram_alloc_from_ptr(size, NULL, mr);
pbrook94a6b542009-04-11 17:15:54 +00002871}
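
/* A minimal usage sketch (illustrative; assumes "mr" is a MemoryRegion the
 * caller has already initialized, and elides error handling): */
#if 0
    ram_addr_t offset = qemu_ram_alloc(0x100000, mr);  /* 1 MB block */
    void *host = qemu_get_ram_ptr(offset);             /* host view of it */
    memset(host, 0, 0x100000);
    qemu_ram_free(offset);
#endif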
bellarde9a1ab12007-02-08 23:08:38 +00002872
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002873void qemu_ram_free_from_ptr(ram_addr_t addr)
2874{
2875 RAMBlock *block;
2876
2877 QLIST_FOREACH(block, &ram_list.blocks, next) {
2878 if (addr == block->offset) {
2879 QLIST_REMOVE(block, next);
Anthony Liguori7267c092011-08-20 22:09:37 -05002880 g_free(block);
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002881 return;
2882 }
2883 }
2884}
2885
Anthony Liguoric227f092009-10-01 16:12:16 -05002886void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00002887{
Alex Williamson04b16652010-07-02 11:13:17 -06002888 RAMBlock *block;
2889
2890 QLIST_FOREACH(block, &ram_list.blocks, next) {
2891 if (addr == block->offset) {
2892 QLIST_REMOVE(block, next);
Huang Yingcd19cfa2011-03-02 08:56:19 +01002893 if (block->flags & RAM_PREALLOC_MASK) {
2894 ;
2895 } else if (mem_path) {
Alex Williamson04b16652010-07-02 11:13:17 -06002896#if defined (__linux__) && !defined(TARGET_S390X)
2897 if (block->fd) {
2898 munmap(block->host, block->length);
2899 close(block->fd);
2900 } else {
2901 qemu_vfree(block->host);
2902 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01002903#else
2904 abort();
Alex Williamson04b16652010-07-02 11:13:17 -06002905#endif
2906 } else {
2907#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2908 munmap(block->host, block->length);
2909#else
Jan Kiszka868bb332011-06-21 22:59:09 +02002910 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002911 xen_invalidate_map_cache_entry(block->host);
Jun Nakajima432d2682010-08-31 16:41:25 +01002912 } else {
2913 qemu_vfree(block->host);
2914 }
Alex Williamson04b16652010-07-02 11:13:17 -06002915#endif
2916 }
Anthony Liguori7267c092011-08-20 22:09:37 -05002917 g_free(block);
Alex Williamson04b16652010-07-02 11:13:17 -06002918 return;
2919 }
2920 }
2921
bellarde9a1ab12007-02-08 23:08:38 +00002922}
2923
Huang Yingcd19cfa2011-03-02 08:56:19 +01002924#ifndef _WIN32
2925void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2926{
2927 RAMBlock *block;
2928 ram_addr_t offset;
2929 int flags;
2930 void *area, *vaddr;
2931
2932 QLIST_FOREACH(block, &ram_list.blocks, next) {
2933 offset = addr - block->offset;
2934 if (offset < block->length) {
2935 vaddr = block->host + offset;
2936 if (block->flags & RAM_PREALLOC_MASK) {
2937 ;
2938 } else {
2939 flags = MAP_FIXED;
2940 munmap(vaddr, length);
2941 if (mem_path) {
2942#if defined(__linux__) && !defined(TARGET_S390X)
2943 if (block->fd) {
2944#ifdef MAP_POPULATE
2945 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
2946 MAP_PRIVATE;
2947#else
2948 flags |= MAP_PRIVATE;
2949#endif
2950 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2951 flags, block->fd, offset);
2952 } else {
2953 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2954 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2955 flags, -1, 0);
2956 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01002957#else
2958 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01002959#endif
2960 } else {
2961#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2962 flags |= MAP_SHARED | MAP_ANONYMOUS;
2963 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
2964 flags, -1, 0);
2965#else
2966 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2967 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2968 flags, -1, 0);
2969#endif
2970 }
2971 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002972 fprintf(stderr, "Could not remap addr: "
2973 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01002974 length, addr);
2975 exit(1);
2976 }
2977 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
2978 }
2979 return;
2980 }
2981 }
2982}
2983#endif /* !_WIN32 */
2984
pbrookdc828ca2009-04-09 22:21:07 +00002985/* Return a host pointer to ram allocated with qemu_ram_alloc.
pbrook5579c7f2009-04-11 14:47:08 +00002986 With the exception of the softmmu code in this file, this should
2987 only be used for local memory (e.g. video ram) that the device owns,
2988 and knows it isn't going to access beyond the end of the block.
2989
2990 It should not be used for general purpose DMA.
2991 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2992 */
Anthony Liguoric227f092009-10-01 16:12:16 -05002993void *qemu_get_ram_ptr(ram_addr_t addr)
pbrookdc828ca2009-04-09 22:21:07 +00002994{
pbrook94a6b542009-04-11 17:15:54 +00002995 RAMBlock *block;
2996
Alex Williamsonf471a172010-06-11 11:11:42 -06002997 QLIST_FOREACH(block, &ram_list.blocks, next) {
2998 if (addr - block->offset < block->length) {
Vincent Palatin7d82af32011-03-10 15:47:46 -05002999 /* Move this entry to the start of the list. */
3000 if (block != QLIST_FIRST(&ram_list.blocks)) {
3001 QLIST_REMOVE(block, next);
3002 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
3003 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003004 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003005 /* We need to check if the requested address is in the RAM
3006 * because we don't want to map the entire memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003007 * In that case just map until the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01003008 */
3009 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003010 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01003011 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003012 block->host =
3013 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01003014 }
3015 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003016 return block->host + (addr - block->offset);
3017 }
pbrook94a6b542009-04-11 17:15:54 +00003018 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003019
3020 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3021 abort();
3022
3023 return NULL;
pbrookdc828ca2009-04-09 22:21:07 +00003024}
3025
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02003026/* Return a host pointer to ram allocated with qemu_ram_alloc.
3027 * Same as qemu_get_ram_ptr, but avoids reordering ramblocks.
3028 */
3029void *qemu_safe_ram_ptr(ram_addr_t addr)
3030{
3031 RAMBlock *block;
3032
3033 QLIST_FOREACH(block, &ram_list.blocks, next) {
3034 if (addr - block->offset < block->length) {
Jan Kiszka868bb332011-06-21 22:59:09 +02003035 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003036 /* We need to check if the requested address is in the RAM
3037 * because we don't want to map the entire memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003038 * In that case just map until the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01003039 */
3040 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003041 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01003042 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003043 block->host =
3044 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01003045 }
3046 }
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02003047 return block->host + (addr - block->offset);
3048 }
3049 }
3050
3051 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3052 abort();
3053
3054 return NULL;
3055}
3056
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003057/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
3058 * but takes a size argument */
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003059void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003060{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003061 if (*size == 0) {
3062 return NULL;
3063 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003064 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003065 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02003066 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003067 RAMBlock *block;
3068
3069 QLIST_FOREACH(block, &ram_list.blocks, next) {
3070 if (addr - block->offset < block->length) {
3071 if (addr - block->offset + *size > block->length)
3072 *size = block->length - addr + block->offset;
3073 return block->host + (addr - block->offset);
3074 }
3075 }
3076
3077 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3078 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003079 }
3080}
3081
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003082void qemu_put_ram_ptr(void *addr)
3083{
3084 trace_qemu_put_ram_ptr(addr);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003085}
3086
Marcelo Tosattie8902612010-10-11 15:31:19 -03003087int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00003088{
pbrook94a6b542009-04-11 17:15:54 +00003089 RAMBlock *block;
3090 uint8_t *host = ptr;
3091
Jan Kiszka868bb332011-06-21 22:59:09 +02003092 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003093 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003094 return 0;
3095 }
3096
Alex Williamsonf471a172010-06-11 11:11:42 -06003097 QLIST_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003098 /* This case can happen when the block is not mapped. */
3099 if (block->host == NULL) {
3100 continue;
3101 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003102 if (host - block->host < block->length) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03003103 *ram_addr = block->offset + (host - block->host);
3104 return 0;
Alex Williamsonf471a172010-06-11 11:11:42 -06003105 }
pbrook94a6b542009-04-11 17:15:54 +00003106 }
Jun Nakajima432d2682010-08-31 16:41:25 +01003107
Marcelo Tosattie8902612010-10-11 15:31:19 -03003108 return -1;
3109}
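
/* Round-trip sketch (illustrative, non-Xen case): a host pointer obtained
 * from qemu_get_ram_ptr() maps back to the ram_addr_t it came from: */
#if 0
    ram_addr_t ra;
    void *p = qemu_get_ram_ptr(offset);
    if (qemu_ram_addr_from_host(p, &ra) == 0) {
        assert(ra == offset);
    }
#endif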
Alex Williamsonf471a172010-06-11 11:11:42 -06003110
Marcelo Tosattie8902612010-10-11 15:31:19 -03003111/* Some of the softmmu routines need to translate from a host pointer
3112 (typically a TLB entry) back to a ram offset. */
3113ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3114{
3115 ram_addr_t ram_addr;
Alex Williamsonf471a172010-06-11 11:11:42 -06003116
Marcelo Tosattie8902612010-10-11 15:31:19 -03003117 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3118 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3119 abort();
3120 }
3121 return ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00003122}
3123
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003124static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
3125 unsigned size)
bellard33417e72003-08-10 21:47:01 +00003126{
pbrook67d3b952006-12-18 05:03:52 +00003127#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00003128 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
pbrook67d3b952006-12-18 05:03:52 +00003129#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003130#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003131 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00003132#endif
3133 return 0;
3134}
3135
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003136static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
3137 uint64_t val, unsigned size)
blueswir1e18231a2008-10-06 18:46:28 +00003138{
3139#ifdef DEBUG_UNASSIGNED
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003140 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
blueswir1e18231a2008-10-06 18:46:28 +00003141#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003142#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003143 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00003144#endif
3145}
3146
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003147static const MemoryRegionOps unassigned_mem_ops = {
3148 .read = unassigned_mem_read,
3149 .write = unassigned_mem_write,
3150 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00003151};
3152
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003153static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
3154 unsigned size)
3155{
3156 abort();
3157}
3158
3159static void error_mem_write(void *opaque, target_phys_addr_t addr,
3160 uint64_t value, unsigned size)
3161{
3162 abort();
3163}
3164
3165static const MemoryRegionOps error_mem_ops = {
3166 .read = error_mem_read,
3167 .write = error_mem_write,
3168 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00003169};
3170
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003171static const MemoryRegionOps rom_mem_ops = {
3172 .read = error_mem_read,
3173 .write = unassigned_mem_write,
3174 .endianness = DEVICE_NATIVE_ENDIAN,
3175};
3176
3177static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
3178 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00003179{
bellard3a7d9292005-08-21 09:26:42 +00003180 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003181 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003182 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3183#if !defined(CONFIG_USER_ONLY)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003184 tb_invalidate_phys_page_fast(ram_addr, size);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003185 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003186#endif
3187 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003188 switch (size) {
3189 case 1:
3190 stb_p(qemu_get_ram_ptr(ram_addr), val);
3191 break;
3192 case 2:
3193 stw_p(qemu_get_ram_ptr(ram_addr), val);
3194 break;
3195 case 4:
3196 stl_p(qemu_get_ram_ptr(ram_addr), val);
3197 break;
3198 default:
3199 abort();
3200 }
bellardf23db162005-08-21 19:12:28 +00003201 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003202 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00003203 /* we remove the notdirty callback only if the code has been
3204 flushed */
3205 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00003206 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00003207}
3208
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003209static const MemoryRegionOps notdirty_mem_ops = {
3210 .read = error_mem_read,
3211 .write = notdirty_mem_write,
3212 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00003213};
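
/* Flow sketch for the ops above: a guest store to a clean RAM page is
 * routed here by the TLB; the write first invalidates any translated code
 * on the page, then performs the store, sets the dirty flags, and once the
 * page holds no translated code switches the TLB entry back to direct RAM
 * access via tlb_set_dirty(). */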
3214
pbrook0f459d12008-06-09 00:20:13 +00003215/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00003216static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00003217{
3218 CPUState *env = cpu_single_env;
aliguori06d55cc2008-11-18 20:24:06 +00003219 target_ulong pc, cs_base;
3220 TranslationBlock *tb;
pbrook0f459d12008-06-09 00:20:13 +00003221 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00003222 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00003223 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00003224
aliguori06d55cc2008-11-18 20:24:06 +00003225 if (env->watchpoint_hit) {
3226 /* We re-entered the check after replacing the TB. Now raise
3227 * the debug interrupt so that is will trigger after the
3228 * current instruction. */
3229 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3230 return;
3231 }
pbrook2e70f6e2008-06-29 01:03:05 +00003232 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003233 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00003234 if ((vaddr == (wp->vaddr & len_mask) ||
3235 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00003236 wp->flags |= BP_WATCHPOINT_HIT;
3237 if (!env->watchpoint_hit) {
3238 env->watchpoint_hit = wp;
3239 tb = tb_find_pc(env->mem_io_pc);
3240 if (!tb) {
3241 cpu_abort(env, "check_watchpoint: could not find TB for "
3242 "pc=%p", (void *)env->mem_io_pc);
3243 }
Stefan Weil618ba8e2011-04-18 06:39:53 +00003244 cpu_restore_state(tb, env, env->mem_io_pc);
aliguori6e140f22008-11-18 20:37:55 +00003245 tb_phys_invalidate(tb, -1);
3246 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3247 env->exception_index = EXCP_DEBUG;
3248 } else {
3249 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3250 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3251 }
3252 cpu_resume_from_signal(env, NULL);
aliguori06d55cc2008-11-18 20:24:06 +00003253 }
aliguori6e140f22008-11-18 20:37:55 +00003254 } else {
3255 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00003256 }
3257 }
3258}
3259
pbrook6658ffb2007-03-16 23:58:11 +00003260/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3261 so these check for a hit then pass through to the normal out-of-line
3262 phys routines. */
Avi Kivity1ec9b902012-01-02 12:47:48 +02003263static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
3264 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00003265{
Avi Kivity1ec9b902012-01-02 12:47:48 +02003266 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
3267 switch (size) {
3268 case 1: return ldub_phys(addr);
3269 case 2: return lduw_phys(addr);
3270 case 4: return ldl_phys(addr);
3271 default: abort();
3272 }
pbrook6658ffb2007-03-16 23:58:11 +00003273}
3274
Avi Kivity1ec9b902012-01-02 12:47:48 +02003275static void watch_mem_write(void *opaque, target_phys_addr_t addr,
3276 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00003277{
Avi Kivity1ec9b902012-01-02 12:47:48 +02003278 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
3279 switch (size) {
3280 case 1: stb_phys(addr, val); break;
3281 case 2: stw_phys(addr, val); break;
3282 case 4: stl_phys(addr, val); break;
3283 default: abort();
3284 }
pbrook6658ffb2007-03-16 23:58:11 +00003285}
3286
Avi Kivity1ec9b902012-01-02 12:47:48 +02003287static const MemoryRegionOps watch_mem_ops = {
3288 .read = watch_mem_read,
3289 .write = watch_mem_write,
3290 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00003291};
pbrook6658ffb2007-03-16 23:58:11 +00003292
Avi Kivity70c68e42012-01-02 12:32:48 +02003293static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
3294 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003295{
Avi Kivity70c68e42012-01-02 12:32:48 +02003296 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003297 unsigned int idx = SUBPAGE_IDX(addr);
blueswir1db7b5422007-05-26 17:36:03 +00003298#if defined(DEBUG_SUBPAGE)
3299 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3300 mmio, len, addr, idx);
3301#endif
blueswir1db7b5422007-05-26 17:36:03 +00003302
Richard Hendersonf6405242010-04-22 16:47:31 -07003303 addr += mmio->region_offset[idx];
3304 idx = mmio->sub_io_index[idx];
Avi Kivity70c68e42012-01-02 12:32:48 +02003305 return io_mem_read(idx, addr, len);
blueswir1db7b5422007-05-26 17:36:03 +00003306}
3307
Avi Kivity70c68e42012-01-02 12:32:48 +02003308static void subpage_write(void *opaque, target_phys_addr_t addr,
3309 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003310{
Avi Kivity70c68e42012-01-02 12:32:48 +02003311 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003312 unsigned int idx = SUBPAGE_IDX(addr);
blueswir1db7b5422007-05-26 17:36:03 +00003313#if defined(DEBUG_SUBPAGE)
Avi Kivity70c68e42012-01-02 12:32:48 +02003314 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
3315 " idx %d value %"PRIx64"\n",
Richard Hendersonf6405242010-04-22 16:47:31 -07003316 __func__, mmio, len, addr, idx, value);
blueswir1db7b5422007-05-26 17:36:03 +00003317#endif
Richard Hendersonf6405242010-04-22 16:47:31 -07003318
3319 addr += mmio->region_offset[idx];
3320 idx = mmio->sub_io_index[idx];
Avi Kivity70c68e42012-01-02 12:32:48 +02003321 io_mem_write(idx, addr, value, len);
blueswir1db7b5422007-05-26 17:36:03 +00003322}
3323
Avi Kivity70c68e42012-01-02 12:32:48 +02003324static const MemoryRegionOps subpage_ops = {
3325 .read = subpage_read,
3326 .write = subpage_write,
3327 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00003328};
3329
Avi Kivityde712f92012-01-02 12:41:07 +02003330static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
3331 unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01003332{
3333 ram_addr_t raddr = addr;
3334 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02003335 switch (size) {
3336 case 1: return ldub_p(ptr);
3337 case 2: return lduw_p(ptr);
3338 case 4: return ldl_p(ptr);
3339 default: abort();
3340 }
Andreas Färber56384e82011-11-30 16:26:21 +01003341}
3342
Avi Kivityde712f92012-01-02 12:41:07 +02003343static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
3344 uint64_t value, unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01003345{
3346 ram_addr_t raddr = addr;
3347 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02003348 switch (size) {
3349 case 1: return stb_p(ptr, value);
3350 case 2: return stw_p(ptr, value);
3351 case 4: return stl_p(ptr, value);
3352 default: abort();
3353 }
Andreas Färber56384e82011-11-30 16:26:21 +01003354}
3355
Avi Kivityde712f92012-01-02 12:41:07 +02003356static const MemoryRegionOps subpage_ram_ops = {
3357 .read = subpage_ram_read,
3358 .write = subpage_ram_write,
3359 .endianness = DEVICE_NATIVE_ENDIAN,
Andreas Färber56384e82011-11-30 16:26:21 +01003360};
3361
Anthony Liguoric227f092009-10-01 16:12:16 -05003362static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3363 ram_addr_t memory, ram_addr_t region_offset)
blueswir1db7b5422007-05-26 17:36:03 +00003364{
3365 int idx, eidx;
3366
3367 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3368 return -1;
3369 idx = SUBPAGE_IDX(start);
3370 eidx = SUBPAGE_IDX(end);
3371#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00003372 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
blueswir1db7b5422007-05-26 17:36:03 +00003373 mmio, start, end, idx, eidx, memory);
3374#endif
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003375 if ((memory & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
Avi Kivityde712f92012-01-02 12:41:07 +02003376 memory = io_mem_subpage_ram.ram_addr;
Andreas Färber56384e82011-11-30 16:26:21 +01003377 }
Richard Hendersonf6405242010-04-22 16:47:31 -07003378 memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
blueswir1db7b5422007-05-26 17:36:03 +00003379 for (; idx <= eidx; idx++) {
Richard Hendersonf6405242010-04-22 16:47:31 -07003380 mmio->sub_io_index[idx] = memory;
3381 mmio->region_offset[idx] = region_offset;
blueswir1db7b5422007-05-26 17:36:03 +00003382 }
3383
3384 return 0;
3385}
3386
Richard Hendersonf6405242010-04-22 16:47:31 -07003387static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3388 ram_addr_t orig_memory,
3389 ram_addr_t region_offset)
blueswir1db7b5422007-05-26 17:36:03 +00003390{
Anthony Liguoric227f092009-10-01 16:12:16 -05003391 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00003392 int subpage_memory;
3393
Anthony Liguori7267c092011-08-20 22:09:37 -05003394 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00003395
3396 mmio->base = base;
Avi Kivity70c68e42012-01-02 12:32:48 +02003397 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
3398 "subpage", TARGET_PAGE_SIZE);
3399 subpage_memory = mmio->iomem.ram_addr;
blueswir1db7b5422007-05-26 17:36:03 +00003400#if defined(DEBUG_SUBPAGE)
aliguori1eec6142009-02-05 22:06:18 +00003401 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3402 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
blueswir1db7b5422007-05-26 17:36:03 +00003403#endif
aliguori1eec6142009-02-05 22:06:18 +00003404 *phys = subpage_memory | IO_MEM_SUBPAGE;
Richard Hendersonf6405242010-04-22 16:47:31 -07003405 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
blueswir1db7b5422007-05-26 17:36:03 +00003406
3407 return mmio;
3408}
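
/* Illustration (made-up split): if the low 2 KiB of a 4 KiB page are RAM
 * and the high 2 KiB are device MMIO, subpage_init() first covers the
 * whole page with the original memory, and a later subpage_register()
 * call overrides byte offsets [0x800, 0xfff]; subpage_read()/subpage_write()
 * then pick the handler per access via SUBPAGE_IDX(addr). */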
3409
aliguori88715652009-02-11 15:20:58 +00003410static int get_free_io_mem_idx(void)
3411{
3412 int i;
3413
3414 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3415 if (!io_mem_used[i]) {
3416 io_mem_used[i] = 1;
3417 return i;
3418 }
Riku Voipioc6703b42009-12-03 15:56:05 +02003419 fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
aliguori88715652009-02-11 15:20:58 +00003420 return -1;
3421}
3422
bellard33417e72003-08-10 21:47:01 +00003423/* Register a MemoryRegion for use as an io zone; reads and writes
 3424 are dispatched through the region's MemoryRegionOps.
blueswir13ee89922008-01-02 19:45:26 +00003425 If io_index is non-zero, the corresponding io zone is
blueswir14254fab2008-01-01 16:57:19 +00003426 modified. If it is zero, a new io zone is allocated. The return
 3427 value can be used with cpu_register_physical_memory(). (-1) is
 3428 returned on error. */
Avi Kivitya621f382012-01-02 13:12:08 +02003430static int cpu_register_io_memory_fixed(int io_index, MemoryRegion *mr)
bellard33417e72003-08-10 21:47:01 +00003431{
bellard33417e72003-08-10 21:47:01 +00003432 if (io_index <= 0) {
aliguori88715652009-02-11 15:20:58 +00003433 io_index = get_free_io_mem_idx();
3434 if (io_index == -1)
3435 return io_index;
bellard33417e72003-08-10 21:47:01 +00003436 } else {
Avi Kivity1eed09c2009-06-14 11:38:51 +03003437 io_index >>= IO_MEM_SHIFT;
bellard33417e72003-08-10 21:47:01 +00003438 if (io_index >= IO_MEM_NB_ENTRIES)
3439 return -1;
3440 }
bellardb5ff1b32005-11-26 10:38:39 +00003441
Avi Kivitya621f382012-01-02 13:12:08 +02003442 io_mem_region[io_index] = mr;
Richard Hendersonf6405242010-04-22 16:47:31 -07003443
3444 return (io_index << IO_MEM_SHIFT);
bellard33417e72003-08-10 21:47:01 +00003445}
bellard61382a52003-10-27 21:22:23 +00003446
Avi Kivitya621f382012-01-02 13:12:08 +02003447int cpu_register_io_memory(MemoryRegion *mr)
Avi Kivity1eed09c2009-06-14 11:38:51 +03003448{
Avi Kivitya621f382012-01-02 13:12:08 +02003449 return cpu_register_io_memory_fixed(0, mr);
Avi Kivity1eed09c2009-06-14 11:38:51 +03003450}
3451
aliguori88715652009-02-11 15:20:58 +00003452void cpu_unregister_io_memory(int io_table_address)
3453{
aliguori88715652009-02-11 15:20:58 +00003454 int io_index = io_table_address >> IO_MEM_SHIFT;
3455
Avi Kivitya621f382012-01-02 13:12:08 +02003456 io_mem_region[io_index] = NULL;
aliguori88715652009-02-11 15:20:58 +00003457 io_mem_used[io_index] = 0;
3458}
3459
Avi Kivitye9179ce2009-06-14 11:38:52 +03003460static void io_mem_init(void)
3461{
3462 int i;
3463
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003464 /* Must be first: */
3465 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
3466 assert(io_mem_ram.ram_addr == 0);
3467 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
3468 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
3469 "unassigned", UINT64_MAX);
3470 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
3471 "notdirty", UINT64_MAX);
Avi Kivityde712f92012-01-02 12:41:07 +02003472 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
3473 "subpage-ram", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003474 for (i=0; i<5; i++)
3475 io_mem_used[i] = 1;
3476
Avi Kivity1ec9b902012-01-02 12:47:48 +02003477 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
3478 "watch", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003479}
3480
Avi Kivity62152b82011-07-26 14:26:14 +03003481static void memory_map_init(void)
3482{
Anthony Liguori7267c092011-08-20 22:09:37 -05003483 system_memory = g_malloc(sizeof(*system_memory));
Avi Kivity8417ceb2011-08-03 11:56:14 +03003484 memory_region_init(system_memory, "system", INT64_MAX);
Avi Kivity62152b82011-07-26 14:26:14 +03003485 set_system_memory_map(system_memory);
Avi Kivity309cb472011-08-08 16:09:03 +03003486
Anthony Liguori7267c092011-08-20 22:09:37 -05003487 system_io = g_malloc(sizeof(*system_io));
Avi Kivity309cb472011-08-08 16:09:03 +03003488 memory_region_init(system_io, "io", 65536);
3489 set_system_io_map(system_io);
Avi Kivity62152b82011-07-26 14:26:14 +03003490}
3491
3492MemoryRegion *get_system_memory(void)
3493{
3494 return system_memory;
3495}
3496
Avi Kivity309cb472011-08-08 16:09:03 +03003497MemoryRegion *get_system_io(void)
3498{
3499 return system_io;
3500}
3501
pbrooke2eef172008-06-08 01:09:01 +00003502#endif /* !defined(CONFIG_USER_ONLY) */
3503
bellard13eb76e2004-01-24 15:23:36 +00003504/* physical memory access (slow version, mainly for debug) */
3505#if defined(CONFIG_USER_ONLY)
Paul Brooka68fe892010-03-01 00:08:59 +00003506int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3507 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003508{
3509 int l, flags;
3510 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00003511 void * p;
bellard13eb76e2004-01-24 15:23:36 +00003512
3513 while (len > 0) {
3514 page = addr & TARGET_PAGE_MASK;
3515 l = (page + TARGET_PAGE_SIZE) - addr;
3516 if (l > len)
3517 l = len;
3518 flags = page_get_flags(page);
3519 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00003520 return -1;
bellard13eb76e2004-01-24 15:23:36 +00003521 if (is_write) {
3522 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00003523 return -1;
bellard579a97f2007-11-11 14:26:47 +00003524 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003525 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00003526 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003527 memcpy(p, buf, l);
3528 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00003529 } else {
3530 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00003531 return -1;
bellard579a97f2007-11-11 14:26:47 +00003532 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003533 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00003534 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003535 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00003536 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00003537 }
3538 len -= l;
3539 buf += l;
3540 addr += l;
3541 }
Paul Brooka68fe892010-03-01 00:08:59 +00003542 return 0;
bellard13eb76e2004-01-24 15:23:36 +00003543}
bellard8df1cd02005-01-28 22:37:22 +00003544
bellard13eb76e2004-01-24 15:23:36 +00003545#else
Anthony Liguoric227f092009-10-01 16:12:16 -05003546void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
bellard13eb76e2004-01-24 15:23:36 +00003547 int len, int is_write)
3548{
3549 int l, io_index;
3550 uint8_t *ptr;
3551 uint32_t val;
Anthony Liguoric227f092009-10-01 16:12:16 -05003552 target_phys_addr_t page;
Anthony PERARD8ca56922011-07-15 04:32:53 +00003553 ram_addr_t pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003554 PhysPageDesc p;
ths3b46e622007-09-17 08:09:54 +00003555
bellard13eb76e2004-01-24 15:23:36 +00003556 while (len > 0) {
3557 page = addr & TARGET_PAGE_MASK;
3558 l = (page + TARGET_PAGE_SIZE) - addr;
3559 if (l > len)
3560 l = len;
bellard92e873b2004-05-21 14:52:29 +00003561 p = phys_page_find(page >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003562 pd = p.phys_offset;
ths3b46e622007-09-17 08:09:54 +00003563
bellard13eb76e2004-01-24 15:23:36 +00003564 if (is_write) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003565 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003566 target_phys_addr_t addr1;
bellard13eb76e2004-01-24 15:23:36 +00003567 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003568 addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
bellard6a00d602005-11-21 23:25:50 +00003569 /* XXX: could force cpu_single_env to NULL to avoid
3570 potential bugs */
aurel326c2934d2009-02-18 21:37:17 +00003571 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003572 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003573 val = ldl_p(buf);
Avi Kivityacbbec52011-11-21 12:27:03 +02003574 io_mem_write(io_index, addr1, val, 4);
bellard13eb76e2004-01-24 15:23:36 +00003575 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003576 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003577 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003578 val = lduw_p(buf);
Avi Kivityacbbec52011-11-21 12:27:03 +02003579 io_mem_write(io_index, addr1, val, 2);
bellard13eb76e2004-01-24 15:23:36 +00003580 l = 2;
3581 } else {
bellard1c213d12005-09-03 10:49:04 +00003582 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003583 val = ldub_p(buf);
Avi Kivityacbbec52011-11-21 12:27:03 +02003584 io_mem_write(io_index, addr1, val, 1);
bellard13eb76e2004-01-24 15:23:36 +00003585 l = 1;
3586 }
3587 } else {
Anthony PERARD8ca56922011-07-15 04:32:53 +00003588 ram_addr_t addr1;
bellardb448f2f2004-02-25 23:24:04 +00003589 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
bellard13eb76e2004-01-24 15:23:36 +00003590 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003591 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00003592 memcpy(ptr, buf, l);
bellard3a7d9292005-08-21 09:26:42 +00003593 if (!cpu_physical_memory_is_dirty(addr1)) {
3594 /* invalidate code */
3595 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3596 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003597 cpu_physical_memory_set_dirty_flags(
3598 addr1, (0xff & ~CODE_DIRTY_FLAG));
bellard3a7d9292005-08-21 09:26:42 +00003599 }
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003600 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00003601 }
3602 } else {
Avi Kivity1d393fa2012-01-01 21:15:42 +02003603 if (!is_ram_rom_romd(pd)) {
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003604 target_phys_addr_t addr1;
bellard13eb76e2004-01-24 15:23:36 +00003605 /* I/O case */
3606 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003607 addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
aurel326c2934d2009-02-18 21:37:17 +00003608 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003609 /* 32 bit read access */
Avi Kivityacbbec52011-11-21 12:27:03 +02003610 val = io_mem_read(io_index, addr1, 4);
bellardc27004e2005-01-03 23:35:10 +00003611 stl_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003612 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003613 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003614 /* 16 bit read access */
Avi Kivityacbbec52011-11-21 12:27:03 +02003615 val = io_mem_read(io_index, addr1, 2);
bellardc27004e2005-01-03 23:35:10 +00003616 stw_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003617 l = 2;
3618 } else {
bellard1c213d12005-09-03 10:49:04 +00003619 /* 8 bit read access */
Avi Kivityacbbec52011-11-21 12:27:03 +02003620 val = io_mem_read(io_index, addr1, 1);
bellardc27004e2005-01-03 23:35:10 +00003621 stb_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003622 l = 1;
3623 }
3624 } else {
3625 /* RAM case */
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003626 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
3627 memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
3628 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00003629 }
3630 }
3631 len -= l;
3632 buf += l;
3633 addr += l;
3634 }
3635}
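
/* Usage sketch (illustrative; "gpa" stands for a hypothetical guest
 * physical address): cpu_physical_memory_read()/write() are thin wrappers
 * around cpu_physical_memory_rw(). */
#if 0
    uint8_t buf[TARGET_PAGE_SIZE];
    cpu_physical_memory_read(gpa, buf, sizeof(buf));   /* copy page out */
    stl_p(buf, 0x12345678);                            /* patch one word */
    cpu_physical_memory_write(gpa, buf, sizeof(buf));  /* copy page back */
#endif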
bellard8df1cd02005-01-28 22:37:22 +00003636
bellardd0ecd2a2006-04-23 17:14:48 +00003637/* used for ROM loading : can write in RAM and ROM */
Anthony Liguoric227f092009-10-01 16:12:16 -05003638void cpu_physical_memory_write_rom(target_phys_addr_t addr,
bellardd0ecd2a2006-04-23 17:14:48 +00003639 const uint8_t *buf, int len)
3640{
3641 int l;
3642 uint8_t *ptr;
Anthony Liguoric227f092009-10-01 16:12:16 -05003643 target_phys_addr_t page;
bellardd0ecd2a2006-04-23 17:14:48 +00003644 unsigned long pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003645 PhysPageDesc p;
ths3b46e622007-09-17 08:09:54 +00003646
bellardd0ecd2a2006-04-23 17:14:48 +00003647 while (len > 0) {
3648 page = addr & TARGET_PAGE_MASK;
3649 l = (page + TARGET_PAGE_SIZE) - addr;
3650 if (l > len)
3651 l = len;
3652 p = phys_page_find(page >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003653 pd = p.phys_offset;
ths3b46e622007-09-17 08:09:54 +00003654
Avi Kivity1d393fa2012-01-01 21:15:42 +02003655 if (!is_ram_rom_romd(pd)) {
bellardd0ecd2a2006-04-23 17:14:48 +00003656 /* do nothing */
3657 } else {
3658 unsigned long addr1;
3659 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3660 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003661 ptr = qemu_get_ram_ptr(addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00003662 memcpy(ptr, buf, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003663 qemu_put_ram_ptr(ptr);
bellardd0ecd2a2006-04-23 17:14:48 +00003664 }
3665 len -= l;
3666 buf += l;
3667 addr += l;
3668 }
3669}
3670
aliguori6d16c2f2009-01-22 16:59:11 +00003671typedef struct {
3672 void *buffer;
Anthony Liguoric227f092009-10-01 16:12:16 -05003673 target_phys_addr_t addr;
3674 target_phys_addr_t len;
aliguori6d16c2f2009-01-22 16:59:11 +00003675} BounceBuffer;
3676
3677static BounceBuffer bounce;
3678
aliguoriba223c22009-01-22 16:59:16 +00003679typedef struct MapClient {
3680 void *opaque;
3681 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00003682 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00003683} MapClient;
3684
Blue Swirl72cf2d42009-09-12 07:36:22 +00003685static QLIST_HEAD(map_client_list, MapClient) map_client_list
3686 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003687
3688void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3689{
Anthony Liguori7267c092011-08-20 22:09:37 -05003690 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00003691
3692 client->opaque = opaque;
3693 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003694 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00003695 return client;
3696}
3697
3698void cpu_unregister_map_client(void *_client)
3699{
3700 MapClient *client = (MapClient *)_client;
3701
Blue Swirl72cf2d42009-09-12 07:36:22 +00003702 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05003703 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00003704}
3705
3706static void cpu_notify_map_clients(void)
3707{
3708 MapClient *client;
3709
Blue Swirl72cf2d42009-09-12 07:36:22 +00003710 while (!QLIST_EMPTY(&map_client_list)) {
3711 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003712 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09003713 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00003714 }
3715}

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t todo = 0;
    int l;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc p;
    ram_addr_t raddr = RAM_ADDR_MAX;
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        pd = p.phys_offset;

        if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_read(addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        }

        len -= l;
        addr += l;
        todo += l;
    }
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
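
/* Usage sketch (illustrative only; "dev", "dev_fill", "addr" and "size"
 * are hypothetical): the usual map/use/unmap pattern.  Because the call
 * may map less than requested, callers loop until the whole range is
 * covered.
 *
 *     while (size > 0) {
 *         target_phys_addr_t l = size;
 *         void *buf = cpu_physical_memory_map(addr, &l, 1);  // is_write
 *         if (!buf) {
 *             break;   // exhausted: fall back or register a map client
 *         }
 *         dev_fill(dev, buf, l);                // write into guest RAM
 *         cpu_physical_memory_unmap(buf, l, 1, l);
 *         addr += l;
 *         size -= l;
 *     }
 */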

/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if (!is_ram_rom_romd(pd)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
        val = io_mem_read(io_index, addr, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
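
/* Usage sketch (illustrative only; "desc" is a hypothetical guest-physical
 * address of a device descriptor): device models pick the variant that
 * matches the device's wire format instead of byte-swapping by hand.
 *
 *     uint32_t flags = ldl_le_phys(desc);        // little-endian layout
 *     uint32_t magic = ldl_be_phys(desc + 4);    // big-endian field
 *     uint32_t raw   = ldl_phys(desc + 8);       // target-native order
 */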

/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if (!is_ram_rom_romd(pd)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;

        /* XXX This is broken when device endian != cpu endian.
           Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = io_mem_read(io_index, addr, 4) << 32;
        val |= io_mem_read(io_index, addr + 4, 4);
#else
        val = io_mem_read(io_index, addr, 4);
        val |= io_mem_read(io_index, addr + 4, 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
                                          enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if (!is_ram_rom_romd(pd)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
        val = io_mem_read(io_index, addr, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned. The RAM page is not marked as dirty
   and the code inside is not invalidated. This is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
        io_mem_write(io_index, addr, val, 4);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write(io_index, addr, val >> 32, 4);
        io_mem_write(io_index, addr + 4, (uint32_t)val, 4);
#else
        io_mem_write(io_index, addr, (uint32_t)val, 4);
        io_mem_write(io_index, addr + 4, val >> 32, 4);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(io_index, addr, val, 4);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(io_index, addr, val, 2);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
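
/* Usage sketch (illustrative only; "env" and "vaddr" are hypothetical):
 * a gdbstub-style debugger reads guest virtual memory through the CPU's
 * current page tables.
 *
 *     uint8_t insn[4];
 *     if (cpu_memory_rw_debug(env, vaddr, insn, sizeof(insn), 0) < 0) {
 *         return -1;    // no physical page mapped at vaddr
 *     }
 */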
#endif

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred. */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn. */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB. If this is not
       the first instruction in a TB then re-execute the preceding
       branch. */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen. */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception. In practice
       we have already translated the block once so it's probably ok. */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB. */
    cpu_resume_from_signal(env, NULL);
}

#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
   is the offset relative to phys_ram_base */
tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    void *p;

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1);
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
        ldub_code(addr);
    }
    pd = env1->tlb_table[mmu_idx][page_index].addr_code & ~TARGET_PAGE_MASK;
    if (pd != io_mem_ram.ram_addr && pd != io_mem_rom.ram_addr
        && !(pd & IO_MEM_ROMD)) {
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC)
        cpu_unassigned_access(env1, addr, 0, 1, 0, 4);
#else
        cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
#endif
    }
    p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}

#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif