/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
   have limited branch ranges (possibly also PPC), so place it in a
   section close to the code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

#define P_L2_LEVELS \
    (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)
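
/* Worked example (illustrative only): P_L2_LEVELS is a ceiling division,
   ceil((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) / L2_BITS).
   Assuming a 36-bit physical address space, 12-bit pages and
   L2_BITS == 10, that is ceil(24 / 10) = 3 levels of 1024-entry tables
   covering the 24 bits of page frame number. */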

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
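
/* Worked example (illustrative, assuming L1_MAP_ADDR_SPACE_BITS == 32,
   TARGET_PAGE_BITS == 12 and L2_BITS == 10): V_L1_BITS_REM = 20 % 10 = 0,
   so V_L1_BITS = 10, V_L1_SIZE = 1024 and V_L1_SHIFT = 10.  A virtual
   page number then splits into 10 L1 bits and 10 leaf-level bits, giving
   a two-level map with 1024-entry tables at each level. */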

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    union {
        PhysPageDesc leaf;
        PhysPageEntry *node;
    } u;
};
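
/* A PhysPageEntry is either an interior node or a leaf: the traversal in
   phys_page_find_alloc() below follows u.node while descending the tree
   and only reads u.leaf from the entry reached in the bottom-level table,
   so the two union members are never live in the same entry at once. */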

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to PhysPageDesc.  */
static PhysPageEntry phys_map;

static void io_mem_init(void);
static void memory_map_init(void);

/* io memory support */
MemoryRegion *io_mem_region[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static MemoryRegion io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageEntry *lp, *p;
    int i, j;

    lp = &phys_map;

    /* Level 1..N.  */
    for (i = P_L2_LEVELS - 1; i >= 0; i--) {
        if (lp->u.node == NULL) {
            if (!alloc) {
                return NULL;
            }
            lp->u.node = p = g_malloc0(sizeof(PhysPageEntry) * L2_SIZE);
            if (i == 0) {
                int first_index = index & ~(L2_SIZE - 1);
                for (j = 0; j < L2_SIZE; j++) {
                    p[j].u.leaf.phys_offset = io_mem_unassigned.ram_addr;
                    p[j].u.leaf.region_offset
                        = (first_index + j) << TARGET_PAGE_BITS;
                }
            }
        }
        lp = &lp->u.node[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    return &lp->u.leaf;
}

static inline PhysPageDesc phys_page_find(target_phys_addr_t index)
{
    PhysPageDesc *p = phys_page_find_alloc(index, 0);

    if (p) {
        return *p;
    } else {
        return (PhysPageDesc) {
            .phys_offset = io_mem_unassigned.ram_addr,
            .region_offset = index << TARGET_PAGE_BITS,
        };
    }
}
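
/* Returning the descriptor by value means callers always get a usable
   PhysPageDesc: a page that was never entered into phys_map comes back
   as io_mem_unassigned rather than as a NULL pointer that every caller
   would have to check. */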

static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while (0)
#define mmap_unlock() do { } while (0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode.  This will change once a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Keep the buffer no bigger than 16MB to branch between blocks */
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
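
/* Note on the threshold (editorial reading of the code, not original
   commentary): the TCG_MAX_OP_SIZE * OPC_BUF_SIZE bytes of slack kept
   between code_gen_buffer_max_size and the real end of the buffer give
   tb_alloc() a simple test; a translation that starts below the
   threshold should have room to finish even for a maximally sized TB. */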

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}
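
/* Only the most recently generated TB can actually be reclaimed here;
   freeing any other TB is a no-op and its code stays in the buffer
   until the next tb_flush(). */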

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));
    }

    memset(tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof(void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for (;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}
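
/* tb_remove() is a generic unlink for the intrusive singly-linked lists
   embedded in TranslationBlock: 'next_offset' is the byte offset of the
   next pointer inside the structure, so the same loop serves any such
   list.  For example, tb_phys_invalidate() below unlinks from the
   physical hash chain with offsetof(TranslationBlock, phys_hash_next). */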

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
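
/* The per-page TB lists use tagged pointers: the two low bits of each
   link encode which page slot (page_addr[0] or page_addr[1]) of the
   pointed-to TB the list is threaded through, which is why every
   traversal masks with ~3 before dereferencing.  In the jump lists the
   tag value 2 additionally marks the head of the circular list. */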

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for (;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for (;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
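
/* Worked example: set_bits(tab, 5, 7) marks bits [5, 12).  The leading
   partial byte gets tab[0] |= 0xe0 (bits 5..7), there are no full bytes
   in between, and the trailing partial byte gets tab[1] |= 0x0f
   (bits 8..11) -- seven bits in total. */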

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
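
/* The bitmap holds one bit per byte of the target page (hence
   TARGET_PAGE_SIZE / 8 bytes of storage); a set bit means that byte is
   covered by at least one TB, so tb_invalidate_phys_page_fast() below
   can test a write range against translated code without walking the
   whole TB list. */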

TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size +
                             CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
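
/* Worked example: a 4-byte write at page offset 0x104 reads bitmap byte
   0x104 >> 3 = 32, shifts it right by 0x104 & 7 = 4 and masks with
   (1 << 4) - 1 = 0xf, i.e. it tests exactly the four bits covering
   bytes 0x104..0x107.  For the power-of-two lengths used in practice,
   the alignment precondition keeps the tested bits inside the single
   bitmap byte read here. */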
1154
bellard9fa3e852004-01-04 18:06:42 +00001155#if !defined(CONFIG_SOFTMMU)
Paul Brook41c1b1c2010-03-12 16:54:58 +00001156static void tb_invalidate_phys_page(tb_page_addr_t addr,
bellardd720b932004-04-25 17:57:43 +00001157 unsigned long pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00001158{
aliguori6b917542008-11-18 19:46:41 +00001159 TranslationBlock *tb;
bellard9fa3e852004-01-04 18:06:42 +00001160 PageDesc *p;
aliguori6b917542008-11-18 19:46:41 +00001161 int n;
bellardd720b932004-04-25 17:57:43 +00001162#ifdef TARGET_HAS_PRECISE_SMC
aliguori6b917542008-11-18 19:46:41 +00001163 TranslationBlock *current_tb = NULL;
bellardd720b932004-04-25 17:57:43 +00001164 CPUState *env = cpu_single_env;
aliguori6b917542008-11-18 19:46:41 +00001165 int current_tb_modified = 0;
1166 target_ulong current_pc = 0;
1167 target_ulong current_cs_base = 0;
1168 int current_flags = 0;
bellardd720b932004-04-25 17:57:43 +00001169#endif
bellard9fa3e852004-01-04 18:06:42 +00001170
1171 addr &= TARGET_PAGE_MASK;
1172 p = page_find(addr >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001173 if (!p)
bellardfd6ce8f2003-05-14 19:00:11 +00001174 return;
1175 tb = p->first_tb;
bellardd720b932004-04-25 17:57:43 +00001176#ifdef TARGET_HAS_PRECISE_SMC
1177 if (tb && pc != 0) {
1178 current_tb = tb_find_pc(pc);
1179 }
1180#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001181 while (tb != NULL) {
bellard9fa3e852004-01-04 18:06:42 +00001182 n = (long)tb & 3;
1183 tb = (TranslationBlock *)((long)tb & ~3);
bellardd720b932004-04-25 17:57:43 +00001184#ifdef TARGET_HAS_PRECISE_SMC
1185 if (current_tb == tb &&
pbrook2e70f6e2008-06-29 01:03:05 +00001186 (current_tb->cflags & CF_COUNT_MASK) != 1) {
bellardd720b932004-04-25 17:57:43 +00001187 /* If we are modifying the current TB, we must stop
1188 its execution. We could be more precise by checking
1189 that the modification is after the current PC, but it
1190 would require a specialized function to partially
1191 restore the CPU state */
ths3b46e622007-09-17 08:09:54 +00001192
bellardd720b932004-04-25 17:57:43 +00001193 current_tb_modified = 1;
Stefan Weil618ba8e2011-04-18 06:39:53 +00001194 cpu_restore_state(current_tb, env, pc);
aliguori6b917542008-11-18 19:46:41 +00001195 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1196 &current_flags);
bellardd720b932004-04-25 17:57:43 +00001197 }
1198#endif /* TARGET_HAS_PRECISE_SMC */
bellard9fa3e852004-01-04 18:06:42 +00001199 tb_phys_invalidate(tb, addr);
1200 tb = tb->page_next[n];
bellardfd6ce8f2003-05-14 19:00:11 +00001201 }
1202 p->first_tb = NULL;
bellardd720b932004-04-25 17:57:43 +00001203#ifdef TARGET_HAS_PRECISE_SMC
1204 if (current_tb_modified) {
1205 /* we generate a block containing just the instruction
1206 modifying the memory. It will ensure that it cannot modify
1207 itself */
bellardea1c1802004-06-14 18:56:36 +00001208 env->current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001209 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
bellardd720b932004-04-25 17:57:43 +00001210 cpu_resume_from_signal(env, puc);
1211 }
1212#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001213}
bellard9fa3e852004-01-04 18:06:42 +00001214#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001215
1216/* add the tb in the target page and protect it if necessary */
ths5fafdf22007-09-16 21:08:06 +00001217static inline void tb_alloc_page(TranslationBlock *tb,
Paul Brook41c1b1c2010-03-12 16:54:58 +00001218 unsigned int n, tb_page_addr_t page_addr)
bellardfd6ce8f2003-05-14 19:00:11 +00001219{
1220 PageDesc *p;
Juan Quintela4429ab42011-06-02 01:53:44 +00001221#ifndef CONFIG_USER_ONLY
1222 bool page_already_protected;
1223#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001224
bellard9fa3e852004-01-04 18:06:42 +00001225 tb->page_addr[n] = page_addr;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001226 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
bellard9fa3e852004-01-04 18:06:42 +00001227 tb->page_next[n] = p->first_tb;
Juan Quintela4429ab42011-06-02 01:53:44 +00001228#ifndef CONFIG_USER_ONLY
1229 page_already_protected = p->first_tb != NULL;
1230#endif
bellard9fa3e852004-01-04 18:06:42 +00001231 p->first_tb = (TranslationBlock *)((long)tb | n);
1232 invalidate_page_bitmap(p);
1233
bellard107db442004-06-22 18:48:46 +00001234#if defined(TARGET_HAS_SMC) || 1
bellardd720b932004-04-25 17:57:43 +00001235
bellard9fa3e852004-01-04 18:06:42 +00001236#if defined(CONFIG_USER_ONLY)
bellardfd6ce8f2003-05-14 19:00:11 +00001237 if (p->flags & PAGE_WRITE) {
pbrook53a59602006-03-25 19:31:22 +00001238 target_ulong addr;
1239 PageDesc *p2;
bellard9fa3e852004-01-04 18:06:42 +00001240 int prot;
1241
bellardfd6ce8f2003-05-14 19:00:11 +00001242 /* force the host page as non writable (writes will have a
1243 page fault + mprotect overhead) */
pbrook53a59602006-03-25 19:31:22 +00001244 page_addr &= qemu_host_page_mask;
bellardfd6ce8f2003-05-14 19:00:11 +00001245 prot = 0;
pbrook53a59602006-03-25 19:31:22 +00001246 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1247 addr += TARGET_PAGE_SIZE) {
1248
1249 p2 = page_find (addr >> TARGET_PAGE_BITS);
1250 if (!p2)
1251 continue;
1252 prot |= p2->flags;
1253 p2->flags &= ~PAGE_WRITE;
pbrook53a59602006-03-25 19:31:22 +00001254 }
ths5fafdf22007-09-16 21:08:06 +00001255 mprotect(g2h(page_addr), qemu_host_page_size,
bellardfd6ce8f2003-05-14 19:00:11 +00001256 (prot & PAGE_BITS) & ~PAGE_WRITE);
1257#ifdef DEBUG_TB_INVALIDATE
blueswir1ab3d1722007-11-04 07:31:40 +00001258 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
pbrook53a59602006-03-25 19:31:22 +00001259 page_addr);
bellardfd6ce8f2003-05-14 19:00:11 +00001260#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001261 }
bellard9fa3e852004-01-04 18:06:42 +00001262#else
1263 /* if some code is already present, then the pages are already
1264 protected. So we handle the case where only the first TB is
1265 allocated in a physical page */
Juan Quintela4429ab42011-06-02 01:53:44 +00001266 if (!page_already_protected) {
bellard6a00d602005-11-21 23:25:50 +00001267 tlb_protect_code(page_addr);
bellard9fa3e852004-01-04 18:06:42 +00001268 }
1269#endif
bellardd720b932004-04-25 17:57:43 +00001270
1271#endif /* TARGET_HAS_SMC */
bellardfd6ce8f2003-05-14 19:00:11 +00001272}
1273
bellard9fa3e852004-01-04 18:06:42 +00001274/* add a new TB and link it to the physical page tables. phys_page2 is
1275 (-1) to indicate that only one page contains the TB. */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001276void tb_link_page(TranslationBlock *tb,
1277 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
bellardd4e81642003-05-25 16:46:15 +00001278{
bellard9fa3e852004-01-04 18:06:42 +00001279 unsigned int h;
1280 TranslationBlock **ptb;
1281
pbrookc8a706f2008-06-02 16:16:42 +00001282 /* Grab the mmap lock to stop another thread invalidating this TB
1283 before we are done. */
1284 mmap_lock();
bellard9fa3e852004-01-04 18:06:42 +00001285 /* add in the physical hash table */
1286 h = tb_phys_hash_func(phys_pc);
1287 ptb = &tb_phys_hash[h];
1288 tb->phys_hash_next = *ptb;
1289 *ptb = tb;
bellardfd6ce8f2003-05-14 19:00:11 +00001290
1291 /* add in the page list */
bellard9fa3e852004-01-04 18:06:42 +00001292 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1293 if (phys_page2 != -1)
1294 tb_alloc_page(tb, 1, phys_page2);
1295 else
1296 tb->page_addr[1] = -1;
bellard9fa3e852004-01-04 18:06:42 +00001297
bellardd4e81642003-05-25 16:46:15 +00001298 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1299 tb->jmp_next[0] = NULL;
1300 tb->jmp_next[1] = NULL;
1301
1302 /* init original jump addresses */
1303 if (tb->tb_next_offset[0] != 0xffff)
1304 tb_reset_jump(tb, 0);
1305 if (tb->tb_next_offset[1] != 0xffff)
1306 tb_reset_jump(tb, 1);
bellard8a40a182005-11-20 10:35:40 +00001307
1308#ifdef DEBUG_TB_CHECK
1309 tb_page_check();
1310#endif
pbrookc8a706f2008-06-02 16:16:42 +00001311 mmap_unlock();
bellardfd6ce8f2003-05-14 19:00:11 +00001312}
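/* Sketch of the typical caller (roughly how tb_gen_code() uses this):
 *
 *     phys_pc = get_page_addr_code(env, pc);
 *     ...translate, noting whether the generated code crosses a page...
 *     tb_link_page(tb, phys_pc, phys_page2);   // phys_page2 == -1 when
 *                                              // the TB fits in one page
 */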
1313
bellarda513fe12003-05-27 23:29:48 +00001314/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1315 tb[1].tc_ptr. Return NULL if not found */
1316TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1317{
1318 int m_min, m_max, m;
1319 unsigned long v;
1320 TranslationBlock *tb;
1321
1322 if (nb_tbs <= 0)
1323 return NULL;
1324 if (tc_ptr < (unsigned long)code_gen_buffer ||
1325 tc_ptr >= (unsigned long)code_gen_ptr)
1326 return NULL;
1327 /* binary search (cf Knuth) */
1328 m_min = 0;
1329 m_max = nb_tbs - 1;
1330 while (m_min <= m_max) {
1331 m = (m_min + m_max) >> 1;
1332 tb = &tbs[m];
1333 v = (unsigned long)tb->tc_ptr;
1334 if (v == tc_ptr)
1335 return tb;
1336 else if (tc_ptr < v) {
1337 m_max = m - 1;
1338 } else {
1339 m_min = m + 1;
1340 }
ths5fafdf22007-09-16 21:08:06 +00001341 }
bellarda513fe12003-05-27 23:29:48 +00001342 return &tbs[m_max];
1343}
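/* Note: the binary search above relies on tbs[] being ordered by tc_ptr,
 * which holds because code_gen_ptr only grows until tb_flush() resets the
 * buffer.  A sketch of the usual caller, a host fault handler mapping a
 * host PC back to its TB (cpu_restore_state() as in this era's API):
 *
 *     TranslationBlock *tb = tb_find_pc(host_pc);
 *     if (tb) {
 *         cpu_restore_state(tb, env, host_pc);
 *     }
 */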
bellard75012672003-06-21 13:11:07 +00001344
bellardea041c02003-06-25 16:16:50 +00001345static void tb_reset_jump_recursive(TranslationBlock *tb);
1346
1347static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1348{
1349 TranslationBlock *tb1, *tb_next, **ptb;
1350 unsigned int n1;
1351
1352 tb1 = tb->jmp_next[n];
1353 if (tb1 != NULL) {
1354 /* find head of list */
1355 for(;;) {
1356 n1 = (long)tb1 & 3;
1357 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1358 if (n1 == 2)
1359 break;
1360 tb1 = tb1->jmp_next[n1];
1361 }
1362 /* we are now sure that tb jumps to tb1 */
1363 tb_next = tb1;
1364
1365 /* remove tb from the jmp_first list */
1366 ptb = &tb_next->jmp_first;
1367 for(;;) {
1368 tb1 = *ptb;
1369 n1 = (long)tb1 & 3;
1370 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1371 if (n1 == n && tb1 == tb)
1372 break;
1373 ptb = &tb1->jmp_next[n1];
1374 }
1375 *ptb = tb->jmp_next[n];
1376 tb->jmp_next[n] = NULL;
ths3b46e622007-09-17 08:09:54 +00001377
bellardea041c02003-06-25 16:16:50 +00001378 /* suppress the jump to next tb in generated code */
1379 tb_reset_jump(tb, n);
1380
bellard01243112004-01-04 15:48:17 +00001381 /* suppress jumps in the tb to which we could have jumped */
bellardea041c02003-06-25 16:16:50 +00001382 tb_reset_jump_recursive(tb_next);
1383 }
1384}
1385
1386static void tb_reset_jump_recursive(TranslationBlock *tb)
1387{
1388 tb_reset_jump_recursive2(tb, 0);
1389 tb_reset_jump_recursive2(tb, 1);
1390}
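/* Tag encoding used by the jump lists walked above: the low two bits of a
 * jmp_next/jmp_first value select which outgoing jump slot (0 or 1) the
 * link belongs to, and tag 2 marks the list head:
 *
 *     tb->jmp_first = (TranslationBlock *)((long)tb | 2);   // empty list
 *     n1  = (long)tb1 & 3;                                  // slot / head tag
 *     tb1 = (TranslationBlock *)((long)tb1 & ~3);           // strip the tag
 */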
1391
bellard1fddef42005-04-17 19:16:13 +00001392#if defined(TARGET_HAS_ICE)
Paul Brook94df27f2010-02-28 23:47:45 +00001393#if defined(CONFIG_USER_ONLY)
1394static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1395{
1396 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1397}
1398#else
bellardd720b932004-04-25 17:57:43 +00001399static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1400{
Anthony Liguoric227f092009-10-01 16:12:16 -05001401 target_phys_addr_t addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00001402 target_ulong pd;
Anthony Liguoric227f092009-10-01 16:12:16 -05001403 ram_addr_t ram_addr;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02001404 PhysPageDesc p;
bellardd720b932004-04-25 17:57:43 +00001405
pbrookc2f07f82006-04-08 17:14:56 +00001406 addr = cpu_get_phys_page_debug(env, pc);
1407 p = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02001408 pd = p.phys_offset;
pbrookc2f07f82006-04-08 17:14:56 +00001409 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
pbrook706cd4b2006-04-08 17:36:21 +00001410 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
bellardd720b932004-04-25 17:57:43 +00001411}
bellardc27004e2005-01-03 23:35:10 +00001412#endif
Paul Brook94df27f2010-02-28 23:47:45 +00001413#endif /* TARGET_HAS_ICE */
bellardd720b932004-04-25 17:57:43 +00001414
Paul Brookc527ee82010-03-01 03:31:14 +00001415#if defined(CONFIG_USER_ONLY)
1416void cpu_watchpoint_remove_all(CPUState *env, int mask)
1417
1418{
1419}
1420
1421int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1422 int flags, CPUWatchpoint **watchpoint)
1423{
1424 return -ENOSYS;
1425}
1426#else
pbrook6658ffb2007-03-16 23:58:11 +00001427/* Add a watchpoint. */
aliguoria1d1bb32008-11-18 20:07:32 +00001428int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1429 int flags, CPUWatchpoint **watchpoint)
pbrook6658ffb2007-03-16 23:58:11 +00001430{
aliguorib4051332008-11-18 20:14:20 +00001431 target_ulong len_mask = ~(len - 1);
aliguoric0ce9982008-11-25 22:13:57 +00001432 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001433
aliguorib4051332008-11-18 20:14:20 +00001434 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1435 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1436 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1437 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1438 return -EINVAL;
1439 }
Anthony Liguori7267c092011-08-20 22:09:37 -05001440 wp = g_malloc(sizeof(*wp));
pbrook6658ffb2007-03-16 23:58:11 +00001441
aliguoria1d1bb32008-11-18 20:07:32 +00001442 wp->vaddr = addr;
aliguorib4051332008-11-18 20:14:20 +00001443 wp->len_mask = len_mask;
aliguoria1d1bb32008-11-18 20:07:32 +00001444 wp->flags = flags;
1445
aliguori2dc9f412008-11-18 20:56:59 +00001446 /* keep all GDB-injected watchpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001447 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001448 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001449 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001450 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001451
pbrook6658ffb2007-03-16 23:58:11 +00001452 tlb_flush_page(env, addr);
aliguoria1d1bb32008-11-18 20:07:32 +00001453
1454 if (watchpoint)
1455 *watchpoint = wp;
1456 return 0;
pbrook6658ffb2007-03-16 23:58:11 +00001457}
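/* Worked example of the sanity check above: len == 4 gives len_mask == ~3,
 * so "addr & ~len_mask" is addr & 3 and rejects unaligned watchpoints.
 * A sketch of a typical call:
 *
 *     CPUWatchpoint *wp;
 *     if (cpu_watchpoint_insert(env, addr, 4,
 *                               BP_MEM_WRITE | BP_GDB, &wp) < 0) {
 *         ... unsupported length or misaligned address ...
 *     }
 */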
1458
aliguoria1d1bb32008-11-18 20:07:32 +00001459/* Remove a specific watchpoint. */
1460int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1461 int flags)
pbrook6658ffb2007-03-16 23:58:11 +00001462{
aliguorib4051332008-11-18 20:14:20 +00001463 target_ulong len_mask = ~(len - 1);
aliguoria1d1bb32008-11-18 20:07:32 +00001464 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001465
Blue Swirl72cf2d42009-09-12 07:36:22 +00001466 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001467 if (addr == wp->vaddr && len_mask == wp->len_mask
aliguori6e140f22008-11-18 20:37:55 +00001468 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
aliguoria1d1bb32008-11-18 20:07:32 +00001469 cpu_watchpoint_remove_by_ref(env, wp);
pbrook6658ffb2007-03-16 23:58:11 +00001470 return 0;
1471 }
1472 }
aliguoria1d1bb32008-11-18 20:07:32 +00001473 return -ENOENT;
pbrook6658ffb2007-03-16 23:58:11 +00001474}
1475
aliguoria1d1bb32008-11-18 20:07:32 +00001476/* Remove a specific watchpoint by reference. */
1477void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1478{
Blue Swirl72cf2d42009-09-12 07:36:22 +00001479 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
edgar_igl7d03f822008-05-17 18:58:29 +00001480
aliguoria1d1bb32008-11-18 20:07:32 +00001481 tlb_flush_page(env, watchpoint->vaddr);
1482
Anthony Liguori7267c092011-08-20 22:09:37 -05001483 g_free(watchpoint);
edgar_igl7d03f822008-05-17 18:58:29 +00001484}
1485
aliguoria1d1bb32008-11-18 20:07:32 +00001486/* Remove all matching watchpoints. */
1487void cpu_watchpoint_remove_all(CPUState *env, int mask)
1488{
aliguoric0ce9982008-11-25 22:13:57 +00001489 CPUWatchpoint *wp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001490
Blue Swirl72cf2d42009-09-12 07:36:22 +00001491 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001492 if (wp->flags & mask)
1493 cpu_watchpoint_remove_by_ref(env, wp);
aliguoric0ce9982008-11-25 22:13:57 +00001494 }
aliguoria1d1bb32008-11-18 20:07:32 +00001495}
Paul Brookc527ee82010-03-01 03:31:14 +00001496#endif
aliguoria1d1bb32008-11-18 20:07:32 +00001497
1498/* Add a breakpoint. */
1499int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1500 CPUBreakpoint **breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001501{
bellard1fddef42005-04-17 19:16:13 +00001502#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001503 CPUBreakpoint *bp;
ths3b46e622007-09-17 08:09:54 +00001504
Anthony Liguori7267c092011-08-20 22:09:37 -05001505 bp = g_malloc(sizeof(*bp));
aliguoria1d1bb32008-11-18 20:07:32 +00001506
1507 bp->pc = pc;
1508 bp->flags = flags;
1509
aliguori2dc9f412008-11-18 20:56:59 +00001510 /* keep all GDB-injected breakpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001511 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001512 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001513 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001514 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001515
1516 breakpoint_invalidate(env, pc);
1517
1518 if (breakpoint)
1519 *breakpoint = bp;
1520 return 0;
1521#else
1522 return -ENOSYS;
1523#endif
1524}
1525
1526/* Remove a specific breakpoint. */
1527int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1528{
1529#if defined(TARGET_HAS_ICE)
1530 CPUBreakpoint *bp;
1531
Blue Swirl72cf2d42009-09-12 07:36:22 +00001532 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00001533 if (bp->pc == pc && bp->flags == flags) {
1534 cpu_breakpoint_remove_by_ref(env, bp);
bellard4c3a88a2003-07-26 12:06:08 +00001535 return 0;
aliguoria1d1bb32008-11-18 20:07:32 +00001536 }
bellard4c3a88a2003-07-26 12:06:08 +00001537 }
aliguoria1d1bb32008-11-18 20:07:32 +00001538 return -ENOENT;
bellard4c3a88a2003-07-26 12:06:08 +00001539#else
aliguoria1d1bb32008-11-18 20:07:32 +00001540 return -ENOSYS;
bellard4c3a88a2003-07-26 12:06:08 +00001541#endif
1542}
1543
aliguoria1d1bb32008-11-18 20:07:32 +00001544/* Remove a specific breakpoint by reference. */
1545void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001546{
bellard1fddef42005-04-17 19:16:13 +00001547#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001548 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
bellardd720b932004-04-25 17:57:43 +00001549
aliguoria1d1bb32008-11-18 20:07:32 +00001550 breakpoint_invalidate(env, breakpoint->pc);
1551
Anthony Liguori7267c092011-08-20 22:09:37 -05001552 g_free(breakpoint);
aliguoria1d1bb32008-11-18 20:07:32 +00001553#endif
1554}
1555
1556/* Remove all matching breakpoints. */
1557void cpu_breakpoint_remove_all(CPUState *env, int mask)
1558{
1559#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001560 CPUBreakpoint *bp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001561
Blue Swirl72cf2d42009-09-12 07:36:22 +00001562 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001563 if (bp->flags & mask)
1564 cpu_breakpoint_remove_by_ref(env, bp);
aliguoric0ce9982008-11-25 22:13:57 +00001565 }
bellard4c3a88a2003-07-26 12:06:08 +00001566#endif
1567}
1568
bellardc33a3462003-07-29 20:50:33 +00001569/* enable or disable single step mode. EXCP_DEBUG is returned by the
1570 CPU loop after each instruction */
1571void cpu_single_step(CPUState *env, int enabled)
1572{
bellard1fddef42005-04-17 19:16:13 +00001573#if defined(TARGET_HAS_ICE)
bellardc33a3462003-07-29 20:50:33 +00001574 if (env->singlestep_enabled != enabled) {
1575 env->singlestep_enabled = enabled;
aliguorie22a25c2009-03-12 20:12:48 +00001576 if (kvm_enabled())
1577 kvm_update_guest_debug(env, 0);
1578 else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01001579 /* must flush all the translated code to avoid inconsistencies */
aliguorie22a25c2009-03-12 20:12:48 +00001580 /* XXX: only flush what is necessary */
1581 tb_flush(env);
1582 }
bellardc33a3462003-07-29 20:50:33 +00001583 }
1584#endif
1585}
1586
bellard34865132003-10-05 14:28:56 +00001587/* enable or disable low-level logging */
1588void cpu_set_log(int log_flags)
1589{
1590 loglevel = log_flags;
1591 if (loglevel && !logfile) {
pbrook11fcfab2007-07-01 18:21:11 +00001592 logfile = fopen(logfilename, log_append ? "a" : "w");
bellard34865132003-10-05 14:28:56 +00001593 if (!logfile) {
1594 perror(logfilename);
1595 _exit(1);
1596 }
bellard9fa3e852004-01-04 18:06:42 +00001597#if !defined(CONFIG_SOFTMMU)
1598 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1599 {
blueswir1b55266b2008-09-20 08:07:15 +00001600 static char logfile_buf[4096];
bellard9fa3e852004-01-04 18:06:42 +00001601 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1602 }
Stefan Weildaf767b2011-12-03 22:32:37 +01001603#elif defined(_WIN32)
1604 /* Win32 doesn't support line-buffering, so use unbuffered output. */
1605 setvbuf(logfile, NULL, _IONBF, 0);
1606#else
bellard34865132003-10-05 14:28:56 +00001607 setvbuf(logfile, NULL, _IOLBF, 0);
bellard9fa3e852004-01-04 18:06:42 +00001608#endif
pbrooke735b912007-06-30 13:53:24 +00001609 log_append = 1;
1610 }
1611 if (!loglevel && logfile) {
1612 fclose(logfile);
1613 logfile = NULL;
bellard34865132003-10-05 14:28:56 +00001614 }
1615}
1616
1617void cpu_set_log_filename(const char *filename)
1618{
1619 logfilename = strdup(filename);
pbrooke735b912007-06-30 13:53:24 +00001620 if (logfile) {
1621 fclose(logfile);
1622 logfile = NULL;
1623 }
1624 cpu_set_log(loglevel);
bellard34865132003-10-05 14:28:56 +00001625}
bellardc33a3462003-07-29 20:50:33 +00001626
aurel323098dba2009-03-07 21:28:24 +00001627static void cpu_unlink_tb(CPUState *env)
bellardea041c02003-06-25 16:16:50 +00001628{
pbrookd5975362008-06-07 20:50:51 +00001629 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1630 problem and hope the cpu will stop of its own accord. For userspace
1631 emulation this often isn't actually as bad as it sounds. Often
1632 signals are used primarily to interrupt blocking syscalls. */
aurel323098dba2009-03-07 21:28:24 +00001633 TranslationBlock *tb;
Anthony Liguoric227f092009-10-01 16:12:16 -05001634 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
aurel323098dba2009-03-07 21:28:24 +00001635
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001636 spin_lock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001637 tb = env->current_tb;
1638 /* if the cpu is currently executing code, we must unlink it and
1639 all the potentially executing TB */
Riku Voipiof76cfe52009-12-04 15:16:30 +02001640 if (tb) {
aurel323098dba2009-03-07 21:28:24 +00001641 env->current_tb = NULL;
1642 tb_reset_jump_recursive(tb);
aurel323098dba2009-03-07 21:28:24 +00001643 }
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001644 spin_unlock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001645}
1646
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001647#ifndef CONFIG_USER_ONLY
aurel323098dba2009-03-07 21:28:24 +00001648/* mask must never be zero, except for A20 change call */
Jan Kiszkaec6959d2011-04-13 01:32:56 +02001649static void tcg_handle_interrupt(CPUState *env, int mask)
aurel323098dba2009-03-07 21:28:24 +00001650{
1651 int old_mask;
1652
1653 old_mask = env->interrupt_request;
1654 env->interrupt_request |= mask;
1655
aliguori8edac962009-04-24 18:03:45 +00001656 /*
1657 * If called from iothread context, wake the target cpu in
1658 * case it's halted.
1659 */
Jan Kiszkab7680cb2011-03-12 17:43:51 +01001660 if (!qemu_cpu_is_self(env)) {
aliguori8edac962009-04-24 18:03:45 +00001661 qemu_cpu_kick(env);
1662 return;
1663 }
aliguori8edac962009-04-24 18:03:45 +00001664
pbrook2e70f6e2008-06-29 01:03:05 +00001665 if (use_icount) {
pbrook266910c2008-07-09 15:31:50 +00001666 env->icount_decr.u16.high = 0xffff;
pbrook2e70f6e2008-06-29 01:03:05 +00001667 if (!can_do_io(env)
aurel32be214e62009-03-06 21:48:00 +00001668 && (mask & ~old_mask) != 0) {
pbrook2e70f6e2008-06-29 01:03:05 +00001669 cpu_abort(env, "Raised interrupt while not in I/O function");
1670 }
pbrook2e70f6e2008-06-29 01:03:05 +00001671 } else {
aurel323098dba2009-03-07 21:28:24 +00001672 cpu_unlink_tb(env);
bellardea041c02003-06-25 16:16:50 +00001673 }
1674}
1675
Jan Kiszkaec6959d2011-04-13 01:32:56 +02001676CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
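/* cpu_interrupt() dispatches through this hook so that accelerators can
 * override the TCG behaviour above; cpu_interrupt() itself is assumed to
 * be a thin wrapper elsewhere that calls cpu_interrupt_handler(env, mask). */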
1677
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001678#else /* CONFIG_USER_ONLY */
1679
1680void cpu_interrupt(CPUState *env, int mask)
1681{
1682 env->interrupt_request |= mask;
1683 cpu_unlink_tb(env);
1684}
1685#endif /* CONFIG_USER_ONLY */
1686
bellardb54ad042004-05-20 13:42:52 +00001687void cpu_reset_interrupt(CPUState *env, int mask)
1688{
1689 env->interrupt_request &= ~mask;
1690}
1691
aurel323098dba2009-03-07 21:28:24 +00001692void cpu_exit(CPUState *env)
1693{
1694 env->exit_request = 1;
1695 cpu_unlink_tb(env);
1696}
1697
blueswir1c7cd6a32008-10-02 18:27:46 +00001698const CPULogItem cpu_log_items[] = {
ths5fafdf22007-09-16 21:08:06 +00001699 { CPU_LOG_TB_OUT_ASM, "out_asm",
bellardf193c792004-03-21 17:06:25 +00001700 "show generated host assembly code for each compiled TB" },
1701 { CPU_LOG_TB_IN_ASM, "in_asm",
1702 "show target assembly code for each compiled TB" },
ths5fafdf22007-09-16 21:08:06 +00001703 { CPU_LOG_TB_OP, "op",
bellard57fec1f2008-02-01 10:50:11 +00001704 "show micro ops for each compiled TB" },
bellardf193c792004-03-21 17:06:25 +00001705 { CPU_LOG_TB_OP_OPT, "op_opt",
blueswir1e01a1152008-03-14 17:37:11 +00001706 "show micro ops "
1707#ifdef TARGET_I386
1708 "before eflags optimization and "
bellardf193c792004-03-21 17:06:25 +00001709#endif
blueswir1e01a1152008-03-14 17:37:11 +00001710 "after liveness analysis" },
bellardf193c792004-03-21 17:06:25 +00001711 { CPU_LOG_INT, "int",
1712 "show interrupts/exceptions in short format" },
1713 { CPU_LOG_EXEC, "exec",
1714 "show trace before each executed TB (lots of logs)" },
bellard9fddaa02004-05-21 12:59:32 +00001715 { CPU_LOG_TB_CPU, "cpu",
thse91c8a72007-06-03 13:35:16 +00001716 "show CPU state before block translation" },
bellardf193c792004-03-21 17:06:25 +00001717#ifdef TARGET_I386
1718 { CPU_LOG_PCALL, "pcall",
1719 "show protected mode far calls/returns/exceptions" },
aliguorieca1bdf2009-01-26 19:54:31 +00001720 { CPU_LOG_RESET, "cpu_reset",
1721 "show CPU state before CPU resets" },
bellardf193c792004-03-21 17:06:25 +00001722#endif
bellard8e3a9fd2004-10-09 17:32:58 +00001723#ifdef DEBUG_IOPORT
bellardfd872592004-05-12 19:11:15 +00001724 { CPU_LOG_IOPORT, "ioport",
1725 "show all i/o ports accesses" },
bellard8e3a9fd2004-10-09 17:32:58 +00001726#endif
bellardf193c792004-03-21 17:06:25 +00001727 { 0, NULL, NULL },
1728};
1729
1730static int cmp1(const char *s1, int n, const char *s2)
1731{
1732 if (strlen(s2) != n)
1733 return 0;
1734 return memcmp(s1, s2, n) == 0;
1735}
ths3b46e622007-09-17 08:09:54 +00001736
bellardf193c792004-03-21 17:06:25 +00001737/* takes a comma-separated list of log masks. Returns 0 on error. */
1738int cpu_str_to_log_mask(const char *str)
1739{
blueswir1c7cd6a32008-10-02 18:27:46 +00001740 const CPULogItem *item;
bellardf193c792004-03-21 17:06:25 +00001741 int mask;
1742 const char *p, *p1;
1743
1744 p = str;
1745 mask = 0;
1746 for(;;) {
1747 p1 = strchr(p, ',');
1748 if (!p1)
1749 p1 = p + strlen(p);
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001750 if(cmp1(p,p1-p,"all")) {
1751 for(item = cpu_log_items; item->mask != 0; item++) {
1752 mask |= item->mask;
1753 }
1754 } else {
1755 for(item = cpu_log_items; item->mask != 0; item++) {
1756 if (cmp1(p, p1 - p, item->name))
1757 goto found;
1758 }
1759 return 0;
bellardf193c792004-03-21 17:06:25 +00001760 }
bellardf193c792004-03-21 17:06:25 +00001761 found:
1762 mask |= item->mask;
1763 if (*p1 != ',')
1764 break;
1765 p = p1 + 1;
1766 }
1767 return mask;
1768}
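/* Sketch of the usual caller (e.g. the -d command line option handler):
 *
 *     int mask = cpu_str_to_log_mask("in_asm,exec");
 *     if (mask) {
 *         cpu_set_log(mask);
 *     }
 */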
bellardea041c02003-06-25 16:16:50 +00001769
bellard75012672003-06-21 13:11:07 +00001770void cpu_abort(CPUState *env, const char *fmt, ...)
1771{
1772 va_list ap;
pbrook493ae1f2007-11-23 16:53:59 +00001773 va_list ap2;
bellard75012672003-06-21 13:11:07 +00001774
1775 va_start(ap, fmt);
pbrook493ae1f2007-11-23 16:53:59 +00001776 va_copy(ap2, ap);
bellard75012672003-06-21 13:11:07 +00001777 fprintf(stderr, "qemu: fatal: ");
1778 vfprintf(stderr, fmt, ap);
1779 fprintf(stderr, "\n");
1780#ifdef TARGET_I386
bellard7fe48482004-10-09 18:08:01 +00001781 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1782#else
1783 cpu_dump_state(env, stderr, fprintf, 0);
bellard75012672003-06-21 13:11:07 +00001784#endif
aliguori93fcfe32009-01-15 22:34:14 +00001785 if (qemu_log_enabled()) {
1786 qemu_log("qemu: fatal: ");
1787 qemu_log_vprintf(fmt, ap2);
1788 qemu_log("\n");
j_mayerf9373292007-09-29 12:18:20 +00001789#ifdef TARGET_I386
aliguori93fcfe32009-01-15 22:34:14 +00001790 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
j_mayerf9373292007-09-29 12:18:20 +00001791#else
aliguori93fcfe32009-01-15 22:34:14 +00001792 log_cpu_state(env, 0);
j_mayerf9373292007-09-29 12:18:20 +00001793#endif
aliguori31b1a7b2009-01-15 22:35:09 +00001794 qemu_log_flush();
aliguori93fcfe32009-01-15 22:34:14 +00001795 qemu_log_close();
balrog924edca2007-06-10 14:07:13 +00001796 }
pbrook493ae1f2007-11-23 16:53:59 +00001797 va_end(ap2);
j_mayerf9373292007-09-29 12:18:20 +00001798 va_end(ap);
Riku Voipiofd052bf2010-01-25 14:30:49 +02001799#if defined(CONFIG_USER_ONLY)
1800 {
1801 struct sigaction act;
1802 sigfillset(&act.sa_mask);
1803 act.sa_handler = SIG_DFL;
1804 sigaction(SIGABRT, &act, NULL);
1805 }
1806#endif
bellard75012672003-06-21 13:11:07 +00001807 abort();
1808}
1809
thsc5be9f02007-02-28 20:20:53 +00001810CPUState *cpu_copy(CPUState *env)
1811{
ths01ba9812007-12-09 02:22:57 +00001812 CPUState *new_env = cpu_init(env->cpu_model_str);
thsc5be9f02007-02-28 20:20:53 +00001813 CPUState *next_cpu = new_env->next_cpu;
1814 int cpu_index = new_env->cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001815#if defined(TARGET_HAS_ICE)
1816 CPUBreakpoint *bp;
1817 CPUWatchpoint *wp;
1818#endif
1819
thsc5be9f02007-02-28 20:20:53 +00001820 memcpy(new_env, env, sizeof(CPUState));
aliguori5a38f082009-01-15 20:16:51 +00001821
1822 /* Preserve chaining and index. */
thsc5be9f02007-02-28 20:20:53 +00001823 new_env->next_cpu = next_cpu;
1824 new_env->cpu_index = cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001825
1826 /* Clone all break/watchpoints.
1827 Note: Once we support ptrace with hw-debug register access, make sure
1828 BP_CPU break/watchpoints are handled correctly on clone. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00001829 QTAILQ_INIT(&new_env->breakpoints);
1830 QTAILQ_INIT(&new_env->watchpoints);
aliguori5a38f082009-01-15 20:16:51 +00001831#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001832 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001833 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1834 }
Blue Swirl72cf2d42009-09-12 07:36:22 +00001835 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001836 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1837 wp->flags, NULL);
1838 }
1839#endif
1840
thsc5be9f02007-02-28 20:20:53 +00001841 return new_env;
1842}
1843
bellard01243112004-01-04 15:48:17 +00001844#if !defined(CONFIG_USER_ONLY)
1845
edgar_igl5c751e92008-05-06 08:44:21 +00001846static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1847{
1848 unsigned int i;
1849
1850 /* Discard jump cache entries for any tb which might
1851 overlap the flushed page. */
1852 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1853 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001854 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001855
1856 i = tb_jmp_cache_hash_page(addr);
1857 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001858 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001859}
1860
Igor Kovalenko08738982009-07-12 02:15:40 +04001861static CPUTLBEntry s_cputlb_empty_entry = {
1862 .addr_read = -1,
1863 .addr_write = -1,
1864 .addr_code = -1,
1865 .addend = -1,
1866};
1867
Peter Maydell771124e2012-01-17 13:23:13 +00001868/* NOTE:
1869 * If flush_global is true (the usual case), flush all tlb entries.
1870 * If flush_global is false, flush (at least) all tlb entries not
1871 * marked global.
1872 *
1873 * Since QEMU doesn't currently implement a global/not-global flag
1874 * for tlb entries, at the moment tlb_flush() will also flush all
1875 * tlb entries in the flush_global == false case. This is OK because
1876 * CPU architectures generally permit an implementation to drop
1877 * entries from the TLB at any time, so flushing more entries than
1878 * required is only an efficiency issue, not a correctness issue.
1879 */
bellardee8b7022004-02-03 23:35:10 +00001880void tlb_flush(CPUState *env, int flush_global)
bellard33417e72003-08-10 21:47:01 +00001881{
bellard33417e72003-08-10 21:47:01 +00001882 int i;
bellard01243112004-01-04 15:48:17 +00001883
bellard9fa3e852004-01-04 18:06:42 +00001884#if defined(DEBUG_TLB)
1885 printf("tlb_flush:\n");
1886#endif
bellard01243112004-01-04 15:48:17 +00001887 /* must reset current TB so that interrupts cannot modify the
1888 links while we are modifying them */
1889 env->current_tb = NULL;
1890
bellard33417e72003-08-10 21:47:01 +00001891 for(i = 0; i < CPU_TLB_SIZE; i++) {
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001892 int mmu_idx;
1893 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
Igor Kovalenko08738982009-07-12 02:15:40 +04001894 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001895 }
bellard33417e72003-08-10 21:47:01 +00001896 }
bellard9fa3e852004-01-04 18:06:42 +00001897
bellard8a40a182005-11-20 10:35:40 +00001898 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
bellard9fa3e852004-01-04 18:06:42 +00001899
Paul Brookd4c430a2010-03-17 02:14:28 +00001900 env->tlb_flush_addr = -1;
1901 env->tlb_flush_mask = 0;
bellarde3db7222005-01-26 22:00:47 +00001902 tlb_flush_count++;
bellard33417e72003-08-10 21:47:01 +00001903}
1904
bellard274da6b2004-05-20 21:56:27 +00001905static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
bellard61382a52003-10-27 21:22:23 +00001906{
ths5fafdf22007-09-16 21:08:06 +00001907 if (addr == (tlb_entry->addr_read &
bellard84b7b8e2005-11-28 21:19:04 +00001908 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00001909 addr == (tlb_entry->addr_write &
bellard84b7b8e2005-11-28 21:19:04 +00001910 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00001911 addr == (tlb_entry->addr_code &
bellard84b7b8e2005-11-28 21:19:04 +00001912 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
Igor Kovalenko08738982009-07-12 02:15:40 +04001913 *tlb_entry = s_cputlb_empty_entry;
bellard84b7b8e2005-11-28 21:19:04 +00001914 }
bellard61382a52003-10-27 21:22:23 +00001915}
1916
bellard2e126692004-04-25 21:28:44 +00001917void tlb_flush_page(CPUState *env, target_ulong addr)
bellard33417e72003-08-10 21:47:01 +00001918{
bellard8a40a182005-11-20 10:35:40 +00001919 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001920 int mmu_idx;
bellard01243112004-01-04 15:48:17 +00001921
bellard9fa3e852004-01-04 18:06:42 +00001922#if defined(DEBUG_TLB)
bellard108c49b2005-07-24 12:55:09 +00001923 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
bellard9fa3e852004-01-04 18:06:42 +00001924#endif
Paul Brookd4c430a2010-03-17 02:14:28 +00001925 /* Check if we need to flush due to large pages. */
1926 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
1927#if defined(DEBUG_TLB)
1928 printf("tlb_flush_page: forced full flush ("
1929 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
1930 env->tlb_flush_addr, env->tlb_flush_mask);
1931#endif
1932 tlb_flush(env, 1);
1933 return;
1934 }
bellard01243112004-01-04 15:48:17 +00001935 /* must reset current TB so that interrupts cannot modify the
1936 links while we are modifying them */
1937 env->current_tb = NULL;
bellard33417e72003-08-10 21:47:01 +00001938
bellard61382a52003-10-27 21:22:23 +00001939 addr &= TARGET_PAGE_MASK;
bellard33417e72003-08-10 21:47:01 +00001940 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001941 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1942 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
bellard01243112004-01-04 15:48:17 +00001943
edgar_igl5c751e92008-05-06 08:44:21 +00001944 tlb_flush_jmp_cache(env, addr);
bellard9fa3e852004-01-04 18:06:42 +00001945}
1946
bellard9fa3e852004-01-04 18:06:42 +00001947/* update the TLBs so that writes to code in the virtual page 'addr'
1948 can be detected */
Anthony Liguoric227f092009-10-01 16:12:16 -05001949static void tlb_protect_code(ram_addr_t ram_addr)
bellard61382a52003-10-27 21:22:23 +00001950{
ths5fafdf22007-09-16 21:08:06 +00001951 cpu_physical_memory_reset_dirty(ram_addr,
bellard6a00d602005-11-21 23:25:50 +00001952 ram_addr + TARGET_PAGE_SIZE,
1953 CODE_DIRTY_FLAG);
bellard9fa3e852004-01-04 18:06:42 +00001954}
1955
bellard9fa3e852004-01-04 18:06:42 +00001956/* update the TLB so that writes in physical page 'phys_addr' are no longer
bellard3a7d9292005-08-21 09:26:42 +00001957 tested for self-modifying code */
Anthony Liguoric227f092009-10-01 16:12:16 -05001958static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
bellard3a7d9292005-08-21 09:26:42 +00001959 target_ulong vaddr)
bellard9fa3e852004-01-04 18:06:42 +00001960{
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001961 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
bellard1ccde1c2004-02-06 19:46:14 +00001962}
1963
ths5fafdf22007-09-16 21:08:06 +00001964static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
bellard1ccde1c2004-02-06 19:46:14 +00001965 unsigned long start, unsigned long length)
1966{
1967 unsigned long addr;
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001968 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
bellard84b7b8e2005-11-28 21:19:04 +00001969 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
bellard1ccde1c2004-02-06 19:46:14 +00001970 if ((addr - start) < length) {
pbrook0f459d12008-06-09 00:20:13 +00001971 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
bellard1ccde1c2004-02-06 19:46:14 +00001972 }
1973 }
1974}
1975
pbrook5579c7f2009-04-11 14:47:08 +00001976/* Note: start and end must be within the same ram block. */
Anthony Liguoric227f092009-10-01 16:12:16 -05001977void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
bellard0a962c02005-02-10 22:00:27 +00001978 int dirty_flags)
bellard1ccde1c2004-02-06 19:46:14 +00001979{
1980 CPUState *env;
bellard4f2ac232004-04-26 19:44:02 +00001981 unsigned long length, start1;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001982 int i;
bellard1ccde1c2004-02-06 19:46:14 +00001983
1984 start &= TARGET_PAGE_MASK;
1985 end = TARGET_PAGE_ALIGN(end);
1986
1987 length = end - start;
1988 if (length == 0)
1989 return;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001990 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00001991
bellard1ccde1c2004-02-06 19:46:14 +00001992 /* we modify the TLB cache so that the dirty bit will be set again
1993 when accessing the range */
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001994 start1 = (unsigned long)qemu_safe_ram_ptr(start);
Stefan Weila57d23e2011-04-30 22:49:26 +02001995 /* Check that we don't span multiple blocks - this breaks the
pbrook5579c7f2009-04-11 14:47:08 +00001996 address comparisons below. */
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001997 if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
pbrook5579c7f2009-04-11 14:47:08 +00001998 != (end - 1) - start) {
1999 abort();
2000 }
2001
bellard6a00d602005-11-21 23:25:50 +00002002 for(env = first_cpu; env != NULL; env = env->next_cpu) {
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002003 int mmu_idx;
2004 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2005 for(i = 0; i < CPU_TLB_SIZE; i++)
2006 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2007 start1, length);
2008 }
bellard6a00d602005-11-21 23:25:50 +00002009 }
bellard1ccde1c2004-02-06 19:46:14 +00002010}
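/* Note: tlb_reset_dirty_range() compares *host* addresses -- each RAM TLB
 * entry's (addr_write & TARGET_PAGE_MASK) + addend is the host pointer for
 * its page -- so start1 must lie in the same RAM block as the whole range
 * for "(addr - start) < length" to be meaningful, hence the abort() above
 * when the range spans blocks. */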
2011
aliguori74576192008-10-06 14:02:03 +00002012int cpu_physical_memory_set_dirty_tracking(int enable)
2013{
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002014 int ret = 0;
aliguori74576192008-10-06 14:02:03 +00002015 in_migration = enable;
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002016 return ret;
aliguori74576192008-10-06 14:02:03 +00002017}
2018
bellard3a7d9292005-08-21 09:26:42 +00002019static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2020{
Anthony Liguoric227f092009-10-01 16:12:16 -05002021 ram_addr_t ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00002022 void *p;
bellard3a7d9292005-08-21 09:26:42 +00002023
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002024 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
pbrook5579c7f2009-04-11 14:47:08 +00002025 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2026 + tlb_entry->addend);
Marcelo Tosattie8902612010-10-11 15:31:19 -03002027 ram_addr = qemu_ram_addr_from_host_nofail(p);
bellard3a7d9292005-08-21 09:26:42 +00002028 if (!cpu_physical_memory_is_dirty(ram_addr)) {
pbrook0f459d12008-06-09 00:20:13 +00002029 tlb_entry->addr_write |= TLB_NOTDIRTY;
bellard3a7d9292005-08-21 09:26:42 +00002030 }
2031 }
2032}
2033
2034/* update the TLB according to the current state of the dirty bits */
2035void cpu_tlb_update_dirty(CPUState *env)
2036{
2037 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002038 int mmu_idx;
2039 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2040 for(i = 0; i < CPU_TLB_SIZE; i++)
2041 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2042 }
bellard3a7d9292005-08-21 09:26:42 +00002043}
2044
pbrook0f459d12008-06-09 00:20:13 +00002045static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002046{
pbrook0f459d12008-06-09 00:20:13 +00002047 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2048 tlb_entry->addr_write = vaddr;
bellard1ccde1c2004-02-06 19:46:14 +00002049}
2050
pbrook0f459d12008-06-09 00:20:13 +00002051/* update the TLB corresponding to virtual page vaddr
2052 so that it is no longer dirty */
2053static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002054{
bellard1ccde1c2004-02-06 19:46:14 +00002055 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002056 int mmu_idx;
bellard1ccde1c2004-02-06 19:46:14 +00002057
pbrook0f459d12008-06-09 00:20:13 +00002058 vaddr &= TARGET_PAGE_MASK;
bellard1ccde1c2004-02-06 19:46:14 +00002059 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002060 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2061 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
bellard9fa3e852004-01-04 18:06:42 +00002062}
2063
Paul Brookd4c430a2010-03-17 02:14:28 +00002064/* Our TLB does not support large pages, so remember the area covered by
2065 large pages and trigger a full TLB flush if these are invalidated. */
2066static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2067 target_ulong size)
2068{
2069 target_ulong mask = ~(size - 1);
2070
2071 if (env->tlb_flush_addr == (target_ulong)-1) {
2072 env->tlb_flush_addr = vaddr & mask;
2073 env->tlb_flush_mask = mask;
2074 return;
2075 }
2076 /* Extend the existing region to include the new page.
2077 This is a compromise between unnecessary flushes and the cost
2078 of maintaining a full variable-size TLB. */
2079 mask &= env->tlb_flush_mask;
2080 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2081 mask <<= 1;
2082 }
2083 env->tlb_flush_addr &= mask;
2084 env->tlb_flush_mask = mask;
2085}
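/* Worked example (32-bit target addresses): an existing 2 MiB region at
 * tlb_flush_addr == 0x00200000 (mask 0xffe00000) extended by a new 2 MiB
 * page at vaddr == 0x00600000 differs in bit 22, so the loop widens the
 * mask to 0xff800000; the merged region becomes 0x00000000-0x007fffff and
 * either page now forces the full flush in tlb_flush_page(). */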
2086
Avi Kivity1d393fa2012-01-01 21:15:42 +02002087static bool is_ram_rom(ram_addr_t pd)
2088{
2089 pd &= ~TARGET_PAGE_MASK;
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002090 return pd == io_mem_ram.ram_addr || pd == io_mem_rom.ram_addr;
Avi Kivity1d393fa2012-01-01 21:15:42 +02002091}
2092
Avi Kivity75c578d2012-01-02 15:40:52 +02002093static bool is_romd(ram_addr_t pd)
2094{
2095 MemoryRegion *mr;
2096
2097 pd &= ~TARGET_PAGE_MASK;
Avi Kivity11c7ef02012-01-02 17:21:07 +02002098 mr = io_mem_region[pd];
Avi Kivity75c578d2012-01-02 15:40:52 +02002099 return mr->rom_device && mr->readable;
2100}
2101
Avi Kivity1d393fa2012-01-01 21:15:42 +02002102static bool is_ram_rom_romd(ram_addr_t pd)
2103{
Avi Kivity75c578d2012-01-02 15:40:52 +02002104 return is_ram_rom(pd) || is_romd(pd);
Avi Kivity1d393fa2012-01-01 21:15:42 +02002105}
2106
Paul Brookd4c430a2010-03-17 02:14:28 +00002107/* Add a new TLB entry. At most one entry for a given virtual address
2108 is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
2109 supplied size is only used by tlb_flush_page. */
2110void tlb_set_page(CPUState *env, target_ulong vaddr,
2111 target_phys_addr_t paddr, int prot,
2112 int mmu_idx, target_ulong size)
bellard9fa3e852004-01-04 18:06:42 +00002113{
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02002114 PhysPageDesc p;
bellard4f2ac232004-04-26 19:44:02 +00002115 unsigned long pd;
bellard9fa3e852004-01-04 18:06:42 +00002116 unsigned int index;
bellard4f2ac232004-04-26 19:44:02 +00002117 target_ulong address;
pbrook0f459d12008-06-09 00:20:13 +00002118 target_ulong code_address;
Paul Brook355b1942010-04-05 00:28:53 +01002119 unsigned long addend;
bellard84b7b8e2005-11-28 21:19:04 +00002120 CPUTLBEntry *te;
aliguoria1d1bb32008-11-18 20:07:32 +00002121 CPUWatchpoint *wp;
Anthony Liguoric227f092009-10-01 16:12:16 -05002122 target_phys_addr_t iotlb;
bellard9fa3e852004-01-04 18:06:42 +00002123
Paul Brookd4c430a2010-03-17 02:14:28 +00002124 assert(size >= TARGET_PAGE_SIZE);
2125 if (size != TARGET_PAGE_SIZE) {
2126 tlb_add_large_page(env, vaddr, size);
2127 }
bellard92e873b2004-05-21 14:52:29 +00002128 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02002129 pd = p.phys_offset;
bellard9fa3e852004-01-04 18:06:42 +00002130#if defined(DEBUG_TLB)
Stefan Weil7fd3f492010-09-30 22:39:51 +02002131 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
2132 " prot=%x idx=%d pd=0x%08lx\n",
2133 vaddr, paddr, prot, mmu_idx, pd);
bellard9fa3e852004-01-04 18:06:42 +00002134#endif
2135
pbrook0f459d12008-06-09 00:20:13 +00002136 address = vaddr;
Avi Kivity1d393fa2012-01-01 21:15:42 +02002137 if (!is_ram_rom_romd(pd)) {
pbrook0f459d12008-06-09 00:20:13 +00002138 /* IO memory case (romd handled later) */
2139 address |= TLB_MMIO;
2140 }
pbrook5579c7f2009-04-11 14:47:08 +00002141 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
Avi Kivity1d393fa2012-01-01 21:15:42 +02002142 if (is_ram_rom(pd)) {
pbrook0f459d12008-06-09 00:20:13 +00002143 /* Normal RAM. */
2144 iotlb = pd & TARGET_PAGE_MASK;
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002145 if ((pd & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr)
2146 iotlb |= io_mem_notdirty.ram_addr;
pbrook0f459d12008-06-09 00:20:13 +00002147 else
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002148 iotlb |= io_mem_rom.ram_addr;
pbrook0f459d12008-06-09 00:20:13 +00002149 } else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002150 /* IO handlers are currently passed a physical address.
pbrook0f459d12008-06-09 00:20:13 +00002151 It would be nice to pass an offset from the base address
2152 of that region. This would avoid having to special case RAM,
2153 and avoid full address decoding in every device.
2154 We can't use the high bits of pd for this because
2155 IO_MEM_ROMD uses these as a ram address. */
pbrook8da3ff12008-12-01 18:59:50 +00002156 iotlb = (pd & ~TARGET_PAGE_MASK);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02002157 iotlb += p.region_offset;
pbrook0f459d12008-06-09 00:20:13 +00002158 }
pbrook6658ffb2007-03-16 23:58:11 +00002159
pbrook0f459d12008-06-09 00:20:13 +00002160 code_address = address;
2161 /* Make accesses to pages with watchpoints go via the
2162 watchpoint trap routines. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00002163 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00002164 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
Jun Koibf298f82010-05-06 14:36:59 +09002165 /* Avoid trapping reads of pages with a write breakpoint. */
2166 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
Avi Kivity1ec9b902012-01-02 12:47:48 +02002167 iotlb = io_mem_watch.ram_addr + paddr;
Jun Koibf298f82010-05-06 14:36:59 +09002168 address |= TLB_MMIO;
2169 break;
2170 }
pbrook6658ffb2007-03-16 23:58:11 +00002171 }
pbrook0f459d12008-06-09 00:20:13 +00002172 }
balrogd79acba2007-06-26 20:01:13 +00002173
pbrook0f459d12008-06-09 00:20:13 +00002174 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2175 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2176 te = &env->tlb_table[mmu_idx][index];
2177 te->addend = addend - vaddr;
2178 if (prot & PAGE_READ) {
2179 te->addr_read = address;
2180 } else {
2181 te->addr_read = -1;
2182 }
edgar_igl5c751e92008-05-06 08:44:21 +00002183
pbrook0f459d12008-06-09 00:20:13 +00002184 if (prot & PAGE_EXEC) {
2185 te->addr_code = code_address;
2186 } else {
2187 te->addr_code = -1;
2188 }
2189 if (prot & PAGE_WRITE) {
Avi Kivity75c578d2012-01-02 15:40:52 +02002190 if ((pd & ~TARGET_PAGE_MASK) == io_mem_rom.ram_addr || is_romd(pd)) {
pbrook0f459d12008-06-09 00:20:13 +00002191 /* Write access calls the I/O callback. */
2192 te->addr_write = address | TLB_MMIO;
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002193 } else if ((pd & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr &&
pbrook0f459d12008-06-09 00:20:13 +00002194 !cpu_physical_memory_is_dirty(pd)) {
2195 te->addr_write = address | TLB_NOTDIRTY;
bellard84b7b8e2005-11-28 21:19:04 +00002196 } else {
pbrook0f459d12008-06-09 00:20:13 +00002197 te->addr_write = address;
bellard9fa3e852004-01-04 18:06:42 +00002198 }
pbrook0f459d12008-06-09 00:20:13 +00002199 } else {
2200 te->addr_write = -1;
bellard9fa3e852004-01-04 18:06:42 +00002201 }
bellard9fa3e852004-01-04 18:06:42 +00002202}
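/* Fast-path summary for the entry filled in above: for plain RAM,
 * te->addend == host_ptr - vaddr, so a guest access at address A reaches
 * host memory at A + addend with no further lookup; pages that need the
 * slow path (MMIO, watchpoints, clean pages awaiting dirty tracking)
 * instead carry TLB_MMIO or TLB_NOTDIRTY in the stored address. */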
2203
bellard01243112004-01-04 15:48:17 +00002204#else
2205
bellardee8b7022004-02-03 23:35:10 +00002206void tlb_flush(CPUState *env, int flush_global)
bellard01243112004-01-04 15:48:17 +00002207{
2208}
2209
bellard2e126692004-04-25 21:28:44 +00002210void tlb_flush_page(CPUState *env, target_ulong addr)
bellard01243112004-01-04 15:48:17 +00002211{
2212}
2213
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002214/*
2215 * Walks guest process memory "regions" one by one
2216 * and calls callback function 'fn' for each region.
2217 */
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002218
2219struct walk_memory_regions_data
bellard9fa3e852004-01-04 18:06:42 +00002220{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002221 walk_memory_regions_fn fn;
2222 void *priv;
2223 unsigned long start;
2224 int prot;
2225};
bellard9fa3e852004-01-04 18:06:42 +00002226
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002227static int walk_memory_regions_end(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00002228 abi_ulong end, int new_prot)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002229{
2230 if (data->start != -1ul) {
2231 int rc = data->fn(data->priv, data->start, end, data->prot);
2232 if (rc != 0) {
2233 return rc;
bellard9fa3e852004-01-04 18:06:42 +00002234 }
bellard33417e72003-08-10 21:47:01 +00002235 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002236
2237 data->start = (new_prot ? end : -1ul);
2238 data->prot = new_prot;
2239
2240 return 0;
2241}
2242
2243static int walk_memory_regions_1(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00002244 abi_ulong base, int level, void **lp)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002245{
Paul Brookb480d9b2010-03-12 23:23:29 +00002246 abi_ulong pa;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002247 int i, rc;
2248
2249 if (*lp == NULL) {
2250 return walk_memory_regions_end(data, base, 0);
2251 }
2252
2253 if (level == 0) {
2254 PageDesc *pd = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00002255 for (i = 0; i < L2_SIZE; ++i) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002256 int prot = pd[i].flags;
2257
2258 pa = base | (i << TARGET_PAGE_BITS);
2259 if (prot != data->prot) {
2260 rc = walk_memory_regions_end(data, pa, prot);
2261 if (rc != 0) {
2262 return rc;
2263 }
2264 }
2265 }
2266 } else {
2267 void **pp = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00002268 for (i = 0; i < L2_SIZE; ++i) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002269 pa = base | ((abi_ulong)i <<
2270 (TARGET_PAGE_BITS + L2_BITS * level));
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002271 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2272 if (rc != 0) {
2273 return rc;
2274 }
2275 }
2276 }
2277
2278 return 0;
2279}
2280
2281int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2282{
2283 struct walk_memory_regions_data data;
2284 unsigned long i;
2285
2286 data.fn = fn;
2287 data.priv = priv;
2288 data.start = -1ul;
2289 data.prot = 0;
2290
2291 for (i = 0; i < V_L1_SIZE; i++) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002292 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002293 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2294 if (rc != 0) {
2295 return rc;
2296 }
2297 }
2298
2299 return walk_memory_regions_end(&data, 0, 0);
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002300}
2301
Paul Brookb480d9b2010-03-12 23:23:29 +00002302static int dump_region(void *priv, abi_ulong start,
2303 abi_ulong end, unsigned long prot)
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002304{
2305 FILE *f = (FILE *)priv;
2306
Paul Brookb480d9b2010-03-12 23:23:29 +00002307 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2308 " "TARGET_ABI_FMT_lx" %c%c%c\n",
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002309 start, end, end - start,
2310 ((prot & PAGE_READ) ? 'r' : '-'),
2311 ((prot & PAGE_WRITE) ? 'w' : '-'),
2312 ((prot & PAGE_EXEC) ? 'x' : '-'));
2313
2314 return (0);
2315}
2316
2317/* dump memory mappings */
2318void page_dump(FILE *f)
2319{
2320 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2321 "start", "end", "size", "prot");
2322 walk_memory_regions(f, dump_region);
bellard33417e72003-08-10 21:47:01 +00002323}
2324
pbrook53a59602006-03-25 19:31:22 +00002325int page_get_flags(target_ulong address)
bellard33417e72003-08-10 21:47:01 +00002326{
bellard9fa3e852004-01-04 18:06:42 +00002327 PageDesc *p;
2328
2329 p = page_find(address >> TARGET_PAGE_BITS);
bellard33417e72003-08-10 21:47:01 +00002330 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00002331 return 0;
2332 return p->flags;
bellard33417e72003-08-10 21:47:01 +00002333}
2334
Richard Henderson376a7902010-03-10 15:57:04 -08002335/* Modify the flags of a page and invalidate the code if necessary.
2336 The flag PAGE_WRITE_ORG is positioned automatically depending
2337 on PAGE_WRITE. The mmap_lock should already be held. */
pbrook53a59602006-03-25 19:31:22 +00002338void page_set_flags(target_ulong start, target_ulong end, int flags)
bellard9fa3e852004-01-04 18:06:42 +00002339{
Richard Henderson376a7902010-03-10 15:57:04 -08002340 target_ulong addr, len;
bellard9fa3e852004-01-04 18:06:42 +00002341
Richard Henderson376a7902010-03-10 15:57:04 -08002342 /* This function should never be called with addresses outside the
2343 guest address space. If this assert fires, it probably indicates
2344 a missing call to h2g_valid. */
Paul Brookb480d9b2010-03-12 23:23:29 +00002345#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2346 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002347#endif
2348 assert(start < end);
2349
bellard9fa3e852004-01-04 18:06:42 +00002350 start = start & TARGET_PAGE_MASK;
2351 end = TARGET_PAGE_ALIGN(end);
Richard Henderson376a7902010-03-10 15:57:04 -08002352
2353 if (flags & PAGE_WRITE) {
bellard9fa3e852004-01-04 18:06:42 +00002354 flags |= PAGE_WRITE_ORG;
Richard Henderson376a7902010-03-10 15:57:04 -08002355 }
2356
2357 for (addr = start, len = end - start;
2358 len != 0;
2359 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2360 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2361
2362 /* If the write protection bit is set, then we invalidate
2363 the code inside. */
ths5fafdf22007-09-16 21:08:06 +00002364 if (!(p->flags & PAGE_WRITE) &&
bellard9fa3e852004-01-04 18:06:42 +00002365 (flags & PAGE_WRITE) &&
2366 p->first_tb) {
bellardd720b932004-04-25 17:57:43 +00002367 tb_invalidate_phys_page(addr, 0, NULL);
bellard9fa3e852004-01-04 18:06:42 +00002368 }
2369 p->flags = flags;
2370 }
bellard9fa3e852004-01-04 18:06:42 +00002371}
2372
ths3d97b402007-11-02 19:02:07 +00002373int page_check_range(target_ulong start, target_ulong len, int flags)
2374{
2375 PageDesc *p;
2376 target_ulong end;
2377 target_ulong addr;
2378
Richard Henderson376a7902010-03-10 15:57:04 -08002379 /* This function should never be called with addresses outside the
2380 guest address space. If this assert fires, it probably indicates
2381 a missing call to h2g_valid. */
Blue Swirl338e9e62010-03-13 09:48:08 +00002382#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2383 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002384#endif
2385
Richard Henderson3e0650a2010-03-29 10:54:42 -07002386 if (len == 0) {
2387 return 0;
2388 }
Richard Henderson376a7902010-03-10 15:57:04 -08002389 if (start + len - 1 < start) {
2390 /* We've wrapped around. */
balrog55f280c2008-10-28 10:24:11 +00002391 return -1;
Richard Henderson376a7902010-03-10 15:57:04 -08002392 }
balrog55f280c2008-10-28 10:24:11 +00002393
ths3d97b402007-11-02 19:02:07 +00002394 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2395 start = start & TARGET_PAGE_MASK;
2396
Richard Henderson376a7902010-03-10 15:57:04 -08002397 for (addr = start, len = end - start;
2398 len != 0;
2399 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
ths3d97b402007-11-02 19:02:07 +00002400 p = page_find(addr >> TARGET_PAGE_BITS);
2401 if( !p )
2402 return -1;
2403 if( !(p->flags & PAGE_VALID) )
2404 return -1;
2405
bellarddae32702007-11-14 10:51:00 +00002406 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
ths3d97b402007-11-02 19:02:07 +00002407 return -1;
bellarddae32702007-11-14 10:51:00 +00002408 if (flags & PAGE_WRITE) {
2409 if (!(p->flags & PAGE_WRITE_ORG))
2410 return -1;
2411 /* unprotect the page if it was put read-only because it
2412 contains translated code */
2413 if (!(p->flags & PAGE_WRITE)) {
2414 if (!page_unprotect(addr, 0, NULL))
2415 return -1;
2416 }
2417 return 0;
2418 }
ths3d97b402007-11-02 19:02:07 +00002419 }
2420 return 0;
2421}
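/* Sketch of a typical user-mode caller validating a guest buffer before
 * touching it (TARGET_EFAULT assumed from the linux-user headers):
 *
 *     if (page_check_range(guest_addr, size, PAGE_READ | PAGE_WRITE) < 0) {
 *         return -TARGET_EFAULT;
 *     }
 */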
2422
bellard9fa3e852004-01-04 18:06:42 +00002423/* called from signal handler: invalidate the code and unprotect the
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002424 page. Return TRUE if the fault was successfully handled. */
pbrook53a59602006-03-25 19:31:22 +00002425int page_unprotect(target_ulong address, unsigned long pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00002426{
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002427 unsigned int prot;
2428 PageDesc *p;
pbrook53a59602006-03-25 19:31:22 +00002429 target_ulong host_start, host_end, addr;
bellard9fa3e852004-01-04 18:06:42 +00002430
pbrookc8a706f2008-06-02 16:16:42 +00002431 /* Technically this isn't safe inside a signal handler. However we
2432 know this only ever happens in a synchronous SEGV handler, so in
2433 practice it seems to be ok. */
2434 mmap_lock();
2435
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002436 p = page_find(address >> TARGET_PAGE_BITS);
2437 if (!p) {
pbrookc8a706f2008-06-02 16:16:42 +00002438 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002439 return 0;
pbrookc8a706f2008-06-02 16:16:42 +00002440 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002441
bellard9fa3e852004-01-04 18:06:42 +00002442 /* if the page was really writable, then we change its
2443 protection back to writable */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002444 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2445 host_start = address & qemu_host_page_mask;
2446 host_end = host_start + qemu_host_page_size;
2447
2448 prot = 0;
2449 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2450 p = page_find(addr >> TARGET_PAGE_BITS);
2451 p->flags |= PAGE_WRITE;
2452 prot |= p->flags;
2453
bellard9fa3e852004-01-04 18:06:42 +00002454 /* and since the content will be modified, we must invalidate
2455 the corresponding translated code. */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002456 tb_invalidate_phys_page(addr, pc, puc);
bellard9fa3e852004-01-04 18:06:42 +00002457#ifdef DEBUG_TB_CHECK
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002458 tb_invalidate_check(addr);
bellard9fa3e852004-01-04 18:06:42 +00002459#endif
bellard9fa3e852004-01-04 18:06:42 +00002460 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002461 mprotect((void *)g2h(host_start), qemu_host_page_size,
2462 prot & PAGE_BITS);
2463
2464 mmap_unlock();
2465 return 1;
bellard9fa3e852004-01-04 18:06:42 +00002466 }
pbrookc8a706f2008-06-02 16:16:42 +00002467 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002468 return 0;
2469}
2470
bellard6a00d602005-11-21 23:25:50 +00002471static inline void tlb_set_dirty(CPUState *env,
2472 unsigned long addr, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002473{
2474}
bellard9fa3e852004-01-04 18:06:42 +00002475#endif /* defined(CONFIG_USER_ONLY) */
2476
pbrooke2eef172008-06-08 01:09:01 +00002477#if !defined(CONFIG_USER_ONLY)
pbrook8da3ff12008-12-01 18:59:50 +00002478
Paul Brookc04b2b72010-03-01 03:31:14 +00002479#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2480typedef struct subpage_t {
Avi Kivity70c68e42012-01-02 12:32:48 +02002481 MemoryRegion iomem;
Paul Brookc04b2b72010-03-01 03:31:14 +00002482 target_phys_addr_t base;
Richard Hendersonf6405242010-04-22 16:47:31 -07002483 ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2484 ram_addr_t region_offset[TARGET_PAGE_SIZE];
Paul Brookc04b2b72010-03-01 03:31:14 +00002485} subpage_t;
2486
Anthony Liguoric227f092009-10-01 16:12:16 -05002487static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2488 ram_addr_t memory, ram_addr_t region_offset);
Richard Hendersonf6405242010-04-22 16:47:31 -07002489static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2490 ram_addr_t orig_memory,
2491 ram_addr_t region_offset);
blueswir1db7b5422007-05-26 17:36:03 +00002492#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2493 need_subpage) \
2494 do { \
2495 if (addr > start_addr) \
2496 start_addr2 = 0; \
2497 else { \
2498 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2499 if (start_addr2 > 0) \
2500 need_subpage = 1; \
2501 } \
2502 \
blueswir149e9fba2007-05-30 17:25:06 +00002503 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
blueswir1db7b5422007-05-26 17:36:03 +00002504 end_addr2 = TARGET_PAGE_SIZE - 1; \
2505 else { \
2506 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2507 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2508 need_subpage = 1; \
2509 } \
2510 } while (0)
2511
Avi Kivity54688b12012-02-09 17:34:32 +02002512static void destroy_page_desc(PhysPageDesc pd)
2513{
2514 unsigned io_index = pd.phys_offset & ~TARGET_PAGE_MASK;
2515 MemoryRegion *mr = io_mem_region[io_index];
2516
2517 if (mr->subpage) {
2518 subpage_t *subpage = container_of(mr, subpage_t, iomem);
2519 memory_region_destroy(&subpage->iomem);
2520 g_free(subpage);
2521 }
2522}
2523
Avi Kivity4346ae32012-02-10 17:00:01 +02002524static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
Avi Kivity54688b12012-02-09 17:34:32 +02002525{
2526 unsigned i;
Avi Kivity4346ae32012-02-10 17:00:01 +02002527 PhysPageEntry *p = lp->u.node;
Avi Kivity54688b12012-02-09 17:34:32 +02002528
Avi Kivity4346ae32012-02-10 17:00:01 +02002529 if (!p) {
Avi Kivity54688b12012-02-09 17:34:32 +02002530 return;
2531 }
2532
Avi Kivity4346ae32012-02-10 17:00:01 +02002533 for (i = 0; i < L2_SIZE; ++i) {
2534 if (level > 0) {
Avi Kivity54688b12012-02-09 17:34:32 +02002535 destroy_l2_mapping(&p[i], level - 1);
Avi Kivity4346ae32012-02-10 17:00:01 +02002536 } else {
2537 destroy_page_desc(p[i].u.leaf);
Avi Kivity54688b12012-02-09 17:34:32 +02002538 }
Avi Kivity54688b12012-02-09 17:34:32 +02002539 }
Avi Kivity4346ae32012-02-10 17:00:01 +02002540 g_free(p);
2541 lp->u.node = NULL;
Avi Kivity54688b12012-02-09 17:34:32 +02002542}
2543
2544static void destroy_all_mappings(void)
2545{
Avi Kivity3eef53d2012-02-10 14:57:31 +02002546 destroy_l2_mapping(&phys_map, P_L2_LEVELS - 1);
Avi Kivity54688b12012-02-09 17:34:32 +02002547}
2548
Michael S. Tsirkin8f2498f2009-09-29 18:53:16 +02002549/* register physical memory.
2550 For RAM, 'size' must be a multiple of the target page size.
2551 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
pbrook8da3ff12008-12-01 18:59:50 +00002552 io memory page. The address used when calling the IO function is
2553 the offset from the start of the region, plus region_offset. Both
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002554 start_addr and region_offset are rounded down to a page boundary
pbrook8da3ff12008-12-01 18:59:50 +00002555 before calculating this offset. This should not be a problem unless
2556 the low bits of start_addr and region_offset differ. */
Avi Kivitydd811242012-01-02 12:17:03 +02002557void cpu_register_physical_memory_log(MemoryRegionSection *section,
Avi Kivityd7ec83e2012-02-08 17:07:26 +02002558 bool readonly)
bellard33417e72003-08-10 21:47:01 +00002559{
Avi Kivitydd811242012-01-02 12:17:03 +02002560 target_phys_addr_t start_addr = section->offset_within_address_space;
2561 ram_addr_t size = section->size;
2562 ram_addr_t phys_offset = section->mr->ram_addr;
2563 ram_addr_t region_offset = section->offset_within_region;
Anthony Liguoric227f092009-10-01 16:12:16 -05002564 target_phys_addr_t addr, end_addr;
bellard92e873b2004-05-21 14:52:29 +00002565 PhysPageDesc *p;
bellard9d420372006-06-25 22:25:22 +00002566 CPUState *env;
Anthony Liguoric227f092009-10-01 16:12:16 -05002567 ram_addr_t orig_size = size;
Richard Hendersonf6405242010-04-22 16:47:31 -07002568 subpage_t *subpage;
bellard33417e72003-08-10 21:47:01 +00002569
Avi Kivitydd811242012-01-02 12:17:03 +02002570 if (memory_region_is_ram(section->mr)) {
2571 phys_offset += region_offset;
2572 region_offset = 0;
2573 }
2574
Avi Kivitydd811242012-01-02 12:17:03 +02002575 if (readonly) {
2576 phys_offset |= io_mem_rom.ram_addr;
2577 }
2578
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002579 assert(size);
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002580
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002581 if (phys_offset == io_mem_unassigned.ram_addr) {
pbrook67c4d232009-02-23 13:16:07 +00002582 region_offset = start_addr;
2583 }
pbrook8da3ff12008-12-01 18:59:50 +00002584 region_offset &= TARGET_PAGE_MASK;
bellard5fd386f2004-05-23 21:11:22 +00002585 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
Anthony Liguoric227f092009-10-01 16:12:16 -05002586 end_addr = start_addr + (target_phys_addr_t)size;
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002587
2588 addr = start_addr;
2589 do {
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02002590 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 0);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002591 if (p && p->phys_offset != io_mem_unassigned.ram_addr) {
Anthony Liguoric227f092009-10-01 16:12:16 -05002592 ram_addr_t orig_memory = p->phys_offset;
2593 target_phys_addr_t start_addr2, end_addr2;
blueswir1db7b5422007-05-26 17:36:03 +00002594 int need_subpage = 0;
Avi Kivity11c7ef02012-01-02 17:21:07 +02002595 MemoryRegion *mr = io_mem_region[orig_memory & ~TARGET_PAGE_MASK];
blueswir1db7b5422007-05-26 17:36:03 +00002596
2597 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2598 need_subpage);
Richard Hendersonf6405242010-04-22 16:47:31 -07002599 if (need_subpage) {
Avi Kivityb3b00c72012-01-02 13:20:11 +02002600 if (!(mr->subpage)) {
blueswir1db7b5422007-05-26 17:36:03 +00002601 subpage = subpage_init((addr & TARGET_PAGE_MASK),
pbrook8da3ff12008-12-01 18:59:50 +00002602 &p->phys_offset, orig_memory,
2603 p->region_offset);
blueswir1db7b5422007-05-26 17:36:03 +00002604 } else {
Avi Kivitya621f382012-01-02 13:12:08 +02002605 subpage = container_of(mr, subpage_t, iomem);
blueswir1db7b5422007-05-26 17:36:03 +00002606 }
pbrook8da3ff12008-12-01 18:59:50 +00002607 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2608 region_offset);
2609 p->region_offset = 0;
blueswir1db7b5422007-05-26 17:36:03 +00002610 } else {
2611 p->phys_offset = phys_offset;
Avi Kivity2774c6d2012-01-01 18:24:24 +02002612 p->region_offset = region_offset;
Avi Kivity1d393fa2012-01-01 21:15:42 +02002613 if (is_ram_rom_romd(phys_offset))
blueswir1db7b5422007-05-26 17:36:03 +00002614 phys_offset += TARGET_PAGE_SIZE;
2615 }
2616 } else {
2617 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2618 p->phys_offset = phys_offset;
pbrook8da3ff12008-12-01 18:59:50 +00002619 p->region_offset = region_offset;
Avi Kivity1d393fa2012-01-01 21:15:42 +02002620 if (is_ram_rom_romd(phys_offset)) {
blueswir1db7b5422007-05-26 17:36:03 +00002621 phys_offset += TARGET_PAGE_SIZE;
pbrook0e8f0962008-12-02 09:02:15 +00002622 } else {
Anthony Liguoric227f092009-10-01 16:12:16 -05002623 target_phys_addr_t start_addr2, end_addr2;
blueswir1db7b5422007-05-26 17:36:03 +00002624 int need_subpage = 0;
2625
2626 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2627 end_addr2, need_subpage);
2628
Richard Hendersonf6405242010-04-22 16:47:31 -07002629 if (need_subpage) {
blueswir1db7b5422007-05-26 17:36:03 +00002630 subpage = subpage_init((addr & TARGET_PAGE_MASK),
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002631 &p->phys_offset,
2632 io_mem_unassigned.ram_addr,
pbrook67c4d232009-02-23 13:16:07 +00002633 addr & TARGET_PAGE_MASK);
blueswir1db7b5422007-05-26 17:36:03 +00002634 subpage_register(subpage, start_addr2, end_addr2,
pbrook8da3ff12008-12-01 18:59:50 +00002635 phys_offset, region_offset);
2636 p->region_offset = 0;
blueswir1db7b5422007-05-26 17:36:03 +00002637 }
2638 }
2639 }
pbrook8da3ff12008-12-01 18:59:50 +00002640 region_offset += TARGET_PAGE_SIZE;
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002641 addr += TARGET_PAGE_SIZE;
2642 } while (addr != end_addr);
ths3b46e622007-09-17 08:09:54 +00002643
bellard9d420372006-06-25 22:25:22 +00002644 /* since each CPU stores ram addresses in its TLB cache, we must
2645 reset the modified entries */
2646 /* XXX: slow ! */
2647 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2648 tlb_flush(env, 1);
2649 }
bellard33417e72003-08-10 21:47:01 +00002650}
2651
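/* Illustration of the offset rule described in the comment above (a
   sketch, not from the original source): for an MMIO section registered
   with region offset R, a guest access at physical address A reaches
   the device callback at

       device_addr = (A & ~TARGET_PAGE_MASK) + R

   which is the same computation cpu_physical_memory_rw() performs via
   p.region_offset when it dispatches to io_mem_read/io_mem_write. */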
Anthony Liguoric227f092009-10-01 16:12:16 -05002652void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002653{
2654 if (kvm_enabled())
2655 kvm_coalesce_mmio_region(addr, size);
2656}
2657
Anthony Liguoric227f092009-10-01 16:12:16 -05002658void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002659{
2660 if (kvm_enabled())
2661 kvm_uncoalesce_mmio_region(addr, size);
2662}
2663
Sheng Yang62a27442010-01-26 19:21:16 +08002664void qemu_flush_coalesced_mmio_buffer(void)
2665{
2666 if (kvm_enabled())
2667 kvm_flush_coalesced_mmio_buffer();
2668}
2669
Marcelo Tosattic9027602010-03-01 20:25:08 -03002670#if defined(__linux__) && !defined(TARGET_S390X)
2671
2672#include <sys/vfs.h>
2673
2674#define HUGETLBFS_MAGIC 0x958458f6
2675
2676static long gethugepagesize(const char *path)
2677{
2678 struct statfs fs;
2679 int ret;
2680
2681 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002682 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002683 } while (ret != 0 && errno == EINTR);
2684
2685 if (ret != 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002686 perror(path);
2687 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002688 }
2689
2690 if (fs.f_type != HUGETLBFS_MAGIC)
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002691 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002692
2693 return fs.f_bsize;
2694}
2695
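/* Typical setup for the hugepage path below (the host commands are
   illustrative only): hugetlbfs is mounted and the mount point is
   passed via -mem-path, which sets the mem_path global consulted by
   the allocation code:

       # mount -t hugetlbfs hugetlbfs /dev/hugepages
       $ qemu -m 1024 -mem-path /dev/hugepages ...

   gethugepagesize() then reports f_bsize of that mount, e.g. 2 MiB on
   common x86 hosts. */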
Alex Williamson04b16652010-07-02 11:13:17 -06002696static void *file_ram_alloc(RAMBlock *block,
2697 ram_addr_t memory,
2698 const char *path)
Marcelo Tosattic9027602010-03-01 20:25:08 -03002699{
2700 char *filename;
2701 void *area;
2702 int fd;
2703#ifdef MAP_POPULATE
2704 int flags;
2705#endif
2706 unsigned long hpagesize;
2707
2708 hpagesize = gethugepagesize(path);
2709 if (!hpagesize) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002710 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002711 }
2712
2713 if (memory < hpagesize) {
2714 return NULL;
2715 }
2716
2717 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2718 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2719 return NULL;
2720 }
2721
2722 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002723 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002724 }
2725
2726 fd = mkstemp(filename);
2727 if (fd < 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002728 perror("unable to create backing store for hugepages");
2729 free(filename);
2730 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002731 }
2732 unlink(filename);
2733 free(filename);
2734
2735 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2736
2737 /*
2738 * ftruncate is not supported by hugetlbfs in older
2739 * hosts, so don't bother bailing out on errors.
2740 * If anything goes wrong with it under other filesystems,
2741 * mmap will fail.
2742 */
2743 if (ftruncate(fd, memory))
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002744 perror("ftruncate");
Marcelo Tosattic9027602010-03-01 20:25:08 -03002745
2746#ifdef MAP_POPULATE
 2747 /* NB: MAP_POPULATE won't exhaustively allocate all physical pages when
 2748 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2749 * to sidestep this quirk.
2750 */
2751 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2752 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2753#else
2754 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2755#endif
2756 if (area == MAP_FAILED) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002757 perror("file_ram_alloc: can't mmap RAM pages");
2758 close(fd);
2759 return (NULL);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002760 }
Alex Williamson04b16652010-07-02 11:13:17 -06002761 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002762 return area;
2763}
2764#endif
2765
Alex Williamsond17b5282010-06-25 11:08:38 -06002766static ram_addr_t find_ram_offset(ram_addr_t size)
2767{
Alex Williamson04b16652010-07-02 11:13:17 -06002768 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06002769 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002770
2771 if (QLIST_EMPTY(&ram_list.blocks))
2772 return 0;
2773
2774 QLIST_FOREACH(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002775 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002776
2777 end = block->offset + block->length;
2778
2779 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2780 if (next_block->offset >= end) {
2781 next = MIN(next, next_block->offset);
2782 }
2783 }
2784 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06002785 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06002786 mingap = next - end;
2787 }
2788 }
Alex Williamson3e837b22011-10-31 08:54:09 -06002789
2790 if (offset == RAM_ADDR_MAX) {
2791 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2792 (uint64_t)size);
2793 abort();
2794 }
2795
Alex Williamson04b16652010-07-02 11:13:17 -06002796 return offset;
2797}
2798
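/* Worked example (illustrative values): with existing blocks at
   [0, 0x100000) and [0x300000, 0x400000), the gaps seen by the loop
   above are 0x200000 bytes at 0x100000 and an unbounded tail after
   0x400000. A request for size = 0x100000 fits both, and the smaller
   gap wins, so find_ram_offset() returns 0x100000. */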
2799static ram_addr_t last_ram_offset(void)
2800{
Alex Williamsond17b5282010-06-25 11:08:38 -06002801 RAMBlock *block;
2802 ram_addr_t last = 0;
2803
2804 QLIST_FOREACH(block, &ram_list.blocks, next)
2805 last = MAX(last, block->offset + block->length);
2806
2807 return last;
2808}
2809
Avi Kivityc5705a72011-12-20 15:59:12 +02002810void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
Cam Macdonell84b89d72010-07-26 18:10:57 -06002811{
2812 RAMBlock *new_block, *block;
2813
Avi Kivityc5705a72011-12-20 15:59:12 +02002814 new_block = NULL;
2815 QLIST_FOREACH(block, &ram_list.blocks, next) {
2816 if (block->offset == addr) {
2817 new_block = block;
2818 break;
2819 }
2820 }
2821 assert(new_block);
2822 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002823
2824 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2825 char *id = dev->parent_bus->info->get_dev_path(dev);
2826 if (id) {
2827 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05002828 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002829 }
2830 }
2831 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2832
2833 QLIST_FOREACH(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02002834 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06002835 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2836 new_block->idstr);
2837 abort();
2838 }
2839 }
Avi Kivityc5705a72011-12-20 15:59:12 +02002840}
2841
2842ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2843 MemoryRegion *mr)
2844{
2845 RAMBlock *new_block;
2846
2847 size = TARGET_PAGE_ALIGN(size);
2848 new_block = g_malloc0(sizeof(*new_block));
Cam Macdonell84b89d72010-07-26 18:10:57 -06002849
Avi Kivity7c637362011-12-21 13:09:49 +02002850 new_block->mr = mr;
Jun Nakajima432d2682010-08-31 16:41:25 +01002851 new_block->offset = find_ram_offset(size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002852 if (host) {
2853 new_block->host = host;
Huang Yingcd19cfa2011-03-02 08:56:19 +01002854 new_block->flags |= RAM_PREALLOC_MASK;
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002855 } else {
2856 if (mem_path) {
2857#if defined (__linux__) && !defined(TARGET_S390X)
2858 new_block->host = file_ram_alloc(new_block, size, mem_path);
2859 if (!new_block->host) {
2860 new_block->host = qemu_vmalloc(size);
Andreas Färbere78815a2010-09-25 11:26:05 +00002861 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002862 }
2863#else
2864 fprintf(stderr, "-mem-path option unsupported\n");
2865 exit(1);
2866#endif
2867 } else {
2868#if defined(TARGET_S390X) && defined(CONFIG_KVM)
Christian Borntraegerff836782011-05-10 14:49:10 +02002869 /* S390 KVM requires the topmost vma of the RAM to be smaller than
 2870 a system-defined value, which is at least 256GB. Larger systems
 2871 have larger values. We put the guest between the end of the data
2872 segment (system break) and this value. We use 32GB as a base to
2873 have enough room for the system break to grow. */
2874 new_block->host = mmap((void*)0x800000000, size,
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002875 PROT_EXEC|PROT_READ|PROT_WRITE,
Christian Borntraegerff836782011-05-10 14:49:10 +02002876 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
Alexander Graffb8b2732011-05-20 17:33:28 +02002877 if (new_block->host == MAP_FAILED) {
2878 fprintf(stderr, "Allocating RAM failed\n");
2879 abort();
2880 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002881#else
Jan Kiszka868bb332011-06-21 22:59:09 +02002882 if (xen_enabled()) {
Avi Kivityfce537d2011-12-18 15:48:55 +02002883 xen_ram_alloc(new_block->offset, size, mr);
Jun Nakajima432d2682010-08-31 16:41:25 +01002884 } else {
2885 new_block->host = qemu_vmalloc(size);
2886 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002887#endif
Andreas Färbere78815a2010-09-25 11:26:05 +00002888 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002889 }
2890 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06002891 new_block->length = size;
2892
2893 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2894
Anthony Liguori7267c092011-08-20 22:09:37 -05002895 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
Cam Macdonell84b89d72010-07-26 18:10:57 -06002896 last_ram_offset() >> TARGET_PAGE_BITS);
2897 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2898 0xff, size >> TARGET_PAGE_BITS);
2899
2900 if (kvm_enabled())
2901 kvm_setup_guest_memory(new_block->host, size);
2902
2903 return new_block->offset;
2904}
2905
Avi Kivityc5705a72011-12-20 15:59:12 +02002906ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
pbrook94a6b542009-04-11 17:15:54 +00002907{
Avi Kivityc5705a72011-12-20 15:59:12 +02002908 return qemu_ram_alloc_from_ptr(size, NULL, mr);
pbrook94a6b542009-04-11 17:15:54 +00002909}
bellarde9a1ab12007-02-08 23:08:38 +00002910
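/* Minimal allocation sketch (both "mr" and the idstr below are
   placeholder names, not from this file): a device that has
   initialized a MemoryRegion can back it with host RAM and name the
   block so migration can match it up:

       ram_addr_t offset = qemu_ram_alloc(0x20000, &mr);
       qemu_ram_set_idstr(offset, "mydev.ram", NULL);
*/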
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002911void qemu_ram_free_from_ptr(ram_addr_t addr)
2912{
2913 RAMBlock *block;
2914
2915 QLIST_FOREACH(block, &ram_list.blocks, next) {
2916 if (addr == block->offset) {
2917 QLIST_REMOVE(block, next);
Anthony Liguori7267c092011-08-20 22:09:37 -05002918 g_free(block);
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002919 return;
2920 }
2921 }
2922}
2923
Anthony Liguoric227f092009-10-01 16:12:16 -05002924void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00002925{
Alex Williamson04b16652010-07-02 11:13:17 -06002926 RAMBlock *block;
2927
2928 QLIST_FOREACH(block, &ram_list.blocks, next) {
2929 if (addr == block->offset) {
2930 QLIST_REMOVE(block, next);
Huang Yingcd19cfa2011-03-02 08:56:19 +01002931 if (block->flags & RAM_PREALLOC_MASK) {
2932 ;
2933 } else if (mem_path) {
Alex Williamson04b16652010-07-02 11:13:17 -06002934#if defined (__linux__) && !defined(TARGET_S390X)
2935 if (block->fd) {
2936 munmap(block->host, block->length);
2937 close(block->fd);
2938 } else {
2939 qemu_vfree(block->host);
2940 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01002941#else
2942 abort();
Alex Williamson04b16652010-07-02 11:13:17 -06002943#endif
2944 } else {
2945#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2946 munmap(block->host, block->length);
2947#else
Jan Kiszka868bb332011-06-21 22:59:09 +02002948 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002949 xen_invalidate_map_cache_entry(block->host);
Jun Nakajima432d2682010-08-31 16:41:25 +01002950 } else {
2951 qemu_vfree(block->host);
2952 }
Alex Williamson04b16652010-07-02 11:13:17 -06002953#endif
2954 }
Anthony Liguori7267c092011-08-20 22:09:37 -05002955 g_free(block);
Alex Williamson04b16652010-07-02 11:13:17 -06002956 return;
2957 }
2958 }
2959
bellarde9a1ab12007-02-08 23:08:38 +00002960}
2961
Huang Yingcd19cfa2011-03-02 08:56:19 +01002962#ifndef _WIN32
2963void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2964{
2965 RAMBlock *block;
2966 ram_addr_t offset;
2967 int flags;
2968 void *area, *vaddr;
2969
2970 QLIST_FOREACH(block, &ram_list.blocks, next) {
2971 offset = addr - block->offset;
2972 if (offset < block->length) {
2973 vaddr = block->host + offset;
2974 if (block->flags & RAM_PREALLOC_MASK) {
2975 ;
2976 } else {
2977 flags = MAP_FIXED;
2978 munmap(vaddr, length);
2979 if (mem_path) {
2980#if defined(__linux__) && !defined(TARGET_S390X)
2981 if (block->fd) {
2982#ifdef MAP_POPULATE
2983 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
2984 MAP_PRIVATE;
2985#else
2986 flags |= MAP_PRIVATE;
2987#endif
2988 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2989 flags, block->fd, offset);
2990 } else {
2991 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2992 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2993 flags, -1, 0);
2994 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01002995#else
2996 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01002997#endif
2998 } else {
2999#if defined(TARGET_S390X) && defined(CONFIG_KVM)
3000 flags |= MAP_SHARED | MAP_ANONYMOUS;
3001 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
3002 flags, -1, 0);
3003#else
3004 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3005 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3006 flags, -1, 0);
3007#endif
3008 }
3009 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00003010 fprintf(stderr, "Could not remap addr: "
3011 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01003012 length, addr);
3013 exit(1);
3014 }
3015 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
3016 }
3017 return;
3018 }
3019 }
3020}
3021#endif /* !_WIN32 */
3022
pbrookdc828ca2009-04-09 22:21:07 +00003023/* Return a host pointer to ram allocated with qemu_ram_alloc.
pbrook5579c7f2009-04-11 14:47:08 +00003024 With the exception of the softmmu code in this file, this should
3025 only be used for local memory (e.g. video ram) that the device owns,
3026 and knows it isn't going to access beyond the end of the block.
3027
3028 It should not be used for general purpose DMA.
3029 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
3030 */
Anthony Liguoric227f092009-10-01 16:12:16 -05003031void *qemu_get_ram_ptr(ram_addr_t addr)
pbrookdc828ca2009-04-09 22:21:07 +00003032{
pbrook94a6b542009-04-11 17:15:54 +00003033 RAMBlock *block;
3034
Alex Williamsonf471a172010-06-11 11:11:42 -06003035 QLIST_FOREACH(block, &ram_list.blocks, next) {
3036 if (addr - block->offset < block->length) {
Vincent Palatin7d82af32011-03-10 15:47:46 -05003037 /* Move this entry to the start of the list. */
3038 if (block != QLIST_FIRST(&ram_list.blocks)) {
3039 QLIST_REMOVE(block, next);
3040 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
3041 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003042 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003043 /* We need to check whether the requested address is in RAM
 3044 * because we don't want to map all of guest memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003045 * In that case, just map up to the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01003046 */
3047 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003048 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01003049 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003050 block->host =
3051 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01003052 }
3053 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003054 return block->host + (addr - block->offset);
3055 }
pbrook94a6b542009-04-11 17:15:54 +00003056 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003057
3058 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3059 abort();
3060
3061 return NULL;
pbrookdc828ca2009-04-09 22:21:07 +00003062}
3063
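/* Sketch of the intended access pattern (illustrative): a short-lived
   mapping bracketed with qemu_put_ram_ptr() so the Xen map cache can
   release it, never reaching past the end of the block:

       uint8_t *p = qemu_get_ram_ptr(offset);
       memcpy(p, data, len);    // len must stay within the block
       qemu_put_ram_ptr(p);
*/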
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02003064/* Return a host pointer to ram allocated with qemu_ram_alloc.
3065 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3066 */
3067void *qemu_safe_ram_ptr(ram_addr_t addr)
3068{
3069 RAMBlock *block;
3070
3071 QLIST_FOREACH(block, &ram_list.blocks, next) {
3072 if (addr - block->offset < block->length) {
Jan Kiszka868bb332011-06-21 22:59:09 +02003073 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003074 /* We need to check whether the requested address is in RAM
 3075 * because we don't want to map all of guest memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003076 * In that case, just map up to the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01003077 */
3078 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003079 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01003080 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003081 block->host =
3082 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01003083 }
3084 }
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02003085 return block->host + (addr - block->offset);
3086 }
3087 }
3088
3089 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3090 abort();
3091
3092 return NULL;
3093}
3094
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003095/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
3096 * but takes a size argument */
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003097void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003098{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003099 if (*size == 0) {
3100 return NULL;
3101 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003102 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003103 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02003104 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003105 RAMBlock *block;
3106
3107 QLIST_FOREACH(block, &ram_list.blocks, next) {
3108 if (addr - block->offset < block->length) {
3109 if (addr - block->offset + *size > block->length)
3110 *size = block->length - addr + block->offset;
3111 return block->host + (addr - block->offset);
3112 }
3113 }
3114
3115 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3116 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003117 }
3118}
3119
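/* Clamping example (illustrative numbers): for a block of length
   0x3000 at offset 0x1000, a request for 0x4000 bytes at addr 0x2000
   is truncated to what remains of the block:

       ram_addr_t sz = 0x4000;
       void *p = qemu_ram_ptr_length(0x2000, &sz);   // sz becomes 0x2000
*/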
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003120void qemu_put_ram_ptr(void *addr)
3121{
3122 trace_qemu_put_ram_ptr(addr);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003123}
3124
Marcelo Tosattie8902612010-10-11 15:31:19 -03003125int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00003126{
pbrook94a6b542009-04-11 17:15:54 +00003127 RAMBlock *block;
3128 uint8_t *host = ptr;
3129
Jan Kiszka868bb332011-06-21 22:59:09 +02003130 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003131 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003132 return 0;
3133 }
3134
Alex Williamsonf471a172010-06-11 11:11:42 -06003135 QLIST_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003136 /* This can happen when the block is not mapped. */
3137 if (block->host == NULL) {
3138 continue;
3139 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003140 if (host - block->host < block->length) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03003141 *ram_addr = block->offset + (host - block->host);
3142 return 0;
Alex Williamsonf471a172010-06-11 11:11:42 -06003143 }
pbrook94a6b542009-04-11 17:15:54 +00003144 }
Jun Nakajima432d2682010-08-31 16:41:25 +01003145
Marcelo Tosattie8902612010-10-11 15:31:19 -03003146 return -1;
3147}
Alex Williamsonf471a172010-06-11 11:11:42 -06003148
Marcelo Tosattie8902612010-10-11 15:31:19 -03003149/* Some of the softmmu routines need to translate from a host pointer
3150 (typically a TLB entry) back to a ram offset. */
3151ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3152{
3153 ram_addr_t ram_addr;
Alex Williamsonf471a172010-06-11 11:11:42 -06003154
Marcelo Tosattie8902612010-10-11 15:31:19 -03003155 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3156 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3157 abort();
3158 }
3159 return ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00003160}
3161
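/* The two translations invert each other for any address inside a
   mapped block (a sketch):

       void *p = qemu_get_ram_ptr(offset);
       assert(qemu_ram_addr_from_host_nofail(p) == offset);
*/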
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003162static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
3163 unsigned size)
bellard33417e72003-08-10 21:47:01 +00003164{
pbrook67d3b952006-12-18 05:03:52 +00003165#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00003166 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
pbrook67d3b952006-12-18 05:03:52 +00003167#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003168#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003169 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00003170#endif
3171 return 0;
3172}
3173
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003174static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
3175 uint64_t val, unsigned size)
blueswir1e18231a2008-10-06 18:46:28 +00003176{
3177#ifdef DEBUG_UNASSIGNED
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003178 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
blueswir1e18231a2008-10-06 18:46:28 +00003179#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003180#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003181 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00003182#endif
3183}
3184
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003185static const MemoryRegionOps unassigned_mem_ops = {
3186 .read = unassigned_mem_read,
3187 .write = unassigned_mem_write,
3188 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00003189};
3190
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003191static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
3192 unsigned size)
3193{
3194 abort();
3195}
3196
3197static void error_mem_write(void *opaque, target_phys_addr_t addr,
3198 uint64_t value, unsigned size)
3199{
3200 abort();
3201}
3202
3203static const MemoryRegionOps error_mem_ops = {
3204 .read = error_mem_read,
3205 .write = error_mem_write,
3206 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00003207};
3208
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003209static const MemoryRegionOps rom_mem_ops = {
3210 .read = error_mem_read,
3211 .write = unassigned_mem_write,
3212 .endianness = DEVICE_NATIVE_ENDIAN,
3213};
3214
3215static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
3216 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00003217{
bellard3a7d9292005-08-21 09:26:42 +00003218 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003219 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003220 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3221#if !defined(CONFIG_USER_ONLY)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003222 tb_invalidate_phys_page_fast(ram_addr, size);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003223 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003224#endif
3225 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003226 switch (size) {
3227 case 1:
3228 stb_p(qemu_get_ram_ptr(ram_addr), val);
3229 break;
3230 case 2:
3231 stw_p(qemu_get_ram_ptr(ram_addr), val);
3232 break;
3233 case 4:
3234 stl_p(qemu_get_ram_ptr(ram_addr), val);
3235 break;
3236 default:
3237 abort();
3238 }
bellardf23db162005-08-21 19:12:28 +00003239 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003240 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00003241 /* we remove the notdirty callback only if the code has been
3242 flushed */
3243 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00003244 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00003245}
3246
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003247static const MemoryRegionOps notdirty_mem_ops = {
3248 .read = error_mem_read,
3249 .write = notdirty_mem_write,
3250 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00003251};
3252
pbrook0f459d12008-06-09 00:20:13 +00003253/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00003254static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00003255{
3256 CPUState *env = cpu_single_env;
aliguori06d55cc2008-11-18 20:24:06 +00003257 target_ulong pc, cs_base;
3258 TranslationBlock *tb;
pbrook0f459d12008-06-09 00:20:13 +00003259 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00003260 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00003261 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00003262
aliguori06d55cc2008-11-18 20:24:06 +00003263 if (env->watchpoint_hit) {
3264 /* We re-entered the check after replacing the TB. Now raise
 3265 * the debug interrupt so that it will trigger after the
3266 * current instruction. */
3267 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3268 return;
3269 }
pbrook2e70f6e2008-06-29 01:03:05 +00003270 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003271 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00003272 if ((vaddr == (wp->vaddr & len_mask) ||
3273 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00003274 wp->flags |= BP_WATCHPOINT_HIT;
3275 if (!env->watchpoint_hit) {
3276 env->watchpoint_hit = wp;
3277 tb = tb_find_pc(env->mem_io_pc);
3278 if (!tb) {
3279 cpu_abort(env, "check_watchpoint: could not find TB for "
3280 "pc=%p", (void *)env->mem_io_pc);
3281 }
Stefan Weil618ba8e2011-04-18 06:39:53 +00003282 cpu_restore_state(tb, env, env->mem_io_pc);
aliguori6e140f22008-11-18 20:37:55 +00003283 tb_phys_invalidate(tb, -1);
3284 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3285 env->exception_index = EXCP_DEBUG;
3286 } else {
3287 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3288 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3289 }
3290 cpu_resume_from_signal(env, NULL);
aliguori06d55cc2008-11-18 20:24:06 +00003291 }
aliguori6e140f22008-11-18 20:37:55 +00003292 } else {
3293 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00003294 }
3295 }
3296}
3297
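/* How a watchpoint reaches this path (a sketch; the length and flags
   are illustrative): a debug front end inserts one, the TLB entry for
   the page is redirected to io_mem_watch, and the handlers below call
   check_watchpoint() before completing the access:

       CPUWatchpoint *wp;
       cpu_watchpoint_insert(env, vaddr, 4, BP_MEM_WRITE, &wp);
*/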
pbrook6658ffb2007-03-16 23:58:11 +00003298/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3299 so these check for a hit then pass through to the normal out-of-line
3300 phys routines. */
Avi Kivity1ec9b902012-01-02 12:47:48 +02003301static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
3302 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00003303{
Avi Kivity1ec9b902012-01-02 12:47:48 +02003304 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
3305 switch (size) {
3306 case 1: return ldub_phys(addr);
3307 case 2: return lduw_phys(addr);
3308 case 4: return ldl_phys(addr);
3309 default: abort();
3310 }
pbrook6658ffb2007-03-16 23:58:11 +00003311}
3312
Avi Kivity1ec9b902012-01-02 12:47:48 +02003313static void watch_mem_write(void *opaque, target_phys_addr_t addr,
3314 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00003315{
Avi Kivity1ec9b902012-01-02 12:47:48 +02003316 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
3317 switch (size) {
 3318 case 1: stb_phys(addr, val); break;
 3319 case 2: stw_phys(addr, val); break;
 3320 case 4: stl_phys(addr, val); break;
3321 default: abort();
3322 }
pbrook6658ffb2007-03-16 23:58:11 +00003323}
3324
Avi Kivity1ec9b902012-01-02 12:47:48 +02003325static const MemoryRegionOps watch_mem_ops = {
3326 .read = watch_mem_read,
3327 .write = watch_mem_write,
3328 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00003329};
pbrook6658ffb2007-03-16 23:58:11 +00003330
Avi Kivity70c68e42012-01-02 12:32:48 +02003331static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
3332 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003333{
Avi Kivity70c68e42012-01-02 12:32:48 +02003334 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003335 unsigned int idx = SUBPAGE_IDX(addr);
blueswir1db7b5422007-05-26 17:36:03 +00003336#if defined(DEBUG_SUBPAGE)
3337 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3338 mmio, len, addr, idx);
3339#endif
blueswir1db7b5422007-05-26 17:36:03 +00003340
Richard Hendersonf6405242010-04-22 16:47:31 -07003341 addr += mmio->region_offset[idx];
3342 idx = mmio->sub_io_index[idx];
Avi Kivity70c68e42012-01-02 12:32:48 +02003343 return io_mem_read(idx, addr, len);
blueswir1db7b5422007-05-26 17:36:03 +00003344}
3345
Avi Kivity70c68e42012-01-02 12:32:48 +02003346static void subpage_write(void *opaque, target_phys_addr_t addr,
3347 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003348{
Avi Kivity70c68e42012-01-02 12:32:48 +02003349 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003350 unsigned int idx = SUBPAGE_IDX(addr);
blueswir1db7b5422007-05-26 17:36:03 +00003351#if defined(DEBUG_SUBPAGE)
Avi Kivity70c68e42012-01-02 12:32:48 +02003352 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
3353 " idx %d value %"PRIx64"\n",
Richard Hendersonf6405242010-04-22 16:47:31 -07003354 __func__, mmio, len, addr, idx, value);
blueswir1db7b5422007-05-26 17:36:03 +00003355#endif
Richard Hendersonf6405242010-04-22 16:47:31 -07003356
3357 addr += mmio->region_offset[idx];
3358 idx = mmio->sub_io_index[idx];
Avi Kivity70c68e42012-01-02 12:32:48 +02003359 io_mem_write(idx, addr, value, len);
blueswir1db7b5422007-05-26 17:36:03 +00003360}
3361
Avi Kivity70c68e42012-01-02 12:32:48 +02003362static const MemoryRegionOps subpage_ops = {
3363 .read = subpage_read,
3364 .write = subpage_write,
3365 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00003366};
3367
Avi Kivityde712f92012-01-02 12:41:07 +02003368static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
3369 unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01003370{
3371 ram_addr_t raddr = addr;
3372 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02003373 switch (size) {
3374 case 1: return ldub_p(ptr);
3375 case 2: return lduw_p(ptr);
3376 case 4: return ldl_p(ptr);
3377 default: abort();
3378 }
Andreas Färber56384e82011-11-30 16:26:21 +01003379}
3380
Avi Kivityde712f92012-01-02 12:41:07 +02003381static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
3382 uint64_t value, unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01003383{
3384 ram_addr_t raddr = addr;
3385 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02003386 switch (size) {
3387 case 1: return stb_p(ptr, value);
3388 case 2: return stw_p(ptr, value);
3389 case 4: return stl_p(ptr, value);
3390 default: abort();
3391 }
Andreas Färber56384e82011-11-30 16:26:21 +01003392}
3393
Avi Kivityde712f92012-01-02 12:41:07 +02003394static const MemoryRegionOps subpage_ram_ops = {
3395 .read = subpage_ram_read,
3396 .write = subpage_ram_write,
3397 .endianness = DEVICE_NATIVE_ENDIAN,
Andreas Färber56384e82011-11-30 16:26:21 +01003398};
3399
Anthony Liguoric227f092009-10-01 16:12:16 -05003400static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3401 ram_addr_t memory, ram_addr_t region_offset)
blueswir1db7b5422007-05-26 17:36:03 +00003402{
3403 int idx, eidx;
3404
3405 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3406 return -1;
3407 idx = SUBPAGE_IDX(start);
3408 eidx = SUBPAGE_IDX(end);
3409#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00003410 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
blueswir1db7b5422007-05-26 17:36:03 +00003411 mmio, start, end, idx, eidx, memory);
3412#endif
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003413 if ((memory & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
Avi Kivityde712f92012-01-02 12:41:07 +02003414 memory = io_mem_subpage_ram.ram_addr;
Andreas Färber56384e82011-11-30 16:26:21 +01003415 }
Avi Kivity11c7ef02012-01-02 17:21:07 +02003416 memory &= IO_MEM_NB_ENTRIES - 1;
blueswir1db7b5422007-05-26 17:36:03 +00003417 for (; idx <= eidx; idx++) {
Richard Hendersonf6405242010-04-22 16:47:31 -07003418 mmio->sub_io_index[idx] = memory;
3419 mmio->region_offset[idx] = region_offset;
blueswir1db7b5422007-05-26 17:36:03 +00003420 }
3421
3422 return 0;
3423}
3424
Richard Hendersonf6405242010-04-22 16:47:31 -07003425static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3426 ram_addr_t orig_memory,
3427 ram_addr_t region_offset)
blueswir1db7b5422007-05-26 17:36:03 +00003428{
Anthony Liguoric227f092009-10-01 16:12:16 -05003429 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00003430 int subpage_memory;
3431
Anthony Liguori7267c092011-08-20 22:09:37 -05003432 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00003433
3434 mmio->base = base;
Avi Kivity70c68e42012-01-02 12:32:48 +02003435 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
3436 "subpage", TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02003437 mmio->iomem.subpage = true;
Avi Kivity70c68e42012-01-02 12:32:48 +02003438 subpage_memory = mmio->iomem.ram_addr;
blueswir1db7b5422007-05-26 17:36:03 +00003439#if defined(DEBUG_SUBPAGE)
aliguori1eec6142009-02-05 22:06:18 +00003440 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3441 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
blueswir1db7b5422007-05-26 17:36:03 +00003442#endif
Avi Kivityb3b00c72012-01-02 13:20:11 +02003443 *phys = subpage_memory;
Richard Hendersonf6405242010-04-22 16:47:31 -07003444 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
blueswir1db7b5422007-05-26 17:36:03 +00003445
3446 return mmio;
3447}
3448
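/* Worked sketch (illustrative indices, assuming 4 KiB target pages):
   two 0x100-byte devices mapped at page offsets 0x000 and 0x800 share
   one subpage container; each claims its byte range, and unclaimed
   bytes keep the backing that subpage_init() installed for the page:

       subpage_register(mmio, 0x000, 0x0ff, devA_io_index, 0);
       subpage_register(mmio, 0x800, 0x8ff, devB_io_index, 0);
*/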
aliguori88715652009-02-11 15:20:58 +00003449static int get_free_io_mem_idx(void)
3450{
3451 int i;
3452
3453 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3454 if (!io_mem_used[i]) {
3455 io_mem_used[i] = 1;
3456 return i;
3457 }
Riku Voipioc6703b42009-12-03 15:56:05 +02003458 fprintf(stderr, "Ran out of io_mem_idx entries, max %d!\n", IO_MEM_NB_ENTRIES);
aliguori88715652009-02-11 15:20:58 +00003459 return -1;
3460}
3461
bellard33417e72003-08-10 21:47:01 +00003462/* Register a MemoryRegion as an io zone; its callbacks handle the
 3463 byte, word and dword accesses for the registered range.
blueswir13ee89922008-01-02 19:45:26 +00003464 If io_index is non-zero, the corresponding io zone is
blueswir14254fab2008-01-01 16:57:19 +00003465 modified. If it is zero, a new io zone is allocated. The return
 3466 value can be used with cpu_register_physical_memory(). (-1) is
 3467 returned on error. */
Avi Kivitya621f382012-01-02 13:12:08 +02003469static int cpu_register_io_memory_fixed(int io_index, MemoryRegion *mr)
bellard33417e72003-08-10 21:47:01 +00003470{
bellard33417e72003-08-10 21:47:01 +00003471 if (io_index <= 0) {
aliguori88715652009-02-11 15:20:58 +00003472 io_index = get_free_io_mem_idx();
3473 if (io_index == -1)
3474 return io_index;
bellard33417e72003-08-10 21:47:01 +00003475 } else {
3476 if (io_index >= IO_MEM_NB_ENTRIES)
3477 return -1;
3478 }
bellardb5ff1b32005-11-26 10:38:39 +00003479
Avi Kivitya621f382012-01-02 13:12:08 +02003480 io_mem_region[io_index] = mr;
Richard Hendersonf6405242010-04-22 16:47:31 -07003481
Avi Kivity11c7ef02012-01-02 17:21:07 +02003482 return io_index;
bellard33417e72003-08-10 21:47:01 +00003483}
bellard61382a52003-10-27 21:22:23 +00003484
Avi Kivitya621f382012-01-02 13:12:08 +02003485int cpu_register_io_memory(MemoryRegion *mr)
Avi Kivity1eed09c2009-06-14 11:38:51 +03003486{
Avi Kivitya621f382012-01-02 13:12:08 +02003487 return cpu_register_io_memory_fixed(0, mr);
Avi Kivity1eed09c2009-06-14 11:38:51 +03003488}
3489
Avi Kivity11c7ef02012-01-02 17:21:07 +02003490void cpu_unregister_io_memory(int io_index)
aliguori88715652009-02-11 15:20:58 +00003491{
Avi Kivitya621f382012-01-02 13:12:08 +02003492 io_mem_region[io_index] = NULL;
aliguori88715652009-02-11 15:20:58 +00003493 io_mem_used[io_index] = 0;
3494}
3495
Avi Kivitye9179ce2009-06-14 11:38:52 +03003496static void io_mem_init(void)
3497{
3498 int i;
3499
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003500 /* Must be first: */
3501 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
3502 assert(io_mem_ram.ram_addr == 0);
3503 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
3504 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
3505 "unassigned", UINT64_MAX);
3506 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
3507 "notdirty", UINT64_MAX);
Avi Kivityde712f92012-01-02 12:41:07 +02003508 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
3509 "subpage-ram", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003510 for (i=0; i<5; i++)
3511 io_mem_used[i] = 1;
3512
Avi Kivity1ec9b902012-01-02 12:47:48 +02003513 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
3514 "watch", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003515}
3516
Avi Kivity50c1e142012-02-08 21:36:02 +02003517static void core_begin(MemoryListener *listener)
3518{
Avi Kivity54688b12012-02-09 17:34:32 +02003519 destroy_all_mappings();
Avi Kivity50c1e142012-02-08 21:36:02 +02003520}
3521
3522static void core_commit(MemoryListener *listener)
3523{
3524}
3525
Avi Kivity93632742012-02-08 16:54:16 +02003526static void core_region_add(MemoryListener *listener,
3527 MemoryRegionSection *section)
3528{
Avi Kivity4855d412012-02-08 21:16:05 +02003529 cpu_register_physical_memory_log(section, section->readonly);
Avi Kivity93632742012-02-08 16:54:16 +02003530}
3531
3532static void core_region_del(MemoryListener *listener,
3533 MemoryRegionSection *section)
3534{
Avi Kivity93632742012-02-08 16:54:16 +02003535}
3536
Avi Kivity50c1e142012-02-08 21:36:02 +02003537static void core_region_nop(MemoryListener *listener,
3538 MemoryRegionSection *section)
3539{
Avi Kivity54688b12012-02-09 17:34:32 +02003540 cpu_register_physical_memory_log(section, section->readonly);
Avi Kivity50c1e142012-02-08 21:36:02 +02003541}
3542
Avi Kivity93632742012-02-08 16:54:16 +02003543static void core_log_start(MemoryListener *listener,
3544 MemoryRegionSection *section)
3545{
3546}
3547
3548static void core_log_stop(MemoryListener *listener,
3549 MemoryRegionSection *section)
3550{
3551}
3552
3553static void core_log_sync(MemoryListener *listener,
3554 MemoryRegionSection *section)
3555{
3556}
3557
3558static void core_log_global_start(MemoryListener *listener)
3559{
3560 cpu_physical_memory_set_dirty_tracking(1);
3561}
3562
3563static void core_log_global_stop(MemoryListener *listener)
3564{
3565 cpu_physical_memory_set_dirty_tracking(0);
3566}
3567
3568static void core_eventfd_add(MemoryListener *listener,
3569 MemoryRegionSection *section,
3570 bool match_data, uint64_t data, int fd)
3571{
3572}
3573
3574static void core_eventfd_del(MemoryListener *listener,
3575 MemoryRegionSection *section,
3576 bool match_data, uint64_t data, int fd)
3577{
3578}
3579
Avi Kivity50c1e142012-02-08 21:36:02 +02003580static void io_begin(MemoryListener *listener)
3581{
3582}
3583
3584static void io_commit(MemoryListener *listener)
3585{
3586}
3587
Avi Kivity4855d412012-02-08 21:16:05 +02003588static void io_region_add(MemoryListener *listener,
3589 MemoryRegionSection *section)
3590{
3591 iorange_init(&section->mr->iorange, &memory_region_iorange_ops,
3592 section->offset_within_address_space, section->size);
3593 ioport_register(&section->mr->iorange);
3594}
3595
3596static void io_region_del(MemoryListener *listener,
3597 MemoryRegionSection *section)
3598{
3599 isa_unassign_ioport(section->offset_within_address_space, section->size);
3600}
3601
Avi Kivity50c1e142012-02-08 21:36:02 +02003602static void io_region_nop(MemoryListener *listener,
3603 MemoryRegionSection *section)
3604{
3605}
3606
Avi Kivity4855d412012-02-08 21:16:05 +02003607static void io_log_start(MemoryListener *listener,
3608 MemoryRegionSection *section)
3609{
3610}
3611
3612static void io_log_stop(MemoryListener *listener,
3613 MemoryRegionSection *section)
3614{
3615}
3616
3617static void io_log_sync(MemoryListener *listener,
3618 MemoryRegionSection *section)
3619{
3620}
3621
3622static void io_log_global_start(MemoryListener *listener)
3623{
3624}
3625
3626static void io_log_global_stop(MemoryListener *listener)
3627{
3628}
3629
3630static void io_eventfd_add(MemoryListener *listener,
3631 MemoryRegionSection *section,
3632 bool match_data, uint64_t data, int fd)
3633{
3634}
3635
3636static void io_eventfd_del(MemoryListener *listener,
3637 MemoryRegionSection *section,
3638 bool match_data, uint64_t data, int fd)
3639{
3640}
3641
Avi Kivity93632742012-02-08 16:54:16 +02003642static MemoryListener core_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02003643 .begin = core_begin,
3644 .commit = core_commit,
Avi Kivity93632742012-02-08 16:54:16 +02003645 .region_add = core_region_add,
3646 .region_del = core_region_del,
Avi Kivity50c1e142012-02-08 21:36:02 +02003647 .region_nop = core_region_nop,
Avi Kivity93632742012-02-08 16:54:16 +02003648 .log_start = core_log_start,
3649 .log_stop = core_log_stop,
3650 .log_sync = core_log_sync,
3651 .log_global_start = core_log_global_start,
3652 .log_global_stop = core_log_global_stop,
3653 .eventfd_add = core_eventfd_add,
3654 .eventfd_del = core_eventfd_del,
3655 .priority = 0,
3656};
3657
Avi Kivity4855d412012-02-08 21:16:05 +02003658static MemoryListener io_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02003659 .begin = io_begin,
3660 .commit = io_commit,
Avi Kivity4855d412012-02-08 21:16:05 +02003661 .region_add = io_region_add,
3662 .region_del = io_region_del,
Avi Kivity50c1e142012-02-08 21:36:02 +02003663 .region_nop = io_region_nop,
Avi Kivity4855d412012-02-08 21:16:05 +02003664 .log_start = io_log_start,
3665 .log_stop = io_log_stop,
3666 .log_sync = io_log_sync,
3667 .log_global_start = io_log_global_start,
3668 .log_global_stop = io_log_global_stop,
3669 .eventfd_add = io_eventfd_add,
3670 .eventfd_del = io_eventfd_del,
3671 .priority = 0,
3672};
3673
Avi Kivity62152b82011-07-26 14:26:14 +03003674static void memory_map_init(void)
3675{
Anthony Liguori7267c092011-08-20 22:09:37 -05003676 system_memory = g_malloc(sizeof(*system_memory));
Avi Kivity8417ceb2011-08-03 11:56:14 +03003677 memory_region_init(system_memory, "system", INT64_MAX);
Avi Kivity62152b82011-07-26 14:26:14 +03003678 set_system_memory_map(system_memory);
Avi Kivity309cb472011-08-08 16:09:03 +03003679
Anthony Liguori7267c092011-08-20 22:09:37 -05003680 system_io = g_malloc(sizeof(*system_io));
Avi Kivity309cb472011-08-08 16:09:03 +03003681 memory_region_init(system_io, "io", 65536);
3682 set_system_io_map(system_io);
Avi Kivity93632742012-02-08 16:54:16 +02003683
Avi Kivity4855d412012-02-08 21:16:05 +02003684 memory_listener_register(&core_memory_listener, system_memory);
3685 memory_listener_register(&io_memory_listener, system_io);
Avi Kivity62152b82011-07-26 14:26:14 +03003686}
3687
3688MemoryRegion *get_system_memory(void)
3689{
3690 return system_memory;
3691}
3692
Avi Kivity309cb472011-08-08 16:09:03 +03003693MemoryRegion *get_system_io(void)
3694{
3695 return system_io;
3696}
3697
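/* Sketch of how boards use these accessors (the device names are
   assumptions, not from this file): RAM and device regions are
   composed under the root returned by get_system_memory():

       MemoryRegion *sysmem = get_system_memory();
       memory_region_add_subregion(sysmem, 0x10000000, &dev->iomem);
*/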
pbrooke2eef172008-06-08 01:09:01 +00003698#endif /* !defined(CONFIG_USER_ONLY) */
3699
bellard13eb76e2004-01-24 15:23:36 +00003700/* physical memory access (slow version, mainly for debug) */
3701#if defined(CONFIG_USER_ONLY)
Paul Brooka68fe892010-03-01 00:08:59 +00003702int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3703 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003704{
3705 int l, flags;
3706 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00003707 void * p;
bellard13eb76e2004-01-24 15:23:36 +00003708
3709 while (len > 0) {
3710 page = addr & TARGET_PAGE_MASK;
3711 l = (page + TARGET_PAGE_SIZE) - addr;
3712 if (l > len)
3713 l = len;
3714 flags = page_get_flags(page);
3715 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00003716 return -1;
bellard13eb76e2004-01-24 15:23:36 +00003717 if (is_write) {
3718 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00003719 return -1;
bellard579a97f2007-11-11 14:26:47 +00003720 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003721 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00003722 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003723 memcpy(p, buf, l);
3724 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00003725 } else {
3726 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00003727 return -1;
bellard579a97f2007-11-11 14:26:47 +00003728 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003729 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00003730 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003731 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00003732 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00003733 }
3734 len -= l;
3735 buf += l;
3736 addr += l;
3737 }
Paul Brooka68fe892010-03-01 00:08:59 +00003738 return 0;
bellard13eb76e2004-01-24 15:23:36 +00003739}
bellard8df1cd02005-01-28 22:37:22 +00003740
bellard13eb76e2004-01-24 15:23:36 +00003741#else
Anthony Liguoric227f092009-10-01 16:12:16 -05003742void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
bellard13eb76e2004-01-24 15:23:36 +00003743 int len, int is_write)
3744{
3745 int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    ram_addr_t pd;
    PhysPageDesc p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        pd = p.phys_offset;

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
                target_phys_addr_t addr1;
                io_index = pd & (IO_MEM_NB_ENTRIES - 1);
                addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write(io_index, addr1, val, 4);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write(io_index, addr1, val, 2);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write(io_index, addr1, val, 1);
                    l = 1;
                }
            } else {
                ram_addr_t addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                qemu_put_ram_ptr(ptr);
            }
        } else {
            if (!is_ram_rom_romd(pd)) {
                target_phys_addr_t addr1;
                /* I/O case */
                io_index = pd & (IO_MEM_NB_ENTRIES - 1);
                addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read(io_index, addr1, 4);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read(io_index, addr1, 2);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read(io_index, addr1, 1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
                memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
                qemu_put_ram_ptr(ptr);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
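
/* Illustrative usage sketch (not part of the original file): a device
   model reading a guest-physical buffer through the generic accessor
   above.  The helper name is hypothetical; cpu_physical_memory_rw()
   splits the transfer at page boundaries and dispatches each fragment
   to RAM or device I/O as shown. */
static void example_copy_from_guest(target_phys_addr_t gpa,
                                    uint8_t *dst, int len)
{
    cpu_physical_memory_rw(gpa, dst, len, 0 /* is_write == 0: read */);
}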

/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        pd = p.phys_offset;

        if (!is_ram_rom_romd(pd)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            qemu_put_ram_ptr(ptr);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
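
/* Illustrative usage sketch (not part of the original file): loading a
   firmware image with cpu_physical_memory_write_rom(), which, unlike
   cpu_physical_memory_rw(), may also write into ROM regions.  The base
   address and helper name are hypothetical. */
static void example_load_firmware(const uint8_t *image, int size)
{
    cpu_physical_memory_write_rom(0xfffc0000 /* hypothetical base */,
                                  image, size);
}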

typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
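
/* Illustrative usage sketch (not part of the original file): the retry
   protocol the map-client list implements.  A caller whose
   cpu_physical_memory_map() attempt fails registers a callback; when a
   mapping is released, cpu_notify_map_clients() invokes the callback and
   then unregisters the client.  The names below are hypothetical. */
static void example_map_retry_cb(void *opaque)
{
    /* a previously exhausted resource (e.g. the bounce buffer) has been
       freed; retry the mapping from here */
}

static void example_request_mapping(target_phys_addr_t addr,
                                    target_phys_addr_t len)
{
    if (!cpu_physical_memory_map(addr, &len, 1 /* is_write */)) {
        cpu_register_map_client(NULL, example_map_retry_cb);
    }
}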

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t todo = 0;
    int l;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc p;
    ram_addr_t raddr = RAM_ADDR_MAX;
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        pd = p.phys_offset;

        if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_read(addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        }

        len -= l;
        addr += l;
        todo += l;
    }
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
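
/* Illustrative usage sketch (not part of the original file): the
   map/use/unmap sequence for zero-copy access.  *plen may come back
   shorter than requested (or the call may fall back to the single bounce
   buffer), so real callers loop over the remainder.  The helper name is
   hypothetical. */
static void example_dma_to_guest(target_phys_addr_t addr,
                                 const uint8_t *data,
                                 target_phys_addr_t len)
{
    target_phys_addr_t plen = len;
    void *host = cpu_physical_memory_map(addr, &plen, 1 /* is_write */);

    if (host) {
        memcpy(host, data, plen);
        /* access_len == plen: everything we mapped was written */
        cpu_physical_memory_unmap(host, plen, 1, plen);
    }
}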

/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if (!is_ram_rom_romd(pd)) {
        /* I/O case */
        io_index = pd & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
        val = io_mem_read(io_index, addr, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if (!is_ram_rom_romd(pd)) {
        /* I/O case */
        io_index = pd & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;

        /* XXX This is broken when device endian != cpu endian.
           Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = io_mem_read(io_index, addr, 4) << 32;
        val |= io_mem_read(io_index, addr + 4, 4);
#else
        val = io_mem_read(io_index, addr, 4);
        val |= io_mem_read(io_index, addr + 4, 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
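
/* Illustrative usage sketch (not part of the original file): a
   virtio-style device reading a 64-bit little-endian field from a
   guest-physical descriptor.  The explicit-endian accessor gives the
   same result regardless of host and target byte order.  The helper
   name and descriptor layout are hypothetical. */
static uint64_t example_read_desc_addr(target_phys_addr_t desc_pa)
{
    return ldq_le_phys(desc_pa);
}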

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
                                          enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if (!is_ram_rom_romd(pd)) {
        /* I/O case */
        io_index = pd & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
        val = io_mem_read(io_index, addr, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
        io_index = pd & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
        io_mem_write(io_index, addr, val, 4);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
        io_index = pd & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write(io_index, addr, val >> 32, 4);
        io_mem_write(io_index, addr + 4, (uint32_t)val, 4);
#else
        io_mem_write(io_index, addr, (uint32_t)val, 4);
        io_mem_write(io_index, addr + 4, val >> 32, 4);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
        io_index = pd & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(io_index, addr, val, 4);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
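
/* Illustrative usage sketch (not part of the original file): the store
   counterpart of the explicit-endian loads above.  For the RAM case,
   stl_le_phys() also invalidates translated code on the page and updates
   the dirty bitmap, so a device model needs no extra bookkeeping.  The
   helper name is hypothetical. */
static void example_write_status(target_phys_addr_t status_pa,
                                 uint32_t status)
{
    stl_le_phys(status_pa, status);
}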

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
        io_index = pd & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(io_index, addr, val, 2);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
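
/* Illustrative usage sketch (not part of the original file): a gdb-stub
   style read of guest *virtual* memory.  The translation goes through
   cpu_get_phys_page_debug() as shown above, and -1 is returned if any
   page in the range is unmapped.  The helper name is hypothetical. */
static int example_debug_read(CPUState *env, target_ulong vaddr,
                              uint8_t *buf, int len)
{
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0 /* read */);
}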
#endif

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}

#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
   is the offset relative to phys_ram_base */
tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    void *p;

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1);
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
        ldub_code(addr);
    }
    pd = env1->tlb_table[mmu_idx][page_index].addr_code & ~TARGET_PAGE_MASK;
    if (pd != io_mem_ram.ram_addr && pd != io_mem_rom.ram_addr
        && !is_romd(pd)) {
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC)
        cpu_unassigned_access(env1, addr, 0, 1, 0, 4);
#else
        cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
#endif
    }
    p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif