/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump.  ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;

#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define P_L1_BITS_REM \
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

/* Size of the L1 page table.  Avoid silly small sizes.  */
#if P_L1_BITS_REM < 4
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
#else
#define P_L1_BITS  P_L1_BITS_REM
#endif

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
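/* Worked example (an illustration, not from the original source):
   assuming TARGET_PHYS_ADDR_SPACE_BITS = 42 and TARGET_PAGE_BITS = 12,
   30 bits of page index remain.  30 % 10 == 0, which is < 4, so
   P_L1_BITS = 0 + 10 = 10 and P_L1_SHIFT = 42 - 12 - 10 = 20, i.e. a
   10-bit L1 table followed by two 10-bit lower levels, resolved by the
   loops in the *_find_alloc() functions below.  */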

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to PhysPageDesc.  */
static void *l1_phys_map[P_L1_SIZE];

static void io_mem_init(void);
static void memory_map_init(void);

/* io memory support */
CPUWriteMemoryFunc *_io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *_io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
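/* Both variants above make the given region readable, writable and
   executable at host page granularity.  The typical call in this file
   (see code_gen_alloc() below) is:

       map_exec(code_gen_buffer, code_gen_buffer_size);

   Hosts that enforce a strict W^X policy (e.g. SELinux execmem
   restrictions or OpenBSD-style W^X) may refuse such RWX mappings, in
   which case mprotect()/VirtualProtect() can fail.  */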

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}
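/* The CONFIG_BSD + CONFIG_USER_ONLY block above walks the host's own
   memory map (via kinfo_getvmmap() or the Linux-compat maps file) and
   marks every region the host already occupies as PAGE_RESERVED,
   presumably so that guest mmap emulation never hands those ranges out
   again (an inference from the page_set_flags() calls, not a statement
   from the original comments).  */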

static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}
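/* Example walk (illustrative values, not from the original source):
   with V_L1_SHIFT = 20 and L2_BITS = 10, looking up page index 0x12345
   selects l1_map[0x12345 >> 20], then one intermediate level indexed by
   (0x12345 >> 10) & 0x3ff = 0x48, and finally returns
   &pd[0x12345 & 0x3ff] = &pd[0x345].  With alloc == 0 the walk bails
   out with NULL at the first missing level instead of allocating it.  */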

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            *lp = p = g_malloc0(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        int i;
        int first_index = index & ~(L2_SIZE - 1);

        if (!alloc) {
            return NULL;
        }

        *lp = pd = g_malloc(sizeof(PhysPageDesc) * L2_SIZE);

        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = io_mem_unassigned.ram_addr;
            pd[i].region_offset = (first_index + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc phys_page_find(target_phys_addr_t index)
{
    PhysPageDesc *p = phys_page_find_alloc(index, 0);

    if (p) {
        return *p;
    } else {
        return (PhysPageDesc) {
            .phys_offset = io_mem_unassigned.ram_addr,
            .region_offset = index << TARGET_PAGE_BITS,
        };
    }
}

static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode.  It will change when a dedicated libc is used.  */
469#define USE_STATIC_CODE_GEN_BUFFER
470#endif
471
472#ifdef USE_STATIC_CODE_GEN_BUFFER
Aurelien Jarnoebf50fb2010-03-29 02:12:51 +0200473static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
474 __attribute__((aligned (CODE_GEN_ALIGN)));
bellard43694152008-05-29 09:35:57 +0000475#endif
476
blueswir18fcd3692008-08-17 20:26:25 +0000477static void code_gen_alloc(unsigned long tb_size)
bellard26a5f132008-05-28 12:30:31 +0000478{
bellard43694152008-05-29 09:35:57 +0000479#ifdef USE_STATIC_CODE_GEN_BUFFER
480 code_gen_buffer = static_code_gen_buffer;
481 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
482 map_exec(code_gen_buffer, code_gen_buffer_size);
483#else
bellard26a5f132008-05-28 12:30:31 +0000484 code_gen_buffer_size = tb_size;
485 if (code_gen_buffer_size == 0) {
bellard43694152008-05-29 09:35:57 +0000486#if defined(CONFIG_USER_ONLY)
bellard43694152008-05-29 09:35:57 +0000487 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
488#else
Stuart Bradyccbb4d42009-05-03 12:15:06 +0100489 /* XXX: needs adjustments */
pbrook94a6b542009-04-11 17:15:54 +0000490 code_gen_buffer_size = (unsigned long)(ram_size / 4);
bellard43694152008-05-29 09:35:57 +0000491#endif
bellard26a5f132008-05-28 12:30:31 +0000492 }
493 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
494 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
495 /* The code gen buffer location may have constraints depending on
496 the host cpu and OS */
497#if defined(__linux__)
498 {
499 int flags;
blueswir1141ac462008-07-26 15:05:57 +0000500 void *start = NULL;
501
bellard26a5f132008-05-28 12:30:31 +0000502 flags = MAP_PRIVATE | MAP_ANONYMOUS;
503#if defined(__x86_64__)
504 flags |= MAP_32BIT;
505 /* Cannot map more than that */
506 if (code_gen_buffer_size > (800 * 1024 * 1024))
507 code_gen_buffer_size = (800 * 1024 * 1024);
blueswir1141ac462008-07-26 15:05:57 +0000508#elif defined(__sparc_v9__)
509 // Map the buffer below 2G, so we can use direct calls and branches
510 flags |= MAP_FIXED;
511 start = (void *) 0x60000000UL;
512 if (code_gen_buffer_size > (512 * 1024 * 1024))
513 code_gen_buffer_size = (512 * 1024 * 1024);
balrog1cb06612008-12-01 02:10:17 +0000514#elif defined(__arm__)
        /* Keep the buffer no bigger than 16MB to branch between blocks */
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}
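/* Usage sketch (a hypothetical caller, for illustration only):
   tb_alloc() and tb_free() form a LIFO pair; tb_free() can reclaim the
   TB slot and its code space only while the block is still the most
   recently allocated one:

       TranslationBlock *tb = tb_alloc(pc);
       if (tb && translation_failed)   // 'translation_failed' is made up
           tb_free(tb);                // rolls back nb_tbs and code_gen_ptr
 */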

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
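/* Worked example (illustrative, not from the original source):
   set_bits(tab, 3, 7) marks bits 3..9.  start = 3 and end = 10 straddle
   a byte boundary, so the else branch runs: tab[0] |= 0xff << 3 = 0xf8
   (bits 3..7), the middle loop writes no full bytes since end1 == 8,
   and the tail sets tab[1] |= (uint8_t)~(0xff << 2) = 0x03 (bits 8..9).  */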

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
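/* Note on the two-page case above: if the last guest byte of the block
   (pc + tb->size - 1) falls on a different page than pc, the TB is
   registered under both physical pages via tb_link_page(), so a write
   to either page will invalidate it.  phys_page2 == -1 is the sentinel
   for "fits in one page" (see tb_link_page() below).  */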
ths3b46e622007-09-17 08:09:54 +00001029
bellard9fa3e852004-01-04 18:06:42 +00001030/* invalidate all TBs which intersect with the target physical page
1031 starting in range [start;end[. NOTE: start and end must refer to
bellardd720b932004-04-25 17:57:43 +00001032 the same physical page. 'is_cpu_write_access' should be true if called
1033 from a real cpu write access: the virtual CPU will exit the current
1034 TB if code is modified inside this TB. */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001035void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
bellardd720b932004-04-25 17:57:43 +00001036 int is_cpu_write_access)
bellard9fa3e852004-01-04 18:06:42 +00001037{
aliguori6b917542008-11-18 19:46:41 +00001038 TranslationBlock *tb, *tb_next, *saved_tb;
bellardd720b932004-04-25 17:57:43 +00001039 CPUState *env = cpu_single_env;
Paul Brook41c1b1c2010-03-12 16:54:58 +00001040 tb_page_addr_t tb_start, tb_end;
aliguori6b917542008-11-18 19:46:41 +00001041 PageDesc *p;
1042 int n;
1043#ifdef TARGET_HAS_PRECISE_SMC
1044 int current_tb_not_found = is_cpu_write_access;
1045 TranslationBlock *current_tb = NULL;
1046 int current_tb_modified = 0;
1047 target_ulong current_pc = 0;
1048 target_ulong current_cs_base = 0;
1049 int current_flags = 0;
1050#endif /* TARGET_HAS_PRECISE_SMC */
bellard9fa3e852004-01-04 18:06:42 +00001051
1052 p = page_find(start >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001053 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00001054 return;
ths5fafdf22007-09-16 21:08:06 +00001055 if (!p->code_bitmap &&
bellardd720b932004-04-25 17:57:43 +00001056 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1057 is_cpu_write_access) {
bellard9fa3e852004-01-04 18:06:42 +00001058 /* build code bitmap */
1059 build_page_bitmap(p);
1060 }
1061
1062 /* we remove all the TBs in the range [start, end[ */
1063 /* XXX: see if in some cases it could be faster to invalidate all the code */
1064 tb = p->first_tb;
1065 while (tb != NULL) {
1066 n = (long)tb & 3;
1067 tb = (TranslationBlock *)((long)tb & ~3);
1068 tb_next = tb->page_next[n];
1069 /* NOTE: this is subtle as a TB may span two physical pages */
1070 if (n == 0) {
1071 /* NOTE: tb_end may be after the end of the page, but
1072 it is not a problem */
1073 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1074 tb_end = tb_start + tb->size;
1075 } else {
1076 tb_start = tb->page_addr[1];
1077 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1078 }
1079 if (!(tb_end <= start || tb_start >= end)) {
bellardd720b932004-04-25 17:57:43 +00001080#ifdef TARGET_HAS_PRECISE_SMC
1081 if (current_tb_not_found) {
1082 current_tb_not_found = 0;
1083 current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001084 if (env->mem_io_pc) {
bellardd720b932004-04-25 17:57:43 +00001085 /* now we have a real cpu fault */
pbrook2e70f6e2008-06-29 01:03:05 +00001086 current_tb = tb_find_pc(env->mem_io_pc);
bellardd720b932004-04-25 17:57:43 +00001087 }
1088 }
1089 if (current_tb == tb &&
pbrook2e70f6e2008-06-29 01:03:05 +00001090 (current_tb->cflags & CF_COUNT_MASK) != 1) {
bellardd720b932004-04-25 17:57:43 +00001091 /* If we are modifying the current TB, we must stop
1092 its execution. We could be more precise by checking
1093 that the modification is after the current PC, but it
1094 would require a specialized function to partially
1095 restore the CPU state */
ths3b46e622007-09-17 08:09:54 +00001096
bellardd720b932004-04-25 17:57:43 +00001097 current_tb_modified = 1;
Stefan Weil618ba8e2011-04-18 06:39:53 +00001098 cpu_restore_state(current_tb, env, env->mem_io_pc);
aliguori6b917542008-11-18 19:46:41 +00001099 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1100 &current_flags);
bellardd720b932004-04-25 17:57:43 +00001101 }
1102#endif /* TARGET_HAS_PRECISE_SMC */
bellard6f5a9f72005-11-26 20:12:28 +00001103 /* we need to do that to handle the case where a signal
1104 occurs while doing tb_phys_invalidate() */
1105 saved_tb = NULL;
1106 if (env) {
1107 saved_tb = env->current_tb;
1108 env->current_tb = NULL;
1109 }
bellard9fa3e852004-01-04 18:06:42 +00001110 tb_phys_invalidate(tb, -1);
bellard6f5a9f72005-11-26 20:12:28 +00001111 if (env) {
1112 env->current_tb = saved_tb;
1113 if (env->interrupt_request && env->current_tb)
1114 cpu_interrupt(env, env->interrupt_request);
1115 }
bellard9fa3e852004-01-04 18:06:42 +00001116 }
1117 tb = tb_next;
1118 }
1119#if !defined(CONFIG_USER_ONLY)
1120 /* if no code remaining, no need to continue to use slow writes */
1121 if (!p->first_tb) {
1122 invalidate_page_bitmap(p);
bellardd720b932004-04-25 17:57:43 +00001123 if (is_cpu_write_access) {
pbrook2e70f6e2008-06-29 01:03:05 +00001124 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
bellardd720b932004-04-25 17:57:43 +00001125 }
1126 }
1127#endif
1128#ifdef TARGET_HAS_PRECISE_SMC
1129 if (current_tb_modified) {
1130 /* we generate a block containing just the instruction
1131 modifying the memory. It will ensure that it cannot modify
1132 itself */
bellardea1c1802004-06-14 18:56:36 +00001133 env->current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001134 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
bellardd720b932004-04-25 17:57:43 +00001135 cpu_resume_from_signal(env, NULL);
bellard9fa3e852004-01-04 18:06:42 +00001136 }
1137#endif
1138}
1139
1140/* len must be <= 8 and start must be a multiple of len */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001141static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
bellard9fa3e852004-01-04 18:06:42 +00001142{
1143 PageDesc *p;
1144 int offset, b;
bellard59817cc2004-02-16 22:01:13 +00001145#if 0
bellarda4193c82004-06-03 14:01:43 +00001146 if (1) {
aliguori93fcfe32009-01-15 22:34:14 +00001147 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1148 cpu_single_env->mem_io_vaddr, len,
1149 cpu_single_env->eip,
1150 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
bellard59817cc2004-02-16 22:01:13 +00001151 }
1152#endif
bellard9fa3e852004-01-04 18:06:42 +00001153 p = page_find(start >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001154 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00001155 return;
1156 if (p->code_bitmap) {
1157 offset = start & ~TARGET_PAGE_MASK;
1158 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1159 if (b & ((1 << len) - 1))
1160 goto do_invalidate;
1161 } else {
1162 do_invalidate:
bellardd720b932004-04-25 17:57:43 +00001163 tb_invalidate_phys_page_range(start, start + len, 1);
bellard9fa3e852004-01-04 18:06:42 +00001164 }
1165}
1166
bellard9fa3e852004-01-04 18:06:42 +00001167#if !defined(CONFIG_SOFTMMU)
Paul Brook41c1b1c2010-03-12 16:54:58 +00001168static void tb_invalidate_phys_page(tb_page_addr_t addr,
bellardd720b932004-04-25 17:57:43 +00001169 unsigned long pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00001170{
aliguori6b917542008-11-18 19:46:41 +00001171 TranslationBlock *tb;
bellard9fa3e852004-01-04 18:06:42 +00001172 PageDesc *p;
aliguori6b917542008-11-18 19:46:41 +00001173 int n;
bellardd720b932004-04-25 17:57:43 +00001174#ifdef TARGET_HAS_PRECISE_SMC
aliguori6b917542008-11-18 19:46:41 +00001175 TranslationBlock *current_tb = NULL;
bellardd720b932004-04-25 17:57:43 +00001176 CPUState *env = cpu_single_env;
aliguori6b917542008-11-18 19:46:41 +00001177 int current_tb_modified = 0;
1178 target_ulong current_pc = 0;
1179 target_ulong current_cs_base = 0;
1180 int current_flags = 0;
bellardd720b932004-04-25 17:57:43 +00001181#endif
bellard9fa3e852004-01-04 18:06:42 +00001182
1183 addr &= TARGET_PAGE_MASK;
1184 p = page_find(addr >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001185 if (!p)
bellardfd6ce8f2003-05-14 19:00:11 +00001186 return;
1187 tb = p->first_tb;
bellardd720b932004-04-25 17:57:43 +00001188#ifdef TARGET_HAS_PRECISE_SMC
1189 if (tb && pc != 0) {
1190 current_tb = tb_find_pc(pc);
1191 }
1192#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001193 while (tb != NULL) {
bellard9fa3e852004-01-04 18:06:42 +00001194 n = (long)tb & 3;
1195 tb = (TranslationBlock *)((long)tb & ~3);
bellardd720b932004-04-25 17:57:43 +00001196#ifdef TARGET_HAS_PRECISE_SMC
1197 if (current_tb == tb &&
pbrook2e70f6e2008-06-29 01:03:05 +00001198 (current_tb->cflags & CF_COUNT_MASK) != 1) {
bellardd720b932004-04-25 17:57:43 +00001199 /* If we are modifying the current TB, we must stop
1200 its execution. We could be more precise by checking
1201 that the modification is after the current PC, but it
1202 would require a specialized function to partially
1203 restore the CPU state */
ths3b46e622007-09-17 08:09:54 +00001204
bellardd720b932004-04-25 17:57:43 +00001205 current_tb_modified = 1;
Stefan Weil618ba8e2011-04-18 06:39:53 +00001206 cpu_restore_state(current_tb, env, pc);
aliguori6b917542008-11-18 19:46:41 +00001207 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1208 &current_flags);
bellardd720b932004-04-25 17:57:43 +00001209 }
1210#endif /* TARGET_HAS_PRECISE_SMC */
bellard9fa3e852004-01-04 18:06:42 +00001211 tb_phys_invalidate(tb, addr);
1212 tb = tb->page_next[n];
bellardfd6ce8f2003-05-14 19:00:11 +00001213 }
1214 p->first_tb = NULL;
bellardd720b932004-04-25 17:57:43 +00001215#ifdef TARGET_HAS_PRECISE_SMC
1216 if (current_tb_modified) {
1217 /* we generate a block containing just the instruction
1218 modifying the memory. It will ensure that it cannot modify
1219 itself */
bellardea1c1802004-06-14 18:56:36 +00001220 env->current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001221 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
bellardd720b932004-04-25 17:57:43 +00001222 cpu_resume_from_signal(env, puc);
1223 }
1224#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001225}
bellard9fa3e852004-01-04 18:06:42 +00001226#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001227
1228/* add the tb in the target page and protect it if necessary */
ths5fafdf22007-09-16 21:08:06 +00001229static inline void tb_alloc_page(TranslationBlock *tb,
Paul Brook41c1b1c2010-03-12 16:54:58 +00001230 unsigned int n, tb_page_addr_t page_addr)
bellardfd6ce8f2003-05-14 19:00:11 +00001231{
1232 PageDesc *p;
Juan Quintela4429ab42011-06-02 01:53:44 +00001233#ifndef CONFIG_USER_ONLY
1234 bool page_already_protected;
1235#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001236
bellard9fa3e852004-01-04 18:06:42 +00001237 tb->page_addr[n] = page_addr;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001238 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
bellard9fa3e852004-01-04 18:06:42 +00001239 tb->page_next[n] = p->first_tb;
Juan Quintela4429ab42011-06-02 01:53:44 +00001240#ifndef CONFIG_USER_ONLY
1241 page_already_protected = p->first_tb != NULL;
1242#endif
bellard9fa3e852004-01-04 18:06:42 +00001243 p->first_tb = (TranslationBlock *)((long)tb | n);
1244 invalidate_page_bitmap(p);
1245
bellard107db442004-06-22 18:48:46 +00001246#if defined(TARGET_HAS_SMC) || 1
bellardd720b932004-04-25 17:57:43 +00001247
bellard9fa3e852004-01-04 18:06:42 +00001248#if defined(CONFIG_USER_ONLY)
bellardfd6ce8f2003-05-14 19:00:11 +00001249 if (p->flags & PAGE_WRITE) {
pbrook53a59602006-03-25 19:31:22 +00001250 target_ulong addr;
1251 PageDesc *p2;
bellard9fa3e852004-01-04 18:06:42 +00001252 int prot;
1253
bellardfd6ce8f2003-05-14 19:00:11 +00001254 /* force the host page as non writable (writes will have a
1255 page fault + mprotect overhead) */
pbrook53a59602006-03-25 19:31:22 +00001256 page_addr &= qemu_host_page_mask;
bellardfd6ce8f2003-05-14 19:00:11 +00001257 prot = 0;
pbrook53a59602006-03-25 19:31:22 +00001258 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1259 addr += TARGET_PAGE_SIZE) {
1260
1261 p2 = page_find (addr >> TARGET_PAGE_BITS);
1262 if (!p2)
1263 continue;
1264 prot |= p2->flags;
1265 p2->flags &= ~PAGE_WRITE;
pbrook53a59602006-03-25 19:31:22 +00001266 }
ths5fafdf22007-09-16 21:08:06 +00001267 mprotect(g2h(page_addr), qemu_host_page_size,
bellardfd6ce8f2003-05-14 19:00:11 +00001268 (prot & PAGE_BITS) & ~PAGE_WRITE);
1269#ifdef DEBUG_TB_INVALIDATE
blueswir1ab3d1722007-11-04 07:31:40 +00001270 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
pbrook53a59602006-03-25 19:31:22 +00001271 page_addr);
bellardfd6ce8f2003-05-14 19:00:11 +00001272#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001273 }
bellard9fa3e852004-01-04 18:06:42 +00001274#else
1275 /* if some code is already present, then the pages are already
1276 protected. So we handle the case where only the first TB is
1277 allocated in a physical page */
Juan Quintela4429ab42011-06-02 01:53:44 +00001278 if (!page_already_protected) {
bellard6a00d602005-11-21 23:25:50 +00001279 tlb_protect_code(page_addr);
bellard9fa3e852004-01-04 18:06:42 +00001280 }
1281#endif
bellardd720b932004-04-25 17:57:43 +00001282
1283#endif /* TARGET_HAS_SMC */
bellardfd6ce8f2003-05-14 19:00:11 +00001284}
1285
bellard9fa3e852004-01-04 18:06:42 +00001286/* add a new TB and link it to the physical page tables. phys_page2 is
1287 (-1) to indicate that only one page contains the TB. */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001288void tb_link_page(TranslationBlock *tb,
1289 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
bellardd4e81642003-05-25 16:46:15 +00001290{
bellard9fa3e852004-01-04 18:06:42 +00001291 unsigned int h;
1292 TranslationBlock **ptb;
1293
pbrookc8a706f2008-06-02 16:16:42 +00001294 /* Grab the mmap lock to stop another thread invalidating this TB
1295 before we are done. */
1296 mmap_lock();
bellard9fa3e852004-01-04 18:06:42 +00001297 /* add in the physical hash table */
1298 h = tb_phys_hash_func(phys_pc);
1299 ptb = &tb_phys_hash[h];
1300 tb->phys_hash_next = *ptb;
1301 *ptb = tb;
bellardfd6ce8f2003-05-14 19:00:11 +00001302
1303 /* add in the page list */
bellard9fa3e852004-01-04 18:06:42 +00001304 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1305 if (phys_page2 != -1)
1306 tb_alloc_page(tb, 1, phys_page2);
1307 else
1308 tb->page_addr[1] = -1;
bellard9fa3e852004-01-04 18:06:42 +00001309
bellardd4e81642003-05-25 16:46:15 +00001310 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1311 tb->jmp_next[0] = NULL;
1312 tb->jmp_next[1] = NULL;
1313
1314 /* init original jump addresses */
1315 if (tb->tb_next_offset[0] != 0xffff)
1316 tb_reset_jump(tb, 0);
1317 if (tb->tb_next_offset[1] != 0xffff)
1318 tb_reset_jump(tb, 1);
bellard8a40a182005-11-20 10:35:40 +00001319
1320#ifdef DEBUG_TB_CHECK
1321 tb_page_check();
1322#endif
pbrookc8a706f2008-06-02 16:16:42 +00001323 mmap_unlock();
bellardfd6ce8f2003-05-14 19:00:11 +00001324}
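/* Usage sketch (illustrative; the lookup itself lives outside this file):
   tb_link_page() pushes the TB onto a tb_phys_hash bucket, so a later
   lookup walks that bucket comparing physical PCs, roughly: */
#if 0   /* example only */
TranslationBlock *t;
for (t = tb_phys_hash[tb_phys_hash_func(phys_pc)]; t != NULL;
     t = t->phys_hash_next) {
    if (t->page_addr[0] == (phys_pc & TARGET_PAGE_MASK)) {
        break;   /* candidate; a full match also compares pc/cs_base/flags */
    }
}
#endif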
1325
bellarda513fe12003-05-27 23:29:48 +00001326/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1327 tb[1].tc_ptr. Return NULL if not found */
1328TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1329{
1330 int m_min, m_max, m;
1331 unsigned long v;
1332 TranslationBlock *tb;
1333
1334 if (nb_tbs <= 0)
1335 return NULL;
1336 if (tc_ptr < (unsigned long)code_gen_buffer ||
1337 tc_ptr >= (unsigned long)code_gen_ptr)
1338 return NULL;
1339 /* binary search (cf Knuth) */
1340 m_min = 0;
1341 m_max = nb_tbs - 1;
1342 while (m_min <= m_max) {
1343 m = (m_min + m_max) >> 1;
1344 tb = &tbs[m];
1345 v = (unsigned long)tb->tc_ptr;
1346 if (v == tc_ptr)
1347 return tb;
1348 else if (tc_ptr < v) {
1349 m_max = m - 1;
1350 } else {
1351 m_min = m + 1;
1352 }
ths5fafdf22007-09-16 21:08:06 +00001353 }
bellarda513fe12003-05-27 23:29:48 +00001354 return &tbs[m_max];
1355}
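/* Worked example (illustrative, hypothetical addresses): with three TBs
   whose tc_ptr values are 0x1000, 0x1400 and 0x1900, a query for
   tc_ptr = 0x1432 first probes m = 1 (0x1400 < 0x1432, so m_min = 2),
   then m = 2 (0x1900 > 0x1432, so m_max = 1); the loop exits and
   &tbs[1] is returned, i.e. the TB whose generated code contains the
   address.  An exact hit on a tc_ptr returns that TB immediately. */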
bellard75012672003-06-21 13:11:07 +00001356
bellardea041c02003-06-25 16:16:50 +00001357static void tb_reset_jump_recursive(TranslationBlock *tb);
1358
1359static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1360{
1361 TranslationBlock *tb1, *tb_next, **ptb;
1362 unsigned int n1;
1363
1364 tb1 = tb->jmp_next[n];
1365 if (tb1 != NULL) {
1366 /* find head of list */
1367 for(;;) {
1368 n1 = (long)tb1 & 3;
1369 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1370 if (n1 == 2)
1371 break;
1372 tb1 = tb1->jmp_next[n1];
1373 }
1374 /* we are now sure that tb jumps to tb1 */
1375 tb_next = tb1;
1376
1377 /* remove tb from the jmp_first list */
1378 ptb = &tb_next->jmp_first;
1379 for(;;) {
1380 tb1 = *ptb;
1381 n1 = (long)tb1 & 3;
1382 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1383 if (n1 == n && tb1 == tb)
1384 break;
1385 ptb = &tb1->jmp_next[n1];
1386 }
1387 *ptb = tb->jmp_next[n];
1388 tb->jmp_next[n] = NULL;
ths3b46e622007-09-17 08:09:54 +00001389
bellardea041c02003-06-25 16:16:50 +00001390 /* suppress the jump to next tb in generated code */
1391 tb_reset_jump(tb, n);
1392
bellard01243112004-01-04 15:48:17 +00001393 /* suppress jumps in the tb on which we could have jumped */
bellardea041c02003-06-25 16:16:50 +00001394 tb_reset_jump_recursive(tb_next);
1395 }
1396}
1397
1398static void tb_reset_jump_recursive(TranslationBlock *tb)
1399{
1400 tb_reset_jump_recursive2(tb, 0);
1401 tb_reset_jump_recursive2(tb, 1);
1402}
1403
bellard1fddef42005-04-17 19:16:13 +00001404#if defined(TARGET_HAS_ICE)
Paul Brook94df27f2010-02-28 23:47:45 +00001405#if defined(CONFIG_USER_ONLY)
1406static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1407{
1408 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1409}
1410#else
bellardd720b932004-04-25 17:57:43 +00001411static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1412{
Anthony Liguoric227f092009-10-01 16:12:16 -05001413 target_phys_addr_t addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00001414 target_ulong pd;
Anthony Liguoric227f092009-10-01 16:12:16 -05001415 ram_addr_t ram_addr;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02001416 PhysPageDesc p;
bellardd720b932004-04-25 17:57:43 +00001417
pbrookc2f07f82006-04-08 17:14:56 +00001418 addr = cpu_get_phys_page_debug(env, pc);
1419 p = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02001420 pd = p.phys_offset;
pbrookc2f07f82006-04-08 17:14:56 +00001421 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
pbrook706cd4b2006-04-08 17:36:21 +00001422 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
bellardd720b932004-04-25 17:57:43 +00001423}
bellardc27004e2005-01-03 23:35:10 +00001424#endif
Paul Brook94df27f2010-02-28 23:47:45 +00001425#endif /* TARGET_HAS_ICE */
bellardd720b932004-04-25 17:57:43 +00001426
Paul Brookc527ee82010-03-01 03:31:14 +00001427#if defined(CONFIG_USER_ONLY)
1428void cpu_watchpoint_remove_all(CPUState *env, int mask)
1430{
1431}
1432
1433int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1434 int flags, CPUWatchpoint **watchpoint)
1435{
1436 return -ENOSYS;
1437}
1438#else
pbrook6658ffb2007-03-16 23:58:11 +00001439/* Add a watchpoint. */
aliguoria1d1bb32008-11-18 20:07:32 +00001440int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1441 int flags, CPUWatchpoint **watchpoint)
pbrook6658ffb2007-03-16 23:58:11 +00001442{
aliguorib4051332008-11-18 20:14:20 +00001443 target_ulong len_mask = ~(len - 1);
aliguoric0ce9982008-11-25 22:13:57 +00001444 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001445
aliguorib4051332008-11-18 20:14:20 +00001446 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1447 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1448 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1449 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1450 return -EINVAL;
1451 }
Anthony Liguori7267c092011-08-20 22:09:37 -05001452 wp = g_malloc(sizeof(*wp));
pbrook6658ffb2007-03-16 23:58:11 +00001453
aliguoria1d1bb32008-11-18 20:07:32 +00001454 wp->vaddr = addr;
aliguorib4051332008-11-18 20:14:20 +00001455 wp->len_mask = len_mask;
aliguoria1d1bb32008-11-18 20:07:32 +00001456 wp->flags = flags;
1457
aliguori2dc9f412008-11-18 20:56:59 +00001458 /* keep all GDB-injected watchpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001459 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001460 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001461 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001462 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001463
pbrook6658ffb2007-03-16 23:58:11 +00001464 tlb_flush_page(env, addr);
aliguoria1d1bb32008-11-18 20:07:32 +00001465
1466 if (watchpoint)
1467 *watchpoint = wp;
1468 return 0;
pbrook6658ffb2007-03-16 23:58:11 +00001469}
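/* Usage sketch (illustrative; 'env' and the address are hypothetical):
   watch a 4-byte guest variable for writes.  len must be a power of two
   (1, 2, 4 or 8) and addr aligned to it, otherwise -EINVAL comes back. */
#if 0   /* example only */
CPUWatchpoint *wp;
if (cpu_watchpoint_insert(env, 0x10001234, 4, BP_MEM_WRITE, &wp) == 0) {
    /* ... run the guest; the hit path sets BP_WATCHPOINT_HIT ... */
    cpu_watchpoint_remove_by_ref(env, wp);
}
#endif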
1470
aliguoria1d1bb32008-11-18 20:07:32 +00001471/* Remove a specific watchpoint. */
1472int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1473 int flags)
pbrook6658ffb2007-03-16 23:58:11 +00001474{
aliguorib4051332008-11-18 20:14:20 +00001475 target_ulong len_mask = ~(len - 1);
aliguoria1d1bb32008-11-18 20:07:32 +00001476 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001477
Blue Swirl72cf2d42009-09-12 07:36:22 +00001478 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001479 if (addr == wp->vaddr && len_mask == wp->len_mask
aliguori6e140f22008-11-18 20:37:55 +00001480 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
aliguoria1d1bb32008-11-18 20:07:32 +00001481 cpu_watchpoint_remove_by_ref(env, wp);
pbrook6658ffb2007-03-16 23:58:11 +00001482 return 0;
1483 }
1484 }
aliguoria1d1bb32008-11-18 20:07:32 +00001485 return -ENOENT;
pbrook6658ffb2007-03-16 23:58:11 +00001486}
1487
aliguoria1d1bb32008-11-18 20:07:32 +00001488/* Remove a specific watchpoint by reference. */
1489void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1490{
Blue Swirl72cf2d42009-09-12 07:36:22 +00001491 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
edgar_igl7d03f822008-05-17 18:58:29 +00001492
aliguoria1d1bb32008-11-18 20:07:32 +00001493 tlb_flush_page(env, watchpoint->vaddr);
1494
Anthony Liguori7267c092011-08-20 22:09:37 -05001495 g_free(watchpoint);
edgar_igl7d03f822008-05-17 18:58:29 +00001496}
1497
aliguoria1d1bb32008-11-18 20:07:32 +00001498/* Remove all matching watchpoints. */
1499void cpu_watchpoint_remove_all(CPUState *env, int mask)
1500{
aliguoric0ce9982008-11-25 22:13:57 +00001501 CPUWatchpoint *wp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001502
Blue Swirl72cf2d42009-09-12 07:36:22 +00001503 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001504 if (wp->flags & mask)
1505 cpu_watchpoint_remove_by_ref(env, wp);
aliguoric0ce9982008-11-25 22:13:57 +00001506 }
aliguoria1d1bb32008-11-18 20:07:32 +00001507}
Paul Brookc527ee82010-03-01 03:31:14 +00001508#endif
aliguoria1d1bb32008-11-18 20:07:32 +00001509
1510/* Add a breakpoint. */
1511int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1512 CPUBreakpoint **breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001513{
bellard1fddef42005-04-17 19:16:13 +00001514#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001515 CPUBreakpoint *bp;
ths3b46e622007-09-17 08:09:54 +00001516
Anthony Liguori7267c092011-08-20 22:09:37 -05001517 bp = g_malloc(sizeof(*bp));
aliguoria1d1bb32008-11-18 20:07:32 +00001518
1519 bp->pc = pc;
1520 bp->flags = flags;
1521
aliguori2dc9f412008-11-18 20:56:59 +00001522 /* keep all GDB-injected breakpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001523 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001524 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001525 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001526 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001527
1528 breakpoint_invalidate(env, pc);
1529
1530 if (breakpoint)
1531 *breakpoint = bp;
1532 return 0;
1533#else
1534 return -ENOSYS;
1535#endif
1536}
1537
1538/* Remove a specific breakpoint. */
1539int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1540{
1541#if defined(TARGET_HAS_ICE)
1542 CPUBreakpoint *bp;
1543
Blue Swirl72cf2d42009-09-12 07:36:22 +00001544 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00001545 if (bp->pc == pc && bp->flags == flags) {
1546 cpu_breakpoint_remove_by_ref(env, bp);
bellard4c3a88a2003-07-26 12:06:08 +00001547 return 0;
aliguoria1d1bb32008-11-18 20:07:32 +00001548 }
bellard4c3a88a2003-07-26 12:06:08 +00001549 }
aliguoria1d1bb32008-11-18 20:07:32 +00001550 return -ENOENT;
bellard4c3a88a2003-07-26 12:06:08 +00001551#else
aliguoria1d1bb32008-11-18 20:07:32 +00001552 return -ENOSYS;
bellard4c3a88a2003-07-26 12:06:08 +00001553#endif
1554}
1555
aliguoria1d1bb32008-11-18 20:07:32 +00001556/* Remove a specific breakpoint by reference. */
1557void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001558{
bellard1fddef42005-04-17 19:16:13 +00001559#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001560 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
bellardd720b932004-04-25 17:57:43 +00001561
aliguoria1d1bb32008-11-18 20:07:32 +00001562 breakpoint_invalidate(env, breakpoint->pc);
1563
Anthony Liguori7267c092011-08-20 22:09:37 -05001564 g_free(breakpoint);
aliguoria1d1bb32008-11-18 20:07:32 +00001565#endif
1566}
1567
1568/* Remove all matching breakpoints. */
1569void cpu_breakpoint_remove_all(CPUState *env, int mask)
1570{
1571#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001572 CPUBreakpoint *bp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001573
Blue Swirl72cf2d42009-09-12 07:36:22 +00001574 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001575 if (bp->flags & mask)
1576 cpu_breakpoint_remove_by_ref(env, bp);
aliguoric0ce9982008-11-25 22:13:57 +00001577 }
bellard4c3a88a2003-07-26 12:06:08 +00001578#endif
1579}
1580
bellardc33a3462003-07-29 20:50:33 +00001581/* enable or disable single step mode. EXCP_DEBUG is returned by the
1582 CPU loop after each instruction */
1583void cpu_single_step(CPUState *env, int enabled)
1584{
bellard1fddef42005-04-17 19:16:13 +00001585#if defined(TARGET_HAS_ICE)
bellardc33a3462003-07-29 20:50:33 +00001586 if (env->singlestep_enabled != enabled) {
1587 env->singlestep_enabled = enabled;
aliguorie22a25c2009-03-12 20:12:48 +00001588 if (kvm_enabled())
1589 kvm_update_guest_debug(env, 0);
1590 else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01001591 /* must flush all the translated code to avoid inconsistencies */
aliguorie22a25c2009-03-12 20:12:48 +00001592 /* XXX: only flush what is necessary */
1593 tb_flush(env);
1594 }
bellardc33a3462003-07-29 20:50:33 +00001595 }
1596#endif
1597}
1598
bellard34865132003-10-05 14:28:56 +00001599/* enable or disable low-level logging */
1600void cpu_set_log(int log_flags)
1601{
1602 loglevel = log_flags;
1603 if (loglevel && !logfile) {
pbrook11fcfab2007-07-01 18:21:11 +00001604 logfile = fopen(logfilename, log_append ? "a" : "w");
bellard34865132003-10-05 14:28:56 +00001605 if (!logfile) {
1606 perror(logfilename);
1607 _exit(1);
1608 }
bellard9fa3e852004-01-04 18:06:42 +00001609#if !defined(CONFIG_SOFTMMU)
1610 /* must avoid glibc's use of mmap() by setting a buffer "by hand" */
1611 {
blueswir1b55266b2008-09-20 08:07:15 +00001612 static char logfile_buf[4096];
bellard9fa3e852004-01-04 18:06:42 +00001613 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1614 }
Stefan Weildaf767b2011-12-03 22:32:37 +01001615#elif defined(_WIN32)
1616 /* Win32 doesn't support line-buffering, so use unbuffered output. */
1617 setvbuf(logfile, NULL, _IONBF, 0);
1618#else
bellard34865132003-10-05 14:28:56 +00001619 setvbuf(logfile, NULL, _IOLBF, 0);
bellard9fa3e852004-01-04 18:06:42 +00001620#endif
pbrooke735b912007-06-30 13:53:24 +00001621 log_append = 1;
1622 }
1623 if (!loglevel && logfile) {
1624 fclose(logfile);
1625 logfile = NULL;
bellard34865132003-10-05 14:28:56 +00001626 }
1627}
1628
1629void cpu_set_log_filename(const char *filename)
1630{
1631 logfilename = strdup(filename);
pbrooke735b912007-06-30 13:53:24 +00001632 if (logfile) {
1633 fclose(logfile);
1634 logfile = NULL;
1635 }
1636 cpu_set_log(loglevel);
bellard34865132003-10-05 14:28:56 +00001637}
bellardc33a3462003-07-29 20:50:33 +00001638
aurel323098dba2009-03-07 21:28:24 +00001639static void cpu_unlink_tb(CPUState *env)
bellardea041c02003-06-25 16:16:50 +00001640{
pbrookd5975362008-06-07 20:50:51 +00001641 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1642 problem and hope the cpu will stop of its own accord. For userspace
1643 emulation this often isn't actually as bad as it sounds. Often
1644 signals are used primarily to interrupt blocking syscalls. */
aurel323098dba2009-03-07 21:28:24 +00001645 TranslationBlock *tb;
Anthony Liguoric227f092009-10-01 16:12:16 -05001646 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
aurel323098dba2009-03-07 21:28:24 +00001647
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001648 spin_lock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001649 tb = env->current_tb;
1650 /* if the cpu is currently executing code, we must unlink it and
1651 all the potentially executing TBs */
Riku Voipiof76cfe52009-12-04 15:16:30 +02001652 if (tb) {
aurel323098dba2009-03-07 21:28:24 +00001653 env->current_tb = NULL;
1654 tb_reset_jump_recursive(tb);
aurel323098dba2009-03-07 21:28:24 +00001655 }
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001656 spin_unlock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001657}
1658
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001659#ifndef CONFIG_USER_ONLY
aurel323098dba2009-03-07 21:28:24 +00001660/* mask must never be zero, except for A20 change call */
Jan Kiszkaec6959d2011-04-13 01:32:56 +02001661static void tcg_handle_interrupt(CPUState *env, int mask)
aurel323098dba2009-03-07 21:28:24 +00001662{
1663 int old_mask;
1664
1665 old_mask = env->interrupt_request;
1666 env->interrupt_request |= mask;
1667
aliguori8edac962009-04-24 18:03:45 +00001668 /*
1669 * If called from iothread context, wake the target cpu in
1670 * case it's halted.
1671 */
Jan Kiszkab7680cb2011-03-12 17:43:51 +01001672 if (!qemu_cpu_is_self(env)) {
aliguori8edac962009-04-24 18:03:45 +00001673 qemu_cpu_kick(env);
1674 return;
1675 }
aliguori8edac962009-04-24 18:03:45 +00001676
pbrook2e70f6e2008-06-29 01:03:05 +00001677 if (use_icount) {
pbrook266910c2008-07-09 15:31:50 +00001678 env->icount_decr.u16.high = 0xffff;
pbrook2e70f6e2008-06-29 01:03:05 +00001679 if (!can_do_io(env)
aurel32be214e62009-03-06 21:48:00 +00001680 && (mask & ~old_mask) != 0) {
pbrook2e70f6e2008-06-29 01:03:05 +00001681 cpu_abort(env, "Raised interrupt while not in I/O function");
1682 }
pbrook2e70f6e2008-06-29 01:03:05 +00001683 } else {
aurel323098dba2009-03-07 21:28:24 +00001684 cpu_unlink_tb(env);
bellardea041c02003-06-25 16:16:50 +00001685 }
1686}
1687
Jan Kiszkaec6959d2011-04-13 01:32:56 +02001688CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1689
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001690#else /* CONFIG_USER_ONLY */
1691
1692void cpu_interrupt(CPUState *env, int mask)
1693{
1694 env->interrupt_request |= mask;
1695 cpu_unlink_tb(env);
1696}
1697#endif /* CONFIG_USER_ONLY */
1698
bellardb54ad042004-05-20 13:42:52 +00001699void cpu_reset_interrupt(CPUState *env, int mask)
1700{
1701 env->interrupt_request &= ~mask;
1702}
1703
aurel323098dba2009-03-07 21:28:24 +00001704void cpu_exit(CPUState *env)
1705{
1706 env->exit_request = 1;
1707 cpu_unlink_tb(env);
1708}
1709
blueswir1c7cd6a32008-10-02 18:27:46 +00001710const CPULogItem cpu_log_items[] = {
ths5fafdf22007-09-16 21:08:06 +00001711 { CPU_LOG_TB_OUT_ASM, "out_asm",
bellardf193c792004-03-21 17:06:25 +00001712 "show generated host assembly code for each compiled TB" },
1713 { CPU_LOG_TB_IN_ASM, "in_asm",
1714 "show target assembly code for each compiled TB" },
ths5fafdf22007-09-16 21:08:06 +00001715 { CPU_LOG_TB_OP, "op",
bellard57fec1f2008-02-01 10:50:11 +00001716 "show micro ops for each compiled TB" },
bellardf193c792004-03-21 17:06:25 +00001717 { CPU_LOG_TB_OP_OPT, "op_opt",
blueswir1e01a1152008-03-14 17:37:11 +00001718 "show micro ops "
1719#ifdef TARGET_I386
1720 "before eflags optimization and "
bellardf193c792004-03-21 17:06:25 +00001721#endif
blueswir1e01a1152008-03-14 17:37:11 +00001722 "after liveness analysis" },
bellardf193c792004-03-21 17:06:25 +00001723 { CPU_LOG_INT, "int",
1724 "show interrupts/exceptions in short format" },
1725 { CPU_LOG_EXEC, "exec",
1726 "show trace before each executed TB (lots of logs)" },
bellard9fddaa02004-05-21 12:59:32 +00001727 { CPU_LOG_TB_CPU, "cpu",
thse91c8a72007-06-03 13:35:16 +00001728 "show CPU state before block translation" },
bellardf193c792004-03-21 17:06:25 +00001729#ifdef TARGET_I386
1730 { CPU_LOG_PCALL, "pcall",
1731 "show protected mode far calls/returns/exceptions" },
aliguorieca1bdf2009-01-26 19:54:31 +00001732 { CPU_LOG_RESET, "cpu_reset",
1733 "show CPU state before CPU resets" },
bellardf193c792004-03-21 17:06:25 +00001734#endif
bellard8e3a9fd2004-10-09 17:32:58 +00001735#ifdef DEBUG_IOPORT
bellardfd872592004-05-12 19:11:15 +00001736 { CPU_LOG_IOPORT, "ioport",
1737 "show all i/o ports accesses" },
bellard8e3a9fd2004-10-09 17:32:58 +00001738#endif
bellardf193c792004-03-21 17:06:25 +00001739 { 0, NULL, NULL },
1740};
1741
1742static int cmp1(const char *s1, int n, const char *s2)
1743{
1744 if (strlen(s2) != n)
1745 return 0;
1746 return memcmp(s1, s2, n) == 0;
1747}
ths3b46e622007-09-17 08:09:54 +00001748
bellardf193c792004-03-21 17:06:25 +00001749/* takes a comma separated list of log masks. Return 0 if error. */
1750int cpu_str_to_log_mask(const char *str)
1751{
blueswir1c7cd6a32008-10-02 18:27:46 +00001752 const CPULogItem *item;
bellardf193c792004-03-21 17:06:25 +00001753 int mask;
1754 const char *p, *p1;
1755
1756 p = str;
1757 mask = 0;
1758 for(;;) {
1759 p1 = strchr(p, ',');
1760 if (!p1)
1761 p1 = p + strlen(p);
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001762 if(cmp1(p,p1-p,"all")) {
1763 for(item = cpu_log_items; item->mask != 0; item++) {
1764 mask |= item->mask;
1765 }
1766 } else {
1767 for(item = cpu_log_items; item->mask != 0; item++) {
1768 if (cmp1(p, p1 - p, item->name))
1769 goto found;
1770 }
1771 return 0;
bellardf193c792004-03-21 17:06:25 +00001772 }
bellardf193c792004-03-21 17:06:25 +00001773 found:
1774 mask |= item->mask;
1775 if (*p1 != ',')
1776 break;
1777 p = p1 + 1;
1778 }
1779 return mask;
1780}
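/* Usage sketch (illustrative): parsing a "-d"-style option string.
   "all" expands to every entry of cpu_log_items; an unknown name makes
   the function return 0. */
#if 0   /* example only */
int mask = cpu_str_to_log_mask("in_asm,op");
if (mask == 0) {
    fprintf(stderr, "unknown log item\n");
} else {
    cpu_set_log(mask);   /* opens the log file on first enable */
}
#endif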
bellardea041c02003-06-25 16:16:50 +00001781
bellard75012672003-06-21 13:11:07 +00001782void cpu_abort(CPUState *env, const char *fmt, ...)
1783{
1784 va_list ap;
pbrook493ae1f2007-11-23 16:53:59 +00001785 va_list ap2;
bellard75012672003-06-21 13:11:07 +00001786
1787 va_start(ap, fmt);
pbrook493ae1f2007-11-23 16:53:59 +00001788 va_copy(ap2, ap);
bellard75012672003-06-21 13:11:07 +00001789 fprintf(stderr, "qemu: fatal: ");
1790 vfprintf(stderr, fmt, ap);
1791 fprintf(stderr, "\n");
1792#ifdef TARGET_I386
bellard7fe48482004-10-09 18:08:01 +00001793 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1794#else
1795 cpu_dump_state(env, stderr, fprintf, 0);
bellard75012672003-06-21 13:11:07 +00001796#endif
aliguori93fcfe32009-01-15 22:34:14 +00001797 if (qemu_log_enabled()) {
1798 qemu_log("qemu: fatal: ");
1799 qemu_log_vprintf(fmt, ap2);
1800 qemu_log("\n");
j_mayerf9373292007-09-29 12:18:20 +00001801#ifdef TARGET_I386
aliguori93fcfe32009-01-15 22:34:14 +00001802 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
j_mayerf9373292007-09-29 12:18:20 +00001803#else
aliguori93fcfe32009-01-15 22:34:14 +00001804 log_cpu_state(env, 0);
j_mayerf9373292007-09-29 12:18:20 +00001805#endif
aliguori31b1a7b2009-01-15 22:35:09 +00001806 qemu_log_flush();
aliguori93fcfe32009-01-15 22:34:14 +00001807 qemu_log_close();
balrog924edca2007-06-10 14:07:13 +00001808 }
pbrook493ae1f2007-11-23 16:53:59 +00001809 va_end(ap2);
j_mayerf9373292007-09-29 12:18:20 +00001810 va_end(ap);
Riku Voipiofd052bf2010-01-25 14:30:49 +02001811#if defined(CONFIG_USER_ONLY)
1812 {
1813 struct sigaction act;
1814 sigfillset(&act.sa_mask);
1815 act.sa_handler = SIG_DFL;
1816 sigaction(SIGABRT, &act, NULL);
1817 }
1818#endif
bellard75012672003-06-21 13:11:07 +00001819 abort();
1820}
1821
thsc5be9f02007-02-28 20:20:53 +00001822CPUState *cpu_copy(CPUState *env)
1823{
ths01ba9812007-12-09 02:22:57 +00001824 CPUState *new_env = cpu_init(env->cpu_model_str);
thsc5be9f02007-02-28 20:20:53 +00001825 CPUState *next_cpu = new_env->next_cpu;
1826 int cpu_index = new_env->cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001827#if defined(TARGET_HAS_ICE)
1828 CPUBreakpoint *bp;
1829 CPUWatchpoint *wp;
1830#endif
1831
thsc5be9f02007-02-28 20:20:53 +00001832 memcpy(new_env, env, sizeof(CPUState));
aliguori5a38f082009-01-15 20:16:51 +00001833
1834 /* Preserve chaining and index. */
thsc5be9f02007-02-28 20:20:53 +00001835 new_env->next_cpu = next_cpu;
1836 new_env->cpu_index = cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001837
1838 /* Clone all break/watchpoints.
1839 Note: Once we support ptrace with hw-debug register access, make sure
1840 BP_CPU break/watchpoints are handled correctly on clone. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00001841 QTAILQ_INIT(&env->breakpoints);
1842 QTAILQ_INIT(&env->watchpoints);
aliguori5a38f082009-01-15 20:16:51 +00001843#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001844 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001845 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1846 }
Blue Swirl72cf2d42009-09-12 07:36:22 +00001847 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001848 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1849 wp->flags, NULL);
1850 }
1851#endif
1852
thsc5be9f02007-02-28 20:20:53 +00001853 return new_env;
1854}
1855
bellard01243112004-01-04 15:48:17 +00001856#if !defined(CONFIG_USER_ONLY)
1857
edgar_igl5c751e92008-05-06 08:44:21 +00001858static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1859{
1860 unsigned int i;
1861
1862 /* Discard jump cache entries for any tb which might potentially
1863 overlap the flushed page. */
1864 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1865 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001866 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001867
1868 i = tb_jmp_cache_hash_page(addr);
1869 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001870 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001871}
1872
Igor Kovalenko08738982009-07-12 02:15:40 +04001873static CPUTLBEntry s_cputlb_empty_entry = {
1874 .addr_read = -1,
1875 .addr_write = -1,
1876 .addr_code = -1,
1877 .addend = -1,
1878};
1879
bellardee8b7022004-02-03 23:35:10 +00001880/* NOTE: if flush_global is true, also flush global entries (not
1881 implemented yet) */
1882void tlb_flush(CPUState *env, int flush_global)
bellard33417e72003-08-10 21:47:01 +00001883{
bellard33417e72003-08-10 21:47:01 +00001884 int i;
bellard01243112004-01-04 15:48:17 +00001885
bellard9fa3e852004-01-04 18:06:42 +00001886#if defined(DEBUG_TLB)
1887 printf("tlb_flush:\n");
1888#endif
bellard01243112004-01-04 15:48:17 +00001889 /* must reset current TB so that interrupts cannot modify the
1890 links while we are modifying them */
1891 env->current_tb = NULL;
1892
bellard33417e72003-08-10 21:47:01 +00001893 for(i = 0; i < CPU_TLB_SIZE; i++) {
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001894 int mmu_idx;
1895 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
Igor Kovalenko08738982009-07-12 02:15:40 +04001896 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001897 }
bellard33417e72003-08-10 21:47:01 +00001898 }
bellard9fa3e852004-01-04 18:06:42 +00001899
bellard8a40a182005-11-20 10:35:40 +00001900 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
bellard9fa3e852004-01-04 18:06:42 +00001901
Paul Brookd4c430a2010-03-17 02:14:28 +00001902 env->tlb_flush_addr = -1;
1903 env->tlb_flush_mask = 0;
bellarde3db7222005-01-26 22:00:47 +00001904 tlb_flush_count++;
bellard33417e72003-08-10 21:47:01 +00001905}
1906
bellard274da6b2004-05-20 21:56:27 +00001907static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
bellard61382a52003-10-27 21:22:23 +00001908{
ths5fafdf22007-09-16 21:08:06 +00001909 if (addr == (tlb_entry->addr_read &
bellard84b7b8e2005-11-28 21:19:04 +00001910 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00001911 addr == (tlb_entry->addr_write &
bellard84b7b8e2005-11-28 21:19:04 +00001912 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00001913 addr == (tlb_entry->addr_code &
bellard84b7b8e2005-11-28 21:19:04 +00001914 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
Igor Kovalenko08738982009-07-12 02:15:40 +04001915 *tlb_entry = s_cputlb_empty_entry;
bellard84b7b8e2005-11-28 21:19:04 +00001916 }
bellard61382a52003-10-27 21:22:23 +00001917}
1918
bellard2e126692004-04-25 21:28:44 +00001919void tlb_flush_page(CPUState *env, target_ulong addr)
bellard33417e72003-08-10 21:47:01 +00001920{
bellard8a40a182005-11-20 10:35:40 +00001921 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001922 int mmu_idx;
bellard01243112004-01-04 15:48:17 +00001923
bellard9fa3e852004-01-04 18:06:42 +00001924#if defined(DEBUG_TLB)
bellard108c49b2005-07-24 12:55:09 +00001925 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
bellard9fa3e852004-01-04 18:06:42 +00001926#endif
Paul Brookd4c430a2010-03-17 02:14:28 +00001927 /* Check if we need to flush due to large pages. */
1928 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
1929#if defined(DEBUG_TLB)
1930 printf("tlb_flush_page: forced full flush ("
1931 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
1932 env->tlb_flush_addr, env->tlb_flush_mask);
1933#endif
1934 tlb_flush(env, 1);
1935 return;
1936 }
bellard01243112004-01-04 15:48:17 +00001937 /* must reset current TB so that interrupts cannot modify the
1938 links while we are modifying them */
1939 env->current_tb = NULL;
bellard33417e72003-08-10 21:47:01 +00001940
bellard61382a52003-10-27 21:22:23 +00001941 addr &= TARGET_PAGE_MASK;
bellard33417e72003-08-10 21:47:01 +00001942 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001943 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1944 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
bellard01243112004-01-04 15:48:17 +00001945
edgar_igl5c751e92008-05-06 08:44:21 +00001946 tlb_flush_jmp_cache(env, addr);
bellard9fa3e852004-01-04 18:06:42 +00001947}
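/* Worked example (illustrative values): with TARGET_PAGE_BITS = 12 and
   CPU_TLB_SIZE = 256, addr = 0x00123456 selects TLB set
   (0x00123456 >> 12) & 0xff = 0x23.  tlb_flush_entry() then clears that
   set's entry in each MMU mode only if its tags really match addr, so an
   unrelated page that happens to share the set survives.  Large pages
   span many sets, hence the forced full flush above. */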
1948
bellard9fa3e852004-01-04 18:06:42 +00001949/* update the TLBs so that writes to code in the virtual page 'addr'
1950 can be detected */
Anthony Liguoric227f092009-10-01 16:12:16 -05001951static void tlb_protect_code(ram_addr_t ram_addr)
bellard61382a52003-10-27 21:22:23 +00001952{
ths5fafdf22007-09-16 21:08:06 +00001953 cpu_physical_memory_reset_dirty(ram_addr,
bellard6a00d602005-11-21 23:25:50 +00001954 ram_addr + TARGET_PAGE_SIZE,
1955 CODE_DIRTY_FLAG);
bellard9fa3e852004-01-04 18:06:42 +00001956}
1957
bellard9fa3e852004-01-04 18:06:42 +00001958/* update the TLB so that writes in physical page 'phys_addr' are no longer
bellard3a7d9292005-08-21 09:26:42 +00001959 tested for self-modifying code */
Anthony Liguoric227f092009-10-01 16:12:16 -05001960static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
bellard3a7d9292005-08-21 09:26:42 +00001961 target_ulong vaddr)
bellard9fa3e852004-01-04 18:06:42 +00001962{
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001963 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
bellard1ccde1c2004-02-06 19:46:14 +00001964}
1965
ths5fafdf22007-09-16 21:08:06 +00001966static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
bellard1ccde1c2004-02-06 19:46:14 +00001967 unsigned long start, unsigned long length)
1968{
1969 unsigned long addr;
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001970 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
bellard84b7b8e2005-11-28 21:19:04 +00001971 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
bellard1ccde1c2004-02-06 19:46:14 +00001972 if ((addr - start) < length) {
pbrook0f459d12008-06-09 00:20:13 +00001973 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
bellard1ccde1c2004-02-06 19:46:14 +00001974 }
1975 }
1976}
1977
pbrook5579c7f2009-04-11 14:47:08 +00001978/* Note: start and end must be within the same ram block. */
Anthony Liguoric227f092009-10-01 16:12:16 -05001979void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
bellard0a962c02005-02-10 22:00:27 +00001980 int dirty_flags)
bellard1ccde1c2004-02-06 19:46:14 +00001981{
1982 CPUState *env;
bellard4f2ac232004-04-26 19:44:02 +00001983 unsigned long length, start1;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001984 int i;
bellard1ccde1c2004-02-06 19:46:14 +00001985
1986 start &= TARGET_PAGE_MASK;
1987 end = TARGET_PAGE_ALIGN(end);
1988
1989 length = end - start;
1990 if (length == 0)
1991 return;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001992 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00001993
bellard1ccde1c2004-02-06 19:46:14 +00001994 /* we modify the TLB cache so that the dirty bit will be set again
1995 when accessing the range */
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001996 start1 = (unsigned long)qemu_safe_ram_ptr(start);
Stefan Weila57d23e2011-04-30 22:49:26 +02001997 /* Check that we don't span multiple blocks - this breaks the
pbrook5579c7f2009-04-11 14:47:08 +00001998 address comparisons below. */
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001999 if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
pbrook5579c7f2009-04-11 14:47:08 +00002000 != (end - 1) - start) {
2001 abort();
2002 }
2003
bellard6a00d602005-11-21 23:25:50 +00002004 for(env = first_cpu; env != NULL; env = env->next_cpu) {
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002005 int mmu_idx;
2006 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2007 for(i = 0; i < CPU_TLB_SIZE; i++)
2008 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2009 start1, length);
2010 }
bellard6a00d602005-11-21 23:25:50 +00002011 }
bellard1ccde1c2004-02-06 19:46:14 +00002012}
2013
aliguori74576192008-10-06 14:02:03 +00002014int cpu_physical_memory_set_dirty_tracking(int enable)
2015{
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002016 int ret = 0;
aliguori74576192008-10-06 14:02:03 +00002017 in_migration = enable;
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002018 return ret;
aliguori74576192008-10-06 14:02:03 +00002019}
2020
bellard3a7d9292005-08-21 09:26:42 +00002021static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2022{
Anthony Liguoric227f092009-10-01 16:12:16 -05002023 ram_addr_t ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00002024 void *p;
bellard3a7d9292005-08-21 09:26:42 +00002025
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002026 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
pbrook5579c7f2009-04-11 14:47:08 +00002027 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2028 + tlb_entry->addend);
Marcelo Tosattie8902612010-10-11 15:31:19 -03002029 ram_addr = qemu_ram_addr_from_host_nofail(p);
bellard3a7d9292005-08-21 09:26:42 +00002030 if (!cpu_physical_memory_is_dirty(ram_addr)) {
pbrook0f459d12008-06-09 00:20:13 +00002031 tlb_entry->addr_write |= TLB_NOTDIRTY;
bellard3a7d9292005-08-21 09:26:42 +00002032 }
2033 }
2034}
2035
2036/* update the TLB according to the current state of the dirty bits */
2037void cpu_tlb_update_dirty(CPUState *env)
2038{
2039 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002040 int mmu_idx;
2041 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2042 for(i = 0; i < CPU_TLB_SIZE; i++)
2043 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2044 }
bellard3a7d9292005-08-21 09:26:42 +00002045}
2046
pbrook0f459d12008-06-09 00:20:13 +00002047static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002048{
pbrook0f459d12008-06-09 00:20:13 +00002049 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2050 tlb_entry->addr_write = vaddr;
bellard1ccde1c2004-02-06 19:46:14 +00002051}
2052
pbrook0f459d12008-06-09 00:20:13 +00002053/* update the TLB corresponding to virtual page vaddr
2054 so that it is no longer dirty */
2055static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002056{
bellard1ccde1c2004-02-06 19:46:14 +00002057 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002058 int mmu_idx;
bellard1ccde1c2004-02-06 19:46:14 +00002059
pbrook0f459d12008-06-09 00:20:13 +00002060 vaddr &= TARGET_PAGE_MASK;
bellard1ccde1c2004-02-06 19:46:14 +00002061 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002062 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2063 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
bellard9fa3e852004-01-04 18:06:42 +00002064}
2065
Paul Brookd4c430a2010-03-17 02:14:28 +00002066/* Our TLB does not support large pages, so remember the area covered by
2067 large pages and trigger a full TLB flush if these are invalidated. */
2068static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2069 target_ulong size)
2070{
2071 target_ulong mask = ~(size - 1);
2072
2073 if (env->tlb_flush_addr == (target_ulong)-1) {
2074 env->tlb_flush_addr = vaddr & mask;
2075 env->tlb_flush_mask = mask;
2076 return;
2077 }
2078 /* Extend the existing region to include the new page.
2079 This is a compromise between unnecessary flushes and the cost
2080 of maintaining a full variable size TLB. */
2081 mask &= env->tlb_flush_mask;
2082 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2083 mask <<= 1;
2084 }
2085 env->tlb_flush_addr &= mask;
2086 env->tlb_flush_mask = mask;
2087}
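/* Worked example (illustrative): after a 2 MB page at vaddr 0x00200000,
   tlb_flush_addr = 0x00200000 and tlb_flush_mask = ~0x1fffff.  Adding
   another 2 MB page at 0x00600000 shifts the mask left until
   (0x00200000 ^ 0x00600000) & mask == 0, giving mask = ~0x7fffff; the
   remembered region becomes the 8 MB block 0x00000000..0x007fffff, at
   the cost of more full flushes from tlb_flush_page(). */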
2088
Avi Kivity1d393fa2012-01-01 21:15:42 +02002089static bool is_ram_rom(ram_addr_t pd)
2090{
2091 pd &= ~TARGET_PAGE_MASK;
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002092 return pd == io_mem_ram.ram_addr || pd == io_mem_rom.ram_addr;
Avi Kivity1d393fa2012-01-01 21:15:42 +02002093}
2094
2095static bool is_ram_rom_romd(ram_addr_t pd)
2096{
2097 return is_ram_rom(pd) || (pd & IO_MEM_ROMD);
2098}
2099
Paul Brookd4c430a2010-03-17 02:14:28 +00002100/* Add a new TLB entry. At most one entry for a given virtual address
2101 is permitted. Only a single TARGET_PAGE_SIZE region is mapped; the
2102 supplied size is only used by tlb_flush_page. */
2103void tlb_set_page(CPUState *env, target_ulong vaddr,
2104 target_phys_addr_t paddr, int prot,
2105 int mmu_idx, target_ulong size)
bellard9fa3e852004-01-04 18:06:42 +00002106{
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02002107 PhysPageDesc p;
bellard4f2ac232004-04-26 19:44:02 +00002108 unsigned long pd;
bellard9fa3e852004-01-04 18:06:42 +00002109 unsigned int index;
bellard4f2ac232004-04-26 19:44:02 +00002110 target_ulong address;
pbrook0f459d12008-06-09 00:20:13 +00002111 target_ulong code_address;
Paul Brook355b1942010-04-05 00:28:53 +01002112 unsigned long addend;
bellard84b7b8e2005-11-28 21:19:04 +00002113 CPUTLBEntry *te;
aliguoria1d1bb32008-11-18 20:07:32 +00002114 CPUWatchpoint *wp;
Anthony Liguoric227f092009-10-01 16:12:16 -05002115 target_phys_addr_t iotlb;
bellard9fa3e852004-01-04 18:06:42 +00002116
Paul Brookd4c430a2010-03-17 02:14:28 +00002117 assert(size >= TARGET_PAGE_SIZE);
2118 if (size != TARGET_PAGE_SIZE) {
2119 tlb_add_large_page(env, vaddr, size);
2120 }
bellard92e873b2004-05-21 14:52:29 +00002121 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02002122 pd = p.phys_offset;
bellard9fa3e852004-01-04 18:06:42 +00002123#if defined(DEBUG_TLB)
Stefan Weil7fd3f492010-09-30 22:39:51 +02002124 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
2125 " prot=%x idx=%d pd=0x%08lx\n",
2126 vaddr, paddr, prot, mmu_idx, pd);
bellard9fa3e852004-01-04 18:06:42 +00002127#endif
2128
pbrook0f459d12008-06-09 00:20:13 +00002129 address = vaddr;
Avi Kivity1d393fa2012-01-01 21:15:42 +02002130 if (!is_ram_rom_romd(pd)) {
pbrook0f459d12008-06-09 00:20:13 +00002131 /* IO memory case (romd handled later) */
2132 address |= TLB_MMIO;
2133 }
pbrook5579c7f2009-04-11 14:47:08 +00002134 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
Avi Kivity1d393fa2012-01-01 21:15:42 +02002135 if (is_ram_rom(pd)) {
pbrook0f459d12008-06-09 00:20:13 +00002136 /* Normal RAM. */
2137 iotlb = pd & TARGET_PAGE_MASK;
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002138 if ((pd & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr)
2139 iotlb |= io_mem_notdirty.ram_addr;
pbrook0f459d12008-06-09 00:20:13 +00002140 else
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002141 iotlb |= io_mem_rom.ram_addr;
pbrook0f459d12008-06-09 00:20:13 +00002142 } else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002143 /* IO handlers are currently passed a physical address.
pbrook0f459d12008-06-09 00:20:13 +00002144 It would be nice to pass an offset from the base address
2145 of that region. This would avoid having to special case RAM,
2146 and avoid full address decoding in every device.
2147 We can't use the high bits of pd for this because
2148 IO_MEM_ROMD uses these as a ram address. */
pbrook8da3ff12008-12-01 18:59:50 +00002149 iotlb = (pd & ~TARGET_PAGE_MASK);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02002150 iotlb += p.region_offset;
pbrook0f459d12008-06-09 00:20:13 +00002151 }
pbrook6658ffb2007-03-16 23:58:11 +00002152
pbrook0f459d12008-06-09 00:20:13 +00002153 code_address = address;
2154 /* Make accesses to pages with watchpoints go via the
2155 watchpoint trap routines. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00002156 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00002157 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
Jun Koibf298f82010-05-06 14:36:59 +09002158 /* Avoid trapping reads of pages with a write breakpoint. */
2159 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
2160 iotlb = io_mem_watch + paddr;
2161 address |= TLB_MMIO;
2162 break;
2163 }
pbrook6658ffb2007-03-16 23:58:11 +00002164 }
pbrook0f459d12008-06-09 00:20:13 +00002165 }
balrogd79acba2007-06-26 20:01:13 +00002166
pbrook0f459d12008-06-09 00:20:13 +00002167 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2168 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2169 te = &env->tlb_table[mmu_idx][index];
2170 te->addend = addend - vaddr;
2171 if (prot & PAGE_READ) {
2172 te->addr_read = address;
2173 } else {
2174 te->addr_read = -1;
2175 }
edgar_igl5c751e92008-05-06 08:44:21 +00002176
pbrook0f459d12008-06-09 00:20:13 +00002177 if (prot & PAGE_EXEC) {
2178 te->addr_code = code_address;
2179 } else {
2180 te->addr_code = -1;
2181 }
2182 if (prot & PAGE_WRITE) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002183 if ((pd & ~TARGET_PAGE_MASK) == io_mem_rom.ram_addr ||
pbrook0f459d12008-06-09 00:20:13 +00002184 (pd & IO_MEM_ROMD)) {
2185 /* Write access calls the I/O callback. */
2186 te->addr_write = address | TLB_MMIO;
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002187 } else if ((pd & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr &&
pbrook0f459d12008-06-09 00:20:13 +00002188 !cpu_physical_memory_is_dirty(pd)) {
2189 te->addr_write = address | TLB_NOTDIRTY;
bellard84b7b8e2005-11-28 21:19:04 +00002190 } else {
pbrook0f459d12008-06-09 00:20:13 +00002191 te->addr_write = address;
bellard9fa3e852004-01-04 18:06:42 +00002192 }
pbrook0f459d12008-06-09 00:20:13 +00002193 } else {
2194 te->addr_write = -1;
bellard9fa3e852004-01-04 18:06:42 +00002195 }
bellard9fa3e852004-01-04 18:06:42 +00002196}
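/* Illustrative sketch (not part of the original file): how a softmmu
   fast path would use the entry filled in above.  For a plain RAM page,
   addend turns a guest virtual address straight into a host pointer;
   the variables are hypothetical. */
#if 0   /* example only */
int idx = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
CPUTLBEntry *e = &env->tlb_table[mmu_idx][idx];
if ((vaddr & TARGET_PAGE_MASK) ==
    (e->addr_read & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
    uint8_t val = *(uint8_t *)(unsigned long)(vaddr + e->addend);
    /* direct host load; pages tagged TLB_MMIO never take this path */
} else {
    /* TLB miss: tlb_fill() ends up calling tlb_set_page() */
}
#endif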
2197
bellard01243112004-01-04 15:48:17 +00002198#else
2199
bellardee8b7022004-02-03 23:35:10 +00002200void tlb_flush(CPUState *env, int flush_global)
bellard01243112004-01-04 15:48:17 +00002201{
2202}
2203
bellard2e126692004-04-25 21:28:44 +00002204void tlb_flush_page(CPUState *env, target_ulong addr)
bellard01243112004-01-04 15:48:17 +00002205{
2206}
2207
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002208/*
2209 * Walks guest process memory "regions" one by one
2210 * and calls callback function 'fn' for each region.
2211 */
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002212
2213struct walk_memory_regions_data
bellard9fa3e852004-01-04 18:06:42 +00002214{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002215 walk_memory_regions_fn fn;
2216 void *priv;
2217 unsigned long start;
2218 int prot;
2219};
bellard9fa3e852004-01-04 18:06:42 +00002220
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002221static int walk_memory_regions_end(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00002222 abi_ulong end, int new_prot)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002223{
2224 if (data->start != -1ul) {
2225 int rc = data->fn(data->priv, data->start, end, data->prot);
2226 if (rc != 0) {
2227 return rc;
bellard9fa3e852004-01-04 18:06:42 +00002228 }
bellard33417e72003-08-10 21:47:01 +00002229 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002230
2231 data->start = (new_prot ? end : -1ul);
2232 data->prot = new_prot;
2233
2234 return 0;
2235}
2236
2237static int walk_memory_regions_1(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00002238 abi_ulong base, int level, void **lp)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002239{
Paul Brookb480d9b2010-03-12 23:23:29 +00002240 abi_ulong pa;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002241 int i, rc;
2242
2243 if (*lp == NULL) {
2244 return walk_memory_regions_end(data, base, 0);
2245 }
2246
2247 if (level == 0) {
2248 PageDesc *pd = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00002249 for (i = 0; i < L2_SIZE; ++i) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002250 int prot = pd[i].flags;
2251
2252 pa = base | (i << TARGET_PAGE_BITS);
2253 if (prot != data->prot) {
2254 rc = walk_memory_regions_end(data, pa, prot);
2255 if (rc != 0) {
2256 return rc;
2257 }
2258 }
2259 }
2260 } else {
2261 void **pp = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00002262 for (i = 0; i < L2_SIZE; ++i) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002263 pa = base | ((abi_ulong)i <<
2264 (TARGET_PAGE_BITS + L2_BITS * level));
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002265 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2266 if (rc != 0) {
2267 return rc;
2268 }
2269 }
2270 }
2271
2272 return 0;
2273}
2274
2275int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2276{
2277 struct walk_memory_regions_data data;
2278 unsigned long i;
2279
2280 data.fn = fn;
2281 data.priv = priv;
2282 data.start = -1ul;
2283 data.prot = 0;
2284
2285 for (i = 0; i < V_L1_SIZE; i++) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002286 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002287 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2288 if (rc != 0) {
2289 return rc;
2290 }
2291 }
2292
2293 return walk_memory_regions_end(&data, 0, 0);
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002294}
2295
Paul Brookb480d9b2010-03-12 23:23:29 +00002296static int dump_region(void *priv, abi_ulong start,
2297 abi_ulong end, unsigned long prot)
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002298{
2299 FILE *f = (FILE *)priv;
2300
Paul Brookb480d9b2010-03-12 23:23:29 +00002301 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2302 " "TARGET_ABI_FMT_lx" %c%c%c\n",
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002303 start, end, end - start,
2304 ((prot & PAGE_READ) ? 'r' : '-'),
2305 ((prot & PAGE_WRITE) ? 'w' : '-'),
2306 ((prot & PAGE_EXEC) ? 'x' : '-'));
2307
2308 return (0);
2309}
2310
2311/* dump memory mappings */
2312void page_dump(FILE *f)
2313{
2314 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2315 "start", "end", "size", "prot");
2316 walk_memory_regions(f, dump_region);
bellard33417e72003-08-10 21:47:01 +00002317}
2318
pbrook53a59602006-03-25 19:31:22 +00002319int page_get_flags(target_ulong address)
bellard33417e72003-08-10 21:47:01 +00002320{
bellard9fa3e852004-01-04 18:06:42 +00002321 PageDesc *p;
2322
2323 p = page_find(address >> TARGET_PAGE_BITS);
bellard33417e72003-08-10 21:47:01 +00002324 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00002325 return 0;
2326 return p->flags;
bellard33417e72003-08-10 21:47:01 +00002327}
2328
Richard Henderson376a7902010-03-10 15:57:04 -08002329/* Modify the flags of a page and invalidate the code if necessary.
2330 The flag PAGE_WRITE_ORG is positioned automatically depending
2331 on PAGE_WRITE. The mmap_lock should already be held. */
pbrook53a59602006-03-25 19:31:22 +00002332void page_set_flags(target_ulong start, target_ulong end, int flags)
bellard9fa3e852004-01-04 18:06:42 +00002333{
Richard Henderson376a7902010-03-10 15:57:04 -08002334 target_ulong addr, len;
bellard9fa3e852004-01-04 18:06:42 +00002335
Richard Henderson376a7902010-03-10 15:57:04 -08002336 /* This function should never be called with addresses outside the
2337 guest address space. If this assert fires, it probably indicates
2338 a missing call to h2g_valid. */
Paul Brookb480d9b2010-03-12 23:23:29 +00002339#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2340 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002341#endif
2342 assert(start < end);
2343
bellard9fa3e852004-01-04 18:06:42 +00002344 start = start & TARGET_PAGE_MASK;
2345 end = TARGET_PAGE_ALIGN(end);
Richard Henderson376a7902010-03-10 15:57:04 -08002346
2347 if (flags & PAGE_WRITE) {
bellard9fa3e852004-01-04 18:06:42 +00002348 flags |= PAGE_WRITE_ORG;
Richard Henderson376a7902010-03-10 15:57:04 -08002349 }
2350
2351 for (addr = start, len = end - start;
2352 len != 0;
2353 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2354 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2355
2356 /* If the write protection bit is set, then we invalidate
2357 the code inside. */
ths5fafdf22007-09-16 21:08:06 +00002358 if (!(p->flags & PAGE_WRITE) &&
bellard9fa3e852004-01-04 18:06:42 +00002359 (flags & PAGE_WRITE) &&
2360 p->first_tb) {
bellardd720b932004-04-25 17:57:43 +00002361 tb_invalidate_phys_page(addr, 0, NULL);
bellard9fa3e852004-01-04 18:06:42 +00002362 }
2363 p->flags = flags;
2364 }
bellard9fa3e852004-01-04 18:06:42 +00002365}
2366
ths3d97b402007-11-02 19:02:07 +00002367int page_check_range(target_ulong start, target_ulong len, int flags)
2368{
2369 PageDesc *p;
2370 target_ulong end;
2371 target_ulong addr;
2372
Richard Henderson376a7902010-03-10 15:57:04 -08002373 /* This function should never be called with addresses outside the
2374 guest address space. If this assert fires, it probably indicates
2375 a missing call to h2g_valid. */
Blue Swirl338e9e62010-03-13 09:48:08 +00002376#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2377 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002378#endif
2379
Richard Henderson3e0650a2010-03-29 10:54:42 -07002380 if (len == 0) {
2381 return 0;
2382 }
Richard Henderson376a7902010-03-10 15:57:04 -08002383 if (start + len - 1 < start) {
2384 /* We've wrapped around. */
balrog55f280c2008-10-28 10:24:11 +00002385 return -1;
Richard Henderson376a7902010-03-10 15:57:04 -08002386 }
balrog55f280c2008-10-28 10:24:11 +00002387
ths3d97b402007-11-02 19:02:07 +00002388 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2389 start = start & TARGET_PAGE_MASK;
2390
Richard Henderson376a7902010-03-10 15:57:04 -08002391 for (addr = start, len = end - start;
2392 len != 0;
2393 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
ths3d97b402007-11-02 19:02:07 +00002394 p = page_find(addr >> TARGET_PAGE_BITS);
2395 if( !p )
2396 return -1;
2397 if( !(p->flags & PAGE_VALID) )
2398 return -1;
2399
bellarddae32702007-11-14 10:51:00 +00002400 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
ths3d97b402007-11-02 19:02:07 +00002401 return -1;
bellarddae32702007-11-14 10:51:00 +00002402 if (flags & PAGE_WRITE) {
2403 if (!(p->flags & PAGE_WRITE_ORG))
2404 return -1;
2405 /* unprotect the page if it was put read-only because it
2406 contains translated code */
2407 if (!(p->flags & PAGE_WRITE)) {
2408 if (!page_unprotect(addr, 0, NULL))
2409 return -1;
2410 }
2411 return 0;
2412 }
ths3d97b402007-11-02 19:02:07 +00002413 }
2414 return 0;
2415}
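/* Usage sketch (illustrative; TARGET_EFAULT stands in for the caller's
   errno convention): a user-mode syscall helper validating a guest
   buffer before touching it. */
#if 0   /* example only */
if (page_check_range(guest_addr, len, PAGE_READ | PAGE_WRITE) != 0) {
    return -TARGET_EFAULT;
}
#endif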
2416
bellard9fa3e852004-01-04 18:06:42 +00002417/* called from signal handler: invalidate the code and unprotect the
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002418 page. Return TRUE if the fault was successfully handled. */
pbrook53a59602006-03-25 19:31:22 +00002419int page_unprotect(target_ulong address, unsigned long pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00002420{
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002421 unsigned int prot;
2422 PageDesc *p;
pbrook53a59602006-03-25 19:31:22 +00002423 target_ulong host_start, host_end, addr;
bellard9fa3e852004-01-04 18:06:42 +00002424
pbrookc8a706f2008-06-02 16:16:42 +00002425 /* Technically this isn't safe inside a signal handler. However we
2426 know this only ever happens in a synchronous SEGV handler, so in
2427 practice it seems to be ok. */
2428 mmap_lock();
2429
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002430 p = page_find(address >> TARGET_PAGE_BITS);
2431 if (!p) {
pbrookc8a706f2008-06-02 16:16:42 +00002432 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002433 return 0;
pbrookc8a706f2008-06-02 16:16:42 +00002434 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002435
bellard9fa3e852004-01-04 18:06:42 +00002436 /* if the page was really writable, then we change its
2437 protection back to writable */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002438 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2439 host_start = address & qemu_host_page_mask;
2440 host_end = host_start + qemu_host_page_size;
2441
2442 prot = 0;
2443 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2444 p = page_find(addr >> TARGET_PAGE_BITS);
2445 p->flags |= PAGE_WRITE;
2446 prot |= p->flags;
2447
bellard9fa3e852004-01-04 18:06:42 +00002448 /* and since the content will be modified, we must invalidate
2449 the corresponding translated code. */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002450 tb_invalidate_phys_page(addr, pc, puc);
bellard9fa3e852004-01-04 18:06:42 +00002451#ifdef DEBUG_TB_CHECK
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002452 tb_invalidate_check(addr);
bellard9fa3e852004-01-04 18:06:42 +00002453#endif
bellard9fa3e852004-01-04 18:06:42 +00002454 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002455 mprotect((void *)g2h(host_start), qemu_host_page_size,
2456 prot & PAGE_BITS);
2457
2458 mmap_unlock();
2459 return 1;
bellard9fa3e852004-01-04 18:06:42 +00002460 }
pbrookc8a706f2008-06-02 16:16:42 +00002461 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002462 return 0;
2463}
2464
bellard6a00d602005-11-21 23:25:50 +00002465static inline void tlb_set_dirty(CPUState *env,
2466 unsigned long addr, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002467{
2468}
bellard9fa3e852004-01-04 18:06:42 +00002469#endif /* defined(CONFIG_USER_ONLY) */
2470
pbrooke2eef172008-06-08 01:09:01 +00002471#if !defined(CONFIG_USER_ONLY)
pbrook8da3ff12008-12-01 18:59:50 +00002472
Paul Brookc04b2b72010-03-01 03:31:14 +00002473#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2474typedef struct subpage_t {
Avi Kivity70c68e42012-01-02 12:32:48 +02002475 MemoryRegion iomem;
Paul Brookc04b2b72010-03-01 03:31:14 +00002476 target_phys_addr_t base;
Richard Hendersonf6405242010-04-22 16:47:31 -07002477 ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2478 ram_addr_t region_offset[TARGET_PAGE_SIZE];
Paul Brookc04b2b72010-03-01 03:31:14 +00002479} subpage_t;
2480
Anthony Liguoric227f092009-10-01 16:12:16 -05002481static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2482 ram_addr_t memory, ram_addr_t region_offset);
Richard Hendersonf6405242010-04-22 16:47:31 -07002483static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2484 ram_addr_t orig_memory,
2485 ram_addr_t region_offset);
blueswir1db7b5422007-05-26 17:36:03 +00002486#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2487 need_subpage) \
2488 do { \
2489 if (addr > start_addr) \
2490 start_addr2 = 0; \
2491 else { \
2492 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2493 if (start_addr2 > 0) \
2494 need_subpage = 1; \
2495 } \
2496 \
blueswir149e9fba2007-05-30 17:25:06 +00002497 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
blueswir1db7b5422007-05-26 17:36:03 +00002498 end_addr2 = TARGET_PAGE_SIZE - 1; \
2499 else { \
2500 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2501 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2502 need_subpage = 1; \
2503 } \
2504 } while (0)
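/* Worked example (illustrative, 4 KB target pages): registering a region
   with start_addr = 0x1000 and orig_size = 0x800.  For the page at
   addr = 0x1000: start_addr2 = 0, and since start_addr + orig_size - addr
   = 0x800 is less than TARGET_PAGE_SIZE, end_addr2 = 0x7ff and
   need_subpage is set, so only the first half of the page is routed
   through the subpage handler. */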
2505
Michael S. Tsirkin8f2498f2009-09-29 18:53:16 +02002506/* register physical memory.
2507 For RAM, 'size' must be a multiple of the target page size.
2508 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
pbrook8da3ff12008-12-01 18:59:50 +00002509 io memory page. The address used when calling the IO function is
2510 the offset from the start of the region, plus region_offset. Both
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002511 start_addr and region_offset are rounded down to a page boundary
pbrook8da3ff12008-12-01 18:59:50 +00002512 before calculating this offset. This should not be a problem unless
2513 the low bits of start_addr and region_offset differ. */
Avi Kivitydd811242012-01-02 12:17:03 +02002514void cpu_register_physical_memory_log(MemoryRegionSection *section,
2515 bool readable, bool readonly)
bellard33417e72003-08-10 21:47:01 +00002516{
Avi Kivitydd811242012-01-02 12:17:03 +02002517 target_phys_addr_t start_addr = section->offset_within_address_space;
2518 ram_addr_t size = section->size;
2519 ram_addr_t phys_offset = section->mr->ram_addr;
2520 ram_addr_t region_offset = section->offset_within_region;
Anthony Liguoric227f092009-10-01 16:12:16 -05002521 target_phys_addr_t addr, end_addr;
bellard92e873b2004-05-21 14:52:29 +00002522 PhysPageDesc *p;
bellard9d420372006-06-25 22:25:22 +00002523 CPUState *env;
Anthony Liguoric227f092009-10-01 16:12:16 -05002524 ram_addr_t orig_size = size;
Richard Hendersonf6405242010-04-22 16:47:31 -07002525 subpage_t *subpage;
bellard33417e72003-08-10 21:47:01 +00002526
Avi Kivitydd811242012-01-02 12:17:03 +02002527 if (memory_region_is_ram(section->mr)) {
2528 phys_offset += region_offset;
2529 region_offset = 0;
2530 }
2531
2532 if (!readable) {
2533 phys_offset &= ~TARGET_PAGE_MASK & ~IO_MEM_ROMD;
2534 }
2535
2536 if (readonly) {
2537 phys_offset |= io_mem_rom.ram_addr;
2538 }
2539
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002540 assert(size);
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002541
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002542 if (phys_offset == io_mem_unassigned.ram_addr) {
pbrook67c4d232009-02-23 13:16:07 +00002543 region_offset = start_addr;
2544 }
pbrook8da3ff12008-12-01 18:59:50 +00002545 region_offset &= TARGET_PAGE_MASK;
bellard5fd386f2004-05-23 21:11:22 +00002546 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
Anthony Liguoric227f092009-10-01 16:12:16 -05002547 end_addr = start_addr + (target_phys_addr_t)size;
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002548
2549 addr = start_addr;
2550 do {
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02002551 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 0);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002552 if (p && p->phys_offset != io_mem_unassigned.ram_addr) {
Anthony Liguoric227f092009-10-01 16:12:16 -05002553 ram_addr_t orig_memory = p->phys_offset;
2554 target_phys_addr_t start_addr2, end_addr2;
blueswir1db7b5422007-05-26 17:36:03 +00002555 int need_subpage = 0;
2556
2557 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2558 need_subpage);
Richard Hendersonf6405242010-04-22 16:47:31 -07002559 if (need_subpage) {
blueswir1db7b5422007-05-26 17:36:03 +00002560 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2561 subpage = subpage_init((addr & TARGET_PAGE_MASK),
pbrook8da3ff12008-12-01 18:59:50 +00002562 &p->phys_offset, orig_memory,
2563 p->region_offset);
blueswir1db7b5422007-05-26 17:36:03 +00002564 } else {
2565 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2566 >> IO_MEM_SHIFT];
2567 }
pbrook8da3ff12008-12-01 18:59:50 +00002568 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2569 region_offset);
2570 p->region_offset = 0;
blueswir1db7b5422007-05-26 17:36:03 +00002571 } else {
2572 p->phys_offset = phys_offset;
Avi Kivity2774c6d2012-01-01 18:24:24 +02002573 p->region_offset = region_offset;
Avi Kivity1d393fa2012-01-01 21:15:42 +02002574 if (is_ram_rom_romd(phys_offset))
blueswir1db7b5422007-05-26 17:36:03 +00002575 phys_offset += TARGET_PAGE_SIZE;
2576 }
2577 } else {
2578 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2579 p->phys_offset = phys_offset;
pbrook8da3ff12008-12-01 18:59:50 +00002580 p->region_offset = region_offset;
Avi Kivity1d393fa2012-01-01 21:15:42 +02002581 if (is_ram_rom_romd(phys_offset)) {
blueswir1db7b5422007-05-26 17:36:03 +00002582 phys_offset += TARGET_PAGE_SIZE;
pbrook0e8f0962008-12-02 09:02:15 +00002583 } else {
Anthony Liguoric227f092009-10-01 16:12:16 -05002584 target_phys_addr_t start_addr2, end_addr2;
blueswir1db7b5422007-05-26 17:36:03 +00002585 int need_subpage = 0;
2586
2587 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2588 end_addr2, need_subpage);
2589
Richard Hendersonf6405242010-04-22 16:47:31 -07002590 if (need_subpage) {
blueswir1db7b5422007-05-26 17:36:03 +00002591 subpage = subpage_init((addr & TARGET_PAGE_MASK),
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002592 &p->phys_offset,
2593 io_mem_unassigned.ram_addr,
pbrook67c4d232009-02-23 13:16:07 +00002594 addr & TARGET_PAGE_MASK);
blueswir1db7b5422007-05-26 17:36:03 +00002595 subpage_register(subpage, start_addr2, end_addr2,
pbrook8da3ff12008-12-01 18:59:50 +00002596 phys_offset, region_offset);
2597 p->region_offset = 0;
blueswir1db7b5422007-05-26 17:36:03 +00002598 }
2599 }
2600 }
pbrook8da3ff12008-12-01 18:59:50 +00002601 region_offset += TARGET_PAGE_SIZE;
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002602 addr += TARGET_PAGE_SIZE;
2603 } while (addr != end_addr);
ths3b46e622007-09-17 08:09:54 +00002604
bellard9d420372006-06-25 22:25:22 +00002605 /* since each CPU stores ram addresses in its TLB cache, we must
2606 reset the modified entries */
2607 /* XXX: slow ! */
2608 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2609 tlb_flush(env, 1);
2610 }
bellard33417e72003-08-10 21:47:01 +00002611}
2612
Anthony Liguoric227f092009-10-01 16:12:16 -05002613void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002614{
2615 if (kvm_enabled())
2616 kvm_coalesce_mmio_region(addr, size);
2617}
2618
Anthony Liguoric227f092009-10-01 16:12:16 -05002619void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002620{
2621 if (kvm_enabled())
2622 kvm_uncoalesce_mmio_region(addr, size);
2623}
2624
Sheng Yang62a27442010-01-26 19:21:16 +08002625void qemu_flush_coalesced_mmio_buffer(void)
2626{
2627 if (kvm_enabled())
2628 kvm_flush_coalesced_mmio_buffer();
2629}
2630
Marcelo Tosattic9027602010-03-01 20:25:08 -03002631#if defined(__linux__) && !defined(TARGET_S390X)
2632
2633#include <sys/vfs.h>
2634
2635#define HUGETLBFS_MAGIC 0x958458f6
2636
2637static long gethugepagesize(const char *path)
2638{
2639 struct statfs fs;
2640 int ret;
2641
2642 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002643 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002644 } while (ret != 0 && errno == EINTR);
2645
2646 if (ret != 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002647 perror(path);
2648 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002649 }
2650
2651 if (fs.f_type != HUGETLBFS_MAGIC)
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002652 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002653
2654 return fs.f_bsize;
2655}
2656
Alex Williamson04b16652010-07-02 11:13:17 -06002657static void *file_ram_alloc(RAMBlock *block,
2658 ram_addr_t memory,
2659 const char *path)
Marcelo Tosattic9027602010-03-01 20:25:08 -03002660{
2661 char *filename;
2662 void *area;
2663 int fd;
2664#ifdef MAP_POPULATE
2665 int flags;
2666#endif
2667 unsigned long hpagesize;
2668
2669 hpagesize = gethugepagesize(path);
2670 if (!hpagesize) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002671 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002672 }
2673
2674 if (memory < hpagesize) {
2675 return NULL;
2676 }
2677
2678 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2679 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2680 return NULL;
2681 }
2682
2683 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002684 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002685 }
2686
2687 fd = mkstemp(filename);
2688 if (fd < 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002689 perror("unable to create backing store for hugepages");
2690 free(filename);
2691 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002692 }
2693 unlink(filename);
2694 free(filename);
2695
2696 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2697
2698 /*
2699 * ftruncate is not supported by hugetlbfs in older
2700 * hosts, so don't bother bailing out on errors.
2701 * If anything goes wrong with it under other filesystems,
2702 * mmap will fail.
2703 */
2704 if (ftruncate(fd, memory))
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002705 perror("ftruncate");
Marcelo Tosattic9027602010-03-01 20:25:08 -03002706
2707#ifdef MAP_POPULATE
2708 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages when
2709 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2710 * to sidestep this quirk.
2711 */
2712 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2713 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2714#else
2715 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2716#endif
2717 if (area == MAP_FAILED) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002718 perror("file_ram_alloc: can't mmap RAM pages");
2719 close(fd);
2720 return (NULL);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002721 }
Alex Williamson04b16652010-07-02 11:13:17 -06002722 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002723 return area;
2724}
2725#endif
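/* Editor's sketch (not part of the original code): the size rounding
   used by file_ram_alloc above, isolated for clarity.  It rounds a
   request up to a multiple of the huge page size, assuming hpagesize
   is a power of two -- e.g. 3 MB with 2 MB pages becomes 4 MB. */
static inline ram_addr_t round_up_to_hugepage(ram_addr_t memory,
                                              unsigned long hpagesize)
{
    /* same expression as in file_ram_alloc */
    return (memory + hpagesize - 1) & ~((ram_addr_t)hpagesize - 1);
}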
2726
Alex Williamsond17b5282010-06-25 11:08:38 -06002727static ram_addr_t find_ram_offset(ram_addr_t size)
2728{
Alex Williamson04b16652010-07-02 11:13:17 -06002729 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06002730 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002731
2732 if (QLIST_EMPTY(&ram_list.blocks))
2733 return 0;
2734
2735 QLIST_FOREACH(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002736 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002737
2738 end = block->offset + block->length;
2739
2740 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2741 if (next_block->offset >= end) {
2742 next = MIN(next, next_block->offset);
2743 }
2744 }
2745 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06002746 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06002747 mingap = next - end;
2748 }
2749 }
Alex Williamson3e837b22011-10-31 08:54:09 -06002750
2751 if (offset == RAM_ADDR_MAX) {
2752 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2753 (uint64_t)size);
2754 abort();
2755 }
2756
Alex Williamson04b16652010-07-02 11:13:17 -06002757 return offset;
2758}
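/* Editor's note, worked example: with existing blocks [0, 4M) and
   [8M, 12M), the first block has end = 4M and nearest following
   start = 8M, i.e. a 4M gap; a 2M request therefore lands at offset
   4M, the smallest gap that still fits.  If no gap is large enough,
   the function aborts. */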
2759
2760static ram_addr_t last_ram_offset(void)
2761{
Alex Williamsond17b5282010-06-25 11:08:38 -06002762 RAMBlock *block;
2763 ram_addr_t last = 0;
2764
2765 QLIST_FOREACH(block, &ram_list.blocks, next)
2766 last = MAX(last, block->offset + block->length);
2767
2768 return last;
2769}
2770
Avi Kivityc5705a72011-12-20 15:59:12 +02002771void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
Cam Macdonell84b89d72010-07-26 18:10:57 -06002772{
2773 RAMBlock *new_block, *block;
2774
Avi Kivityc5705a72011-12-20 15:59:12 +02002775 new_block = NULL;
2776 QLIST_FOREACH(block, &ram_list.blocks, next) {
2777 if (block->offset == addr) {
2778 new_block = block;
2779 break;
2780 }
2781 }
2782 assert(new_block);
2783 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002784
2785 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2786 char *id = dev->parent_bus->info->get_dev_path(dev);
2787 if (id) {
2788 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05002789 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002790 }
2791 }
2792 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2793
2794 QLIST_FOREACH(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02002795 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06002796 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2797 new_block->idstr);
2798 abort();
2799 }
2800 }
Avi Kivityc5705a72011-12-20 15:59:12 +02002801}
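/* Editor's note (illustrative values): the resulting idstr is
   "<dev_path>/<name>" when a device path is available, e.g. something
   like "0000:00:03.0/vga.vram", and just the plain name otherwise,
   e.g. "pc.ram".  Duplicates abort because RAM blocks are keyed by
   idstr during migration. */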
2802
2803ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2804 MemoryRegion *mr)
2805{
2806 RAMBlock *new_block;
2807
2808 size = TARGET_PAGE_ALIGN(size);
2809 new_block = g_malloc0(sizeof(*new_block));
Cam Macdonell84b89d72010-07-26 18:10:57 -06002810
Avi Kivity7c637362011-12-21 13:09:49 +02002811 new_block->mr = mr;
Jun Nakajima432d2682010-08-31 16:41:25 +01002812 new_block->offset = find_ram_offset(size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002813 if (host) {
2814 new_block->host = host;
Huang Yingcd19cfa2011-03-02 08:56:19 +01002815 new_block->flags |= RAM_PREALLOC_MASK;
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002816 } else {
2817 if (mem_path) {
2818#if defined (__linux__) && !defined(TARGET_S390X)
2819 new_block->host = file_ram_alloc(new_block, size, mem_path);
2820 if (!new_block->host) {
2821 new_block->host = qemu_vmalloc(size);
Andreas Färbere78815a2010-09-25 11:26:05 +00002822 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002823 }
2824#else
2825 fprintf(stderr, "-mem-path option unsupported\n");
2826 exit(1);
2827#endif
2828 } else {
2829#if defined(TARGET_S390X) && defined(CONFIG_KVM)
Christian Borntraegerff836782011-05-10 14:49:10 +02002830 /* S390 KVM requires the topmost vma of the RAM to be smaller than
2831 a system-defined value, which is at least 256GB. Larger systems
2832 have larger values. We put the guest between the end of data
2833 segment (system break) and this value. We use 32GB as a base to
2834 have enough room for the system break to grow. */
2835 new_block->host = mmap((void*)0x800000000, size,
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002836 PROT_EXEC|PROT_READ|PROT_WRITE,
Christian Borntraegerff836782011-05-10 14:49:10 +02002837 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
Alexander Graffb8b2732011-05-20 17:33:28 +02002838 if (new_block->host == MAP_FAILED) {
2839 fprintf(stderr, "Allocating RAM failed\n");
2840 abort();
2841 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002842#else
Jan Kiszka868bb332011-06-21 22:59:09 +02002843 if (xen_enabled()) {
Avi Kivityfce537d2011-12-18 15:48:55 +02002844 xen_ram_alloc(new_block->offset, size, mr);
Jun Nakajima432d2682010-08-31 16:41:25 +01002845 } else {
2846 new_block->host = qemu_vmalloc(size);
2847 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002848#endif
Andreas Färbere78815a2010-09-25 11:26:05 +00002849 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002850 }
2851 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06002852 new_block->length = size;
2853
2854 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2855
Anthony Liguori7267c092011-08-20 22:09:37 -05002856 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
Cam Macdonell84b89d72010-07-26 18:10:57 -06002857 last_ram_offset() >> TARGET_PAGE_BITS);
2858 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2859 0xff, size >> TARGET_PAGE_BITS);
2860
2861 if (kvm_enabled())
2862 kvm_setup_guest_memory(new_block->host, size);
2863
2864 return new_block->offset;
2865}
2866
Avi Kivityc5705a72011-12-20 15:59:12 +02002867ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
pbrook94a6b542009-04-11 17:15:54 +00002868{
Avi Kivityc5705a72011-12-20 15:59:12 +02002869 return qemu_ram_alloc_from_ptr(size, NULL, mr);
pbrook94a6b542009-04-11 17:15:54 +00002870}
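/* Editor's sketch of a direct caller (hypothetical; most code reaches
   this through the memory API, e.g. memory_region_init_ram): */
static ram_addr_t example_alloc_vram(MemoryRegion *mr)
{
    /* allocate 16 MB of host-backed guest RAM for this region and
       return its ram_addr_t handle */
    return qemu_ram_alloc(16 * 1024 * 1024, mr);
}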
bellarde9a1ab12007-02-08 23:08:38 +00002871
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002872void qemu_ram_free_from_ptr(ram_addr_t addr)
2873{
2874 RAMBlock *block;
2875
2876 QLIST_FOREACH(block, &ram_list.blocks, next) {
2877 if (addr == block->offset) {
2878 QLIST_REMOVE(block, next);
Anthony Liguori7267c092011-08-20 22:09:37 -05002879 g_free(block);
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002880 return;
2881 }
2882 }
2883}
2884
Anthony Liguoric227f092009-10-01 16:12:16 -05002885void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00002886{
Alex Williamson04b16652010-07-02 11:13:17 -06002887 RAMBlock *block;
2888
2889 QLIST_FOREACH(block, &ram_list.blocks, next) {
2890 if (addr == block->offset) {
2891 QLIST_REMOVE(block, next);
Huang Yingcd19cfa2011-03-02 08:56:19 +01002892 if (block->flags & RAM_PREALLOC_MASK) {
2893 ;
2894 } else if (mem_path) {
Alex Williamson04b16652010-07-02 11:13:17 -06002895#if defined (__linux__) && !defined(TARGET_S390X)
2896 if (block->fd) {
2897 munmap(block->host, block->length);
2898 close(block->fd);
2899 } else {
2900 qemu_vfree(block->host);
2901 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01002902#else
2903 abort();
Alex Williamson04b16652010-07-02 11:13:17 -06002904#endif
2905 } else {
2906#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2907 munmap(block->host, block->length);
2908#else
Jan Kiszka868bb332011-06-21 22:59:09 +02002909 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002910 xen_invalidate_map_cache_entry(block->host);
Jun Nakajima432d2682010-08-31 16:41:25 +01002911 } else {
2912 qemu_vfree(block->host);
2913 }
Alex Williamson04b16652010-07-02 11:13:17 -06002914#endif
2915 }
Anthony Liguori7267c092011-08-20 22:09:37 -05002916 g_free(block);
Alex Williamson04b16652010-07-02 11:13:17 -06002917 return;
2918 }
2919 }
2920
bellarde9a1ab12007-02-08 23:08:38 +00002921}
2922
Huang Yingcd19cfa2011-03-02 08:56:19 +01002923#ifndef _WIN32
2924void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2925{
2926 RAMBlock *block;
2927 ram_addr_t offset;
2928 int flags;
2929 void *area, *vaddr;
2930
2931 QLIST_FOREACH(block, &ram_list.blocks, next) {
2932 offset = addr - block->offset;
2933 if (offset < block->length) {
2934 vaddr = block->host + offset;
2935 if (block->flags & RAM_PREALLOC_MASK) {
2936 ;
2937 } else {
2938 flags = MAP_FIXED;
2939 munmap(vaddr, length);
2940 if (mem_path) {
2941#if defined(__linux__) && !defined(TARGET_S390X)
2942 if (block->fd) {
2943#ifdef MAP_POPULATE
2944 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
2945 MAP_PRIVATE;
2946#else
2947 flags |= MAP_PRIVATE;
2948#endif
2949 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2950 flags, block->fd, offset);
2951 } else {
2952 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2953 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2954 flags, -1, 0);
2955 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01002956#else
2957 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01002958#endif
2959 } else {
2960#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2961 flags |= MAP_SHARED | MAP_ANONYMOUS;
2962 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
2963 flags, -1, 0);
2964#else
2965 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2966 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2967 flags, -1, 0);
2968#endif
2969 }
2970 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002971 fprintf(stderr, "Could not remap addr: "
2972 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01002973 length, addr);
2974 exit(1);
2975 }
2976 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
2977 }
2978 return;
2979 }
2980 }
2981}
2982#endif /* !_WIN32 */
2983
pbrookdc828ca2009-04-09 22:21:07 +00002984/* Return a host pointer to ram allocated with qemu_ram_alloc.
pbrook5579c7f2009-04-11 14:47:08 +00002985 With the exception of the softmmu code in this file, this should
2986 only be used for local memory (e.g. video ram) that the device owns,
2987 and knows it isn't going to access beyond the end of the block.
2988
2989 It should not be used for general purpose DMA.
2990 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2991 */
Anthony Liguoric227f092009-10-01 16:12:16 -05002992void *qemu_get_ram_ptr(ram_addr_t addr)
pbrookdc828ca2009-04-09 22:21:07 +00002993{
pbrook94a6b542009-04-11 17:15:54 +00002994 RAMBlock *block;
2995
Alex Williamsonf471a172010-06-11 11:11:42 -06002996 QLIST_FOREACH(block, &ram_list.blocks, next) {
2997 if (addr - block->offset < block->length) {
Vincent Palatin7d82af32011-03-10 15:47:46 -05002998 /* Move this entry to the start of the list. */
2999 if (block != QLIST_FIRST(&ram_list.blocks)) {
3000 QLIST_REMOVE(block, next);
3001 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
3002 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003003 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003004 /* We need to check whether the requested address is in the RAM
3005 * block because we don't want to map the entire guest memory in QEMU;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003006 * in that case, map only up to the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01003007 */
3008 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003009 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01003010 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003011 block->host =
3012 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01003013 }
3014 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003015 return block->host + (addr - block->offset);
3016 }
pbrook94a6b542009-04-11 17:15:54 +00003017 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003018
3019 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3020 abort();
3021
3022 return NULL;
pbrookdc828ca2009-04-09 22:21:07 +00003023}
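/* Editor's sketch contrasting the two access styles described above
   (hypothetical offsets and addresses): a device may cache a host
   pointer into RAM it owns, while general-purpose DMA must go through
   cpu_physical_memory_rw(). */
static void example_access_styles(ram_addr_t vram_offset)
{
    uint8_t *vram = qemu_get_ram_ptr(vram_offset);  /* device-owned RAM */
    uint8_t buf[4] = { 0 };

    vram[0] = 0xff;                                  /* direct host access */
    cpu_physical_memory_rw(0x1000, buf, sizeof(buf), 0);  /* generic DMA */
}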
3024
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02003025/* Return a host pointer to ram allocated with qemu_ram_alloc.
3026 * Same as qemu_get_ram_ptr, but avoids reordering the RAM blocks.
3027 */
3028void *qemu_safe_ram_ptr(ram_addr_t addr)
3029{
3030 RAMBlock *block;
3031
3032 QLIST_FOREACH(block, &ram_list.blocks, next) {
3033 if (addr - block->offset < block->length) {
Jan Kiszka868bb332011-06-21 22:59:09 +02003034 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003035 /* We need to check whether the requested address is in the RAM
3036 * block because we don't want to map the entire guest memory in QEMU;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003037 * in that case, map only up to the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01003038 */
3039 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003040 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01003041 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003042 block->host =
3043 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01003044 }
3045 }
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02003046 return block->host + (addr - block->offset);
3047 }
3048 }
3049
3050 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3051 abort();
3052
3053 return NULL;
3054}
3055
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003056/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
3057 * but takes a size argument */
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003058void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003059{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003060 if (*size == 0) {
3061 return NULL;
3062 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003063 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003064 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02003065 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003066 RAMBlock *block;
3067
3068 QLIST_FOREACH(block, &ram_list.blocks, next) {
3069 if (addr - block->offset < block->length) {
3070 if (addr - block->offset + *size > block->length)
3071 *size = block->length - addr + block->offset;
3072 return block->host + (addr - block->offset);
3073 }
3074 }
3075
3076 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3077 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003078 }
3079}
3080
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003081void qemu_put_ram_ptr(void *addr)
3082{
3083 trace_qemu_put_ram_ptr(addr);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003084}
3085
Marcelo Tosattie8902612010-10-11 15:31:19 -03003086int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00003087{
pbrook94a6b542009-04-11 17:15:54 +00003088 RAMBlock *block;
3089 uint8_t *host = ptr;
3090
Jan Kiszka868bb332011-06-21 22:59:09 +02003091 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003092 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003093 return 0;
3094 }
3095
Alex Williamsonf471a172010-06-11 11:11:42 -06003096 QLIST_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003097 /* This case appears when the block is not mapped. */
3098 if (block->host == NULL) {
3099 continue;
3100 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003101 if (host - block->host < block->length) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03003102 *ram_addr = block->offset + (host - block->host);
3103 return 0;
Alex Williamsonf471a172010-06-11 11:11:42 -06003104 }
pbrook94a6b542009-04-11 17:15:54 +00003105 }
Jun Nakajima432d2682010-08-31 16:41:25 +01003106
Marcelo Tosattie8902612010-10-11 15:31:19 -03003107 return -1;
3108}
Alex Williamsonf471a172010-06-11 11:11:42 -06003109
Marcelo Tosattie8902612010-10-11 15:31:19 -03003110/* Some of the softmmu routines need to translate from a host pointer
3111 (typically a TLB entry) back to a ram offset. */
3112ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3113{
3114 ram_addr_t ram_addr;
Alex Williamsonf471a172010-06-11 11:11:42 -06003115
Marcelo Tosattie8902612010-10-11 15:31:19 -03003116 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3117 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3118 abort();
3119 }
3120 return ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00003121}
3122
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003123static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
3124 unsigned size)
bellard33417e72003-08-10 21:47:01 +00003125{
pbrook67d3b952006-12-18 05:03:52 +00003126#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00003127 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
pbrook67d3b952006-12-18 05:03:52 +00003128#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003129#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003130 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00003131#endif
3132 return 0;
3133}
3134
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003135static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
3136 uint64_t val, unsigned size)
blueswir1e18231a2008-10-06 18:46:28 +00003137{
3138#ifdef DEBUG_UNASSIGNED
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003139 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
blueswir1e18231a2008-10-06 18:46:28 +00003140#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003141#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003142 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00003143#endif
3144}
3145
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003146static const MemoryRegionOps unassigned_mem_ops = {
3147 .read = unassigned_mem_read,
3148 .write = unassigned_mem_write,
3149 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00003150};
3151
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003152static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
3153 unsigned size)
3154{
3155 abort();
3156}
3157
3158static void error_mem_write(void *opaque, target_phys_addr_t addr,
3159 uint64_t value, unsigned size)
3160{
3161 abort();
3162}
3163
3164static const MemoryRegionOps error_mem_ops = {
3165 .read = error_mem_read,
3166 .write = error_mem_write,
3167 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00003168};
3169
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003170static const MemoryRegionOps rom_mem_ops = {
3171 .read = error_mem_read,
3172 .write = unassigned_mem_write,
3173 .endianness = DEVICE_NATIVE_ENDIAN,
3174};
3175
3176static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
3177 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00003178{
bellard3a7d9292005-08-21 09:26:42 +00003179 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003180 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003181 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3182#if !defined(CONFIG_USER_ONLY)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003183 tb_invalidate_phys_page_fast(ram_addr, size);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003184 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003185#endif
3186 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003187 switch (size) {
3188 case 1:
3189 stb_p(qemu_get_ram_ptr(ram_addr), val);
3190 break;
3191 case 2:
3192 stw_p(qemu_get_ram_ptr(ram_addr), val);
3193 break;
3194 case 4:
3195 stl_p(qemu_get_ram_ptr(ram_addr), val);
3196 break;
3197 default:
3198 abort();
3199 }
bellardf23db162005-08-21 19:12:28 +00003200 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003201 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00003202 /* we remove the notdirty callback only if the code has been
3203 flushed */
3204 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00003205 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00003206}
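/* Editor's note, worked example: dirty_flags is a per-page byte of
   flag bits.  A write through this slow path first invalidates any
   translated code for the page, then sets every flag except
   CODE_DIRTY_FLAG (0xff & ~CODE_DIRTY_FLAG); the flags reach 0xff
   only once the translated code has been flushed, at which point the
   notdirty handler is dropped via tlb_set_dirty(). */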
3207
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003208static const MemoryRegionOps notdirty_mem_ops = {
3209 .read = error_mem_read,
3210 .write = notdirty_mem_write,
3211 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00003212};
3213
pbrook0f459d12008-06-09 00:20:13 +00003214/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00003215static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00003216{
3217 CPUState *env = cpu_single_env;
aliguori06d55cc2008-11-18 20:24:06 +00003218 target_ulong pc, cs_base;
3219 TranslationBlock *tb;
pbrook0f459d12008-06-09 00:20:13 +00003220 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00003221 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00003222 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00003223
aliguori06d55cc2008-11-18 20:24:06 +00003224 if (env->watchpoint_hit) {
3225 /* We re-entered the check after replacing the TB. Now raise
3226 * the debug interrupt so that is will trigger after the
3227 * current instruction. */
3228 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3229 return;
3230 }
pbrook2e70f6e2008-06-29 01:03:05 +00003231 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003232 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00003233 if ((vaddr == (wp->vaddr & len_mask) ||
3234 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00003235 wp->flags |= BP_WATCHPOINT_HIT;
3236 if (!env->watchpoint_hit) {
3237 env->watchpoint_hit = wp;
3238 tb = tb_find_pc(env->mem_io_pc);
3239 if (!tb) {
3240 cpu_abort(env, "check_watchpoint: could not find TB for "
3241 "pc=%p", (void *)env->mem_io_pc);
3242 }
Stefan Weil618ba8e2011-04-18 06:39:53 +00003243 cpu_restore_state(tb, env, env->mem_io_pc);
aliguori6e140f22008-11-18 20:37:55 +00003244 tb_phys_invalidate(tb, -1);
3245 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3246 env->exception_index = EXCP_DEBUG;
3247 } else {
3248 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3249 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3250 }
3251 cpu_resume_from_signal(env, NULL);
aliguori06d55cc2008-11-18 20:24:06 +00003252 }
aliguori6e140f22008-11-18 20:37:55 +00003253 } else {
3254 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00003255 }
3256 }
3257}
3258
pbrook6658ffb2007-03-16 23:58:11 +00003259/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3260 so these check for a hit then pass through to the normal out-of-line
3261 phys routines. */
Anthony Liguoric227f092009-10-01 16:12:16 -05003262static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
pbrook6658ffb2007-03-16 23:58:11 +00003263{
aliguorib4051332008-11-18 20:14:20 +00003264 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
pbrook6658ffb2007-03-16 23:58:11 +00003265 return ldub_phys(addr);
3266}
3267
Anthony Liguoric227f092009-10-01 16:12:16 -05003268static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
pbrook6658ffb2007-03-16 23:58:11 +00003269{
aliguorib4051332008-11-18 20:14:20 +00003270 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
pbrook6658ffb2007-03-16 23:58:11 +00003271 return lduw_phys(addr);
3272}
3273
Anthony Liguoric227f092009-10-01 16:12:16 -05003274static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
pbrook6658ffb2007-03-16 23:58:11 +00003275{
aliguorib4051332008-11-18 20:14:20 +00003276 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
pbrook6658ffb2007-03-16 23:58:11 +00003277 return ldl_phys(addr);
3278}
3279
Anthony Liguoric227f092009-10-01 16:12:16 -05003280static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
pbrook6658ffb2007-03-16 23:58:11 +00003281 uint32_t val)
3282{
aliguorib4051332008-11-18 20:14:20 +00003283 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
pbrook6658ffb2007-03-16 23:58:11 +00003284 stb_phys(addr, val);
3285}
3286
Anthony Liguoric227f092009-10-01 16:12:16 -05003287static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
pbrook6658ffb2007-03-16 23:58:11 +00003288 uint32_t val)
3289{
aliguorib4051332008-11-18 20:14:20 +00003290 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
pbrook6658ffb2007-03-16 23:58:11 +00003291 stw_phys(addr, val);
3292}
3293
Anthony Liguoric227f092009-10-01 16:12:16 -05003294static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
pbrook6658ffb2007-03-16 23:58:11 +00003295 uint32_t val)
3296{
aliguorib4051332008-11-18 20:14:20 +00003297 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
pbrook6658ffb2007-03-16 23:58:11 +00003298 stl_phys(addr, val);
3299}
3300
Blue Swirld60efc62009-08-25 18:29:31 +00003301static CPUReadMemoryFunc * const watch_mem_read[3] = {
pbrook6658ffb2007-03-16 23:58:11 +00003302 watch_mem_readb,
3303 watch_mem_readw,
3304 watch_mem_readl,
3305};
3306
Blue Swirld60efc62009-08-25 18:29:31 +00003307static CPUWriteMemoryFunc * const watch_mem_write[3] = {
pbrook6658ffb2007-03-16 23:58:11 +00003308 watch_mem_writeb,
3309 watch_mem_writew,
3310 watch_mem_writel,
3311};
pbrook6658ffb2007-03-16 23:58:11 +00003312
Avi Kivity70c68e42012-01-02 12:32:48 +02003313static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
3314 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003315{
Avi Kivity70c68e42012-01-02 12:32:48 +02003316 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003317 unsigned int idx = SUBPAGE_IDX(addr);
blueswir1db7b5422007-05-26 17:36:03 +00003318#if defined(DEBUG_SUBPAGE)
3319 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3320 mmio, len, addr, idx);
3321#endif
blueswir1db7b5422007-05-26 17:36:03 +00003322
Richard Hendersonf6405242010-04-22 16:47:31 -07003323 addr += mmio->region_offset[idx];
3324 idx = mmio->sub_io_index[idx];
Avi Kivity70c68e42012-01-02 12:32:48 +02003325 return io_mem_read(idx, addr, len);
blueswir1db7b5422007-05-26 17:36:03 +00003326}
3327
Avi Kivity70c68e42012-01-02 12:32:48 +02003328static void subpage_write(void *opaque, target_phys_addr_t addr,
3329 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003330{
Avi Kivity70c68e42012-01-02 12:32:48 +02003331 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003332 unsigned int idx = SUBPAGE_IDX(addr);
blueswir1db7b5422007-05-26 17:36:03 +00003333#if defined(DEBUG_SUBPAGE)
Avi Kivity70c68e42012-01-02 12:32:48 +02003334 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
3335 " idx %d value %"PRIx64"\n",
Richard Hendersonf6405242010-04-22 16:47:31 -07003336 __func__, mmio, len, addr, idx, value);
blueswir1db7b5422007-05-26 17:36:03 +00003337#endif
Richard Hendersonf6405242010-04-22 16:47:31 -07003338
3339 addr += mmio->region_offset[idx];
3340 idx = mmio->sub_io_index[idx];
Avi Kivity70c68e42012-01-02 12:32:48 +02003341 io_mem_write(idx, addr, value, len);
blueswir1db7b5422007-05-26 17:36:03 +00003342}
3343
Avi Kivity70c68e42012-01-02 12:32:48 +02003344static const MemoryRegionOps subpage_ops = {
3345 .read = subpage_read,
3346 .write = subpage_write,
3347 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00003348};
3349
Andreas Färber56384e82011-11-30 16:26:21 +01003350static uint32_t subpage_ram_readb(void *opaque, target_phys_addr_t addr)
3351{
3352 ram_addr_t raddr = addr;
3353 void *ptr = qemu_get_ram_ptr(raddr);
3354 return ldub_p(ptr);
3355}
3356
3357static void subpage_ram_writeb(void *opaque, target_phys_addr_t addr,
3358 uint32_t value)
3359{
3360 ram_addr_t raddr = addr;
3361 void *ptr = qemu_get_ram_ptr(raddr);
3362 stb_p(ptr, value);
3363}
3364
3365static uint32_t subpage_ram_readw(void *opaque, target_phys_addr_t addr)
3366{
3367 ram_addr_t raddr = addr;
3368 void *ptr = qemu_get_ram_ptr(raddr);
3369 return lduw_p(ptr);
3370}
3371
3372static void subpage_ram_writew(void *opaque, target_phys_addr_t addr,
3373 uint32_t value)
3374{
3375 ram_addr_t raddr = addr;
3376 void *ptr = qemu_get_ram_ptr(raddr);
3377 stw_p(ptr, value);
3378}
3379
3380static uint32_t subpage_ram_readl(void *opaque, target_phys_addr_t addr)
3381{
3382 ram_addr_t raddr = addr;
3383 void *ptr = qemu_get_ram_ptr(raddr);
3384 return ldl_p(ptr);
3385}
3386
3387static void subpage_ram_writel(void *opaque, target_phys_addr_t addr,
3388 uint32_t value)
3389{
3390 ram_addr_t raddr = addr;
3391 void *ptr = qemu_get_ram_ptr(raddr);
3392 stl_p(ptr, value);
3393}
3394
3395static CPUReadMemoryFunc * const subpage_ram_read[] = {
3396 &subpage_ram_readb,
3397 &subpage_ram_readw,
3398 &subpage_ram_readl,
3399};
3400
3401static CPUWriteMemoryFunc * const subpage_ram_write[] = {
3402 &subpage_ram_writeb,
3403 &subpage_ram_writew,
3404 &subpage_ram_writel,
3405};
3406
Anthony Liguoric227f092009-10-01 16:12:16 -05003407static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3408 ram_addr_t memory, ram_addr_t region_offset)
blueswir1db7b5422007-05-26 17:36:03 +00003409{
3410 int idx, eidx;
3411
3412 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3413 return -1;
3414 idx = SUBPAGE_IDX(start);
3415 eidx = SUBPAGE_IDX(end);
3416#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00003417 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
blueswir1db7b5422007-05-26 17:36:03 +00003418 mmio, start, end, idx, eidx, memory);
3419#endif
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003420 if ((memory & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
Andreas Färber56384e82011-11-30 16:26:21 +01003421 memory = IO_MEM_SUBPAGE_RAM;
3422 }
Richard Hendersonf6405242010-04-22 16:47:31 -07003423 memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
blueswir1db7b5422007-05-26 17:36:03 +00003424 for (; idx <= eidx; idx++) {
Richard Hendersonf6405242010-04-22 16:47:31 -07003425 mmio->sub_io_index[idx] = memory;
3426 mmio->region_offset[idx] = region_offset;
blueswir1db7b5422007-05-26 17:36:03 +00003427 }
3428
3429 return 0;
3430}
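/* Editor's note, worked example (4K target pages): registering bytes
   0x080..0xfff of a page calls subpage_register(mmio, 0x080, 0xfff,
   memory, region_offset), filling sub_io_index[0x080..0xfff] with the
   handler index while the entries below 0x080 keep whatever handler
   the subpage was initialized with. */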
3431
Richard Hendersonf6405242010-04-22 16:47:31 -07003432static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3433 ram_addr_t orig_memory,
3434 ram_addr_t region_offset)
blueswir1db7b5422007-05-26 17:36:03 +00003435{
Anthony Liguoric227f092009-10-01 16:12:16 -05003436 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00003437 int subpage_memory;
3438
Anthony Liguori7267c092011-08-20 22:09:37 -05003439 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00003440
3441 mmio->base = base;
Avi Kivity70c68e42012-01-02 12:32:48 +02003442 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
3443 "subpage", TARGET_PAGE_SIZE);
3444 subpage_memory = mmio->iomem.ram_addr;
blueswir1db7b5422007-05-26 17:36:03 +00003445#if defined(DEBUG_SUBPAGE)
aliguori1eec6142009-02-05 22:06:18 +00003446 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3447 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
blueswir1db7b5422007-05-26 17:36:03 +00003448#endif
aliguori1eec6142009-02-05 22:06:18 +00003449 *phys = subpage_memory | IO_MEM_SUBPAGE;
Richard Hendersonf6405242010-04-22 16:47:31 -07003450 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
blueswir1db7b5422007-05-26 17:36:03 +00003451
3452 return mmio;
3453}
3454
aliguori88715652009-02-11 15:20:58 +00003455static int get_free_io_mem_idx(void)
3456{
3457 int i;
3458
3459 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3460 if (!io_mem_used[i]) {
3461 io_mem_used[i] = 1;
3462 return i;
3463 }
Riku Voipioc6703b42009-12-03 15:56:05 +02003464 fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
aliguori88715652009-02-11 15:20:58 +00003465 return -1;
3466}
3467
bellard33417e72003-08-10 21:47:01 +00003468/* mem_read and mem_write are arrays of function pointers for byte
3469 (index 0), word (index 1) and dword (index 2) accesses. All three
Paul Brook0b4e6e32009-04-30 18:37:55 +01003470 entries must be supplied; a NULL function pointer is not allowed.
blueswir13ee89922008-01-02 19:45:26 +00003471 If io_index is non zero, the corresponding io zone is
blueswir14254fab2008-01-01 16:57:19 +00003472 modified. If it is zero, a new io zone is allocated. The return
3473 value can be used with cpu_register_physical_memory(). (-1) is
3474 returned on error. */
Avi Kivity1eed09c2009-06-14 11:38:51 +03003475static int cpu_register_io_memory_fixed(int io_index,
Blue Swirld60efc62009-08-25 18:29:31 +00003476 CPUReadMemoryFunc * const *mem_read,
3477 CPUWriteMemoryFunc * const *mem_write,
Avi Kivitybe675c92011-11-20 16:22:55 +02003478 void *opaque)
bellard33417e72003-08-10 21:47:01 +00003479{
Richard Henderson3cab7212010-05-07 09:52:51 -07003480 int i;
3481
bellard33417e72003-08-10 21:47:01 +00003482 if (io_index <= 0) {
aliguori88715652009-02-11 15:20:58 +00003483 io_index = get_free_io_mem_idx();
3484 if (io_index == -1)
3485 return io_index;
bellard33417e72003-08-10 21:47:01 +00003486 } else {
Avi Kivity1eed09c2009-06-14 11:38:51 +03003487 io_index >>= IO_MEM_SHIFT;
bellard33417e72003-08-10 21:47:01 +00003488 if (io_index >= IO_MEM_NB_ENTRIES)
3489 return -1;
3490 }
bellardb5ff1b32005-11-26 10:38:39 +00003491
Richard Henderson3cab7212010-05-07 09:52:51 -07003492 for (i = 0; i < 3; ++i) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003493 assert(mem_read[i]);
3494 _io_mem_read[io_index][i] = mem_read[i];
Richard Henderson3cab7212010-05-07 09:52:51 -07003495 }
3496 for (i = 0; i < 3; ++i) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003497 assert(mem_write[i]);
3498 _io_mem_write[io_index][i] = mem_write[i];
Richard Henderson3cab7212010-05-07 09:52:51 -07003499 }
bellarda4193c82004-06-03 14:01:43 +00003500 io_mem_opaque[io_index] = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003501
3502 return (io_index << IO_MEM_SHIFT);
bellard33417e72003-08-10 21:47:01 +00003503}
bellard61382a52003-10-27 21:22:23 +00003504
Blue Swirld60efc62009-08-25 18:29:31 +00003505int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3506 CPUWriteMemoryFunc * const *mem_write,
Avi Kivitybe675c92011-11-20 16:22:55 +02003507 void *opaque)
Avi Kivity1eed09c2009-06-14 11:38:51 +03003508{
Avi Kivitybe675c92011-11-20 16:22:55 +02003509 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
Avi Kivity1eed09c2009-06-14 11:38:51 +03003510}
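/* Editor's sketch of the legacy registration pattern referred to in
   the comment above (hypothetical device; all names are illustrative,
   and it assumes the cpu_register_physical_memory() wrapper mentioned
   there is still available to map the returned token): */
static uint32_t demo_io_readb(void *opaque, target_phys_addr_t addr)
{
    return 0;   /* a real device would decode addr here */
}

static void demo_io_writeb(void *opaque, target_phys_addr_t addr,
                           uint32_t val)
{
    /* a real device would latch val here */
}

static CPUReadMemoryFunc * const demo_io_read[3] = {
    demo_io_readb, demo_io_readb, demo_io_readb,   /* 1-, 2-, 4-byte */
};

static CPUWriteMemoryFunc * const demo_io_write[3] = {
    demo_io_writeb, demo_io_writeb, demo_io_writeb,
};

static void demo_io_map(target_phys_addr_t base, void *opaque)
{
    int io = cpu_register_io_memory(demo_io_read, demo_io_write, opaque);
    cpu_register_physical_memory(base, 0x1000, io);
}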
3511
aliguori88715652009-02-11 15:20:58 +00003512void cpu_unregister_io_memory(int io_table_address)
3513{
3514 int i;
3515 int io_index = io_table_address >> IO_MEM_SHIFT;
3516
3517 for (i=0;i < 3; i++) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003518 _io_mem_read[io_index][i] = NULL;
3519 _io_mem_write[io_index][i] = NULL;
aliguori88715652009-02-11 15:20:58 +00003520 }
3521 io_mem_opaque[io_index] = NULL;
3522 io_mem_used[io_index] = 0;
3523}
3524
Avi Kivitye9179ce2009-06-14 11:38:52 +03003525static void io_mem_init(void)
3526{
3527 int i;
3528
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003529 /* Must be first: */
3530 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
3531 assert(io_mem_ram.ram_addr == 0);
3532 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
3533 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
3534 "unassigned", UINT64_MAX);
3535 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
3536 "notdirty", UINT64_MAX);
Andreas Färber56384e82011-11-30 16:26:21 +01003537 cpu_register_io_memory_fixed(IO_MEM_SUBPAGE_RAM, subpage_ram_read,
Avi Kivitybe675c92011-11-20 16:22:55 +02003538 subpage_ram_write, NULL);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003539 for (i=0; i<5; i++)
3540 io_mem_used[i] = 1;
3541
3542 io_mem_watch = cpu_register_io_memory(watch_mem_read,
Avi Kivitybe675c92011-11-20 16:22:55 +02003543 watch_mem_write, NULL);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003544}
3545
Avi Kivity62152b82011-07-26 14:26:14 +03003546static void memory_map_init(void)
3547{
Anthony Liguori7267c092011-08-20 22:09:37 -05003548 system_memory = g_malloc(sizeof(*system_memory));
Avi Kivity8417ceb2011-08-03 11:56:14 +03003549 memory_region_init(system_memory, "system", INT64_MAX);
Avi Kivity62152b82011-07-26 14:26:14 +03003550 set_system_memory_map(system_memory);
Avi Kivity309cb472011-08-08 16:09:03 +03003551
Anthony Liguori7267c092011-08-20 22:09:37 -05003552 system_io = g_malloc(sizeof(*system_io));
Avi Kivity309cb472011-08-08 16:09:03 +03003553 memory_region_init(system_io, "io", 65536);
3554 set_system_io_map(system_io);
Avi Kivity62152b82011-07-26 14:26:14 +03003555}
3556
3557MemoryRegion *get_system_memory(void)
3558{
3559 return system_memory;
3560}
3561
Avi Kivity309cb472011-08-08 16:09:03 +03003562MemoryRegion *get_system_io(void)
3563{
3564 return system_io;
3565}
3566
pbrooke2eef172008-06-08 01:09:01 +00003567#endif /* !defined(CONFIG_USER_ONLY) */
3568
bellard13eb76e2004-01-24 15:23:36 +00003569/* physical memory access (slow version, mainly for debug) */
3570#if defined(CONFIG_USER_ONLY)
Paul Brooka68fe892010-03-01 00:08:59 +00003571int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3572 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003573{
3574 int l, flags;
3575 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00003576 void * p;
bellard13eb76e2004-01-24 15:23:36 +00003577
3578 while (len > 0) {
3579 page = addr & TARGET_PAGE_MASK;
3580 l = (page + TARGET_PAGE_SIZE) - addr;
3581 if (l > len)
3582 l = len;
3583 flags = page_get_flags(page);
3584 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00003585 return -1;
bellard13eb76e2004-01-24 15:23:36 +00003586 if (is_write) {
3587 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00003588 return -1;
bellard579a97f2007-11-11 14:26:47 +00003589 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003590 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00003591 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003592 memcpy(p, buf, l);
3593 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00003594 } else {
3595 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00003596 return -1;
bellard579a97f2007-11-11 14:26:47 +00003597 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003598 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00003599 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003600 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00003601 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00003602 }
3603 len -= l;
3604 buf += l;
3605 addr += l;
3606 }
Paul Brooka68fe892010-03-01 00:08:59 +00003607 return 0;
bellard13eb76e2004-01-24 15:23:36 +00003608}
bellard8df1cd02005-01-28 22:37:22 +00003609
bellard13eb76e2004-01-24 15:23:36 +00003610#else
Anthony Liguoric227f092009-10-01 16:12:16 -05003611void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
bellard13eb76e2004-01-24 15:23:36 +00003612 int len, int is_write)
3613{
3614 int l, io_index;
3615 uint8_t *ptr;
3616 uint32_t val;
Anthony Liguoric227f092009-10-01 16:12:16 -05003617 target_phys_addr_t page;
Anthony PERARD8ca56922011-07-15 04:32:53 +00003618 ram_addr_t pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003619 PhysPageDesc p;
ths3b46e622007-09-17 08:09:54 +00003620
bellard13eb76e2004-01-24 15:23:36 +00003621 while (len > 0) {
3622 page = addr & TARGET_PAGE_MASK;
3623 l = (page + TARGET_PAGE_SIZE) - addr;
3624 if (l > len)
3625 l = len;
bellard92e873b2004-05-21 14:52:29 +00003626 p = phys_page_find(page >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003627 pd = p.phys_offset;
ths3b46e622007-09-17 08:09:54 +00003628
bellard13eb76e2004-01-24 15:23:36 +00003629 if (is_write) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003630 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003631 target_phys_addr_t addr1;
bellard13eb76e2004-01-24 15:23:36 +00003632 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003633 addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
bellard6a00d602005-11-21 23:25:50 +00003634 /* XXX: could force cpu_single_env to NULL to avoid
3635 potential bugs */
aurel326c2934d2009-02-18 21:37:17 +00003636 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003637 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003638 val = ldl_p(buf);
Avi Kivityacbbec52011-11-21 12:27:03 +02003639 io_mem_write(io_index, addr1, val, 4);
bellard13eb76e2004-01-24 15:23:36 +00003640 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003641 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003642 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003643 val = lduw_p(buf);
Avi Kivityacbbec52011-11-21 12:27:03 +02003644 io_mem_write(io_index, addr1, val, 2);
bellard13eb76e2004-01-24 15:23:36 +00003645 l = 2;
3646 } else {
bellard1c213d12005-09-03 10:49:04 +00003647 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003648 val = ldub_p(buf);
Avi Kivityacbbec52011-11-21 12:27:03 +02003649 io_mem_write(io_index, addr1, val, 1);
bellard13eb76e2004-01-24 15:23:36 +00003650 l = 1;
3651 }
3652 } else {
Anthony PERARD8ca56922011-07-15 04:32:53 +00003653 ram_addr_t addr1;
bellardb448f2f2004-02-25 23:24:04 +00003654 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
bellard13eb76e2004-01-24 15:23:36 +00003655 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003656 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00003657 memcpy(ptr, buf, l);
bellard3a7d9292005-08-21 09:26:42 +00003658 if (!cpu_physical_memory_is_dirty(addr1)) {
3659 /* invalidate code */
3660 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3661 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003662 cpu_physical_memory_set_dirty_flags(
3663 addr1, (0xff & ~CODE_DIRTY_FLAG));
bellard3a7d9292005-08-21 09:26:42 +00003664 }
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003665 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00003666 }
3667 } else {
Avi Kivity1d393fa2012-01-01 21:15:42 +02003668 if (!is_ram_rom_romd(pd)) {
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003669 target_phys_addr_t addr1;
bellard13eb76e2004-01-24 15:23:36 +00003670 /* I/O case */
3671 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003672 addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
aurel326c2934d2009-02-18 21:37:17 +00003673 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003674 /* 32 bit read access */
Avi Kivityacbbec52011-11-21 12:27:03 +02003675 val = io_mem_read(io_index, addr1, 4);
bellardc27004e2005-01-03 23:35:10 +00003676 stl_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003677 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003678 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003679 /* 16 bit read access */
Avi Kivityacbbec52011-11-21 12:27:03 +02003680 val = io_mem_read(io_index, addr1, 2);
bellardc27004e2005-01-03 23:35:10 +00003681 stw_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003682 l = 2;
3683 } else {
bellard1c213d12005-09-03 10:49:04 +00003684 /* 8 bit read access */
Avi Kivityacbbec52011-11-21 12:27:03 +02003685 val = io_mem_read(io_index, addr1, 1);
bellardc27004e2005-01-03 23:35:10 +00003686 stb_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003687 l = 1;
3688 }
3689 } else {
3690 /* RAM case */
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003691 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
3692 memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
3693 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00003694 }
3695 }
3696 len -= l;
3697 buf += l;
3698 addr += l;
3699 }
3700}
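/* Editor's note, worked example: a 10-byte transfer whose start lies
   6 bytes before a page boundary is split into chunks of l = 6 and
   l = 4; within an I/O chunk the loop further splits the access into
   4-, 2- and 1-byte device reads/writes according to alignment and
   the bytes remaining. */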
bellard8df1cd02005-01-28 22:37:22 +00003701
bellardd0ecd2a2006-04-23 17:14:48 +00003702/* used for ROM loading : can write in RAM and ROM */
Anthony Liguoric227f092009-10-01 16:12:16 -05003703void cpu_physical_memory_write_rom(target_phys_addr_t addr,
bellardd0ecd2a2006-04-23 17:14:48 +00003704 const uint8_t *buf, int len)
3705{
3706 int l;
3707 uint8_t *ptr;
Anthony Liguoric227f092009-10-01 16:12:16 -05003708 target_phys_addr_t page;
bellardd0ecd2a2006-04-23 17:14:48 +00003709 unsigned long pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003710 PhysPageDesc p;
ths3b46e622007-09-17 08:09:54 +00003711
bellardd0ecd2a2006-04-23 17:14:48 +00003712 while (len > 0) {
3713 page = addr & TARGET_PAGE_MASK;
3714 l = (page + TARGET_PAGE_SIZE) - addr;
3715 if (l > len)
3716 l = len;
3717 p = phys_page_find(page >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003718 pd = p.phys_offset;
ths3b46e622007-09-17 08:09:54 +00003719
Avi Kivity1d393fa2012-01-01 21:15:42 +02003720 if (!is_ram_rom_romd(pd)) {
bellardd0ecd2a2006-04-23 17:14:48 +00003721 /* do nothing */
3722 } else {
3723 unsigned long addr1;
3724 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3725 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003726 ptr = qemu_get_ram_ptr(addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00003727 memcpy(ptr, buf, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003728 qemu_put_ram_ptr(ptr);
bellardd0ecd2a2006-04-23 17:14:48 +00003729 }
3730 len -= l;
3731 buf += l;
3732 addr += l;
3733 }
3734}
3735
aliguori6d16c2f2009-01-22 16:59:11 +00003736typedef struct {
3737 void *buffer;
Anthony Liguoric227f092009-10-01 16:12:16 -05003738 target_phys_addr_t addr;
3739 target_phys_addr_t len;
aliguori6d16c2f2009-01-22 16:59:11 +00003740} BounceBuffer;
3741
3742static BounceBuffer bounce;
3743
aliguoriba223c22009-01-22 16:59:16 +00003744typedef struct MapClient {
3745 void *opaque;
3746 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00003747 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00003748} MapClient;
3749
Blue Swirl72cf2d42009-09-12 07:36:22 +00003750static QLIST_HEAD(map_client_list, MapClient) map_client_list
3751 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003752
3753void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3754{
Anthony Liguori7267c092011-08-20 22:09:37 -05003755 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00003756
3757 client->opaque = opaque;
3758 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003759 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00003760 return client;
3761}
3762
3763void cpu_unregister_map_client(void *_client)
3764{
3765 MapClient *client = (MapClient *)_client;
3766
Blue Swirl72cf2d42009-09-12 07:36:22 +00003767 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05003768 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00003769}
3770
3771static void cpu_notify_map_clients(void)
3772{
3773 MapClient *client;
3774
Blue Swirl72cf2d42009-09-12 07:36:22 +00003775 while (!QLIST_EMPTY(&map_client_list)) {
3776 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003777 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09003778 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00003779 }
3780}
3781
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.  (A usage sketch follows cpu_physical_memory_unmap()
 * below.)
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t todo = 0;
    int l;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc p;
    ram_addr_t raddr = RAM_ADDR_MAX;
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        pd = p.phys_offset;

        if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_read(addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        }

        len -= l;
        addr += l;
        todo += l;
    }
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}

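/* Usage sketch (illustrative only): the canonical map / do-I/O / unmap
 * pattern.  *plen may come back smaller than requested (or the call may
 * fall back to the single bounce buffer), so a real caller loops until the
 * whole range is covered.  "example_fill_guest_ram" is a hypothetical
 * name. */
static void example_fill_guest_ram(target_phys_addr_t addr,
                                   target_phys_addr_t len, uint8_t pattern)
{
    while (len > 0) {
        target_phys_addr_t plen = len;
        void *ptr = cpu_physical_memory_map(addr, &plen, 1 /* is_write */);

        if (!ptr) {
            break;  /* bounce buffer busy: see cpu_register_map_client() */
        }
        memset(ptr, pattern, plen);
        /* access_len == plen marks the pages dirty and invalidates TBs */
        cpu_physical_memory_unmap(ptr, plen, 1, plen);
        addr += plen;
        len -= plen;
    }
}
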
/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if (!is_ram_rom_romd(pd)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
        val = io_mem_read(io_index, addr, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if (!is_ram_rom_romd(pd)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;

        /* XXX This is broken when device endian != cpu endian.
               Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = io_mem_read(io_index, addr, 4) << 32;
        val |= io_mem_read(io_index, addr + 4, 4);
#else
        val = io_mem_read(io_index, addr, 4);
        val |= io_mem_read(io_index, addr + 4, 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
                                          enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if (!is_ram_rom_romd(pd)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
        val = io_mem_read(io_index, addr, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
        io_mem_write(io_index, addr, val, 4);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}

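/* Usage sketch (illustrative only): the notdirty store is intended for
 * target-MMU helpers that update PTE status bits during a page walk --
 * bookkeeping writes that must not flag the page as modified guest code.
 * EXAMPLE_PTE_ACCESSED and the function name are hypothetical. */
#define EXAMPLE_PTE_ACCESSED 0x20

static void example_mark_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & EXAMPLE_PTE_ACCESSED)) {
        stl_phys_notdirty(pte_addr, pte | EXAMPLE_PTE_ACCESSED);
    }
}
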
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write(io_index, addr, val >> 32, 4);
        io_mem_write(io_index, addr + 4, (uint32_t)val, 4);
#else
        io_mem_write(io_index, addr, (uint32_t)val, 4);
        io_mem_write(io_index, addr + 4, val >> 32, 4);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(io_index, addr, val, 4);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

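/* Usage sketch (illustrative only): device models with a declared register
 * endianness use the _le/_be accessors so the guest-visible layout is
 * stable regardless of TARGET_WORDS_BIGENDIAN, while the unadorned
 * stl_phys()/ldl_phys() keep target byte order.  EXAMPLE_DOORBELL_ADDR is
 * a hypothetical register address. */
#define EXAMPLE_DOORBELL_ADDR 0x10000040

static uint32_t example_ring_doorbell(uint32_t val)
{
    stl_le_phys(EXAMPLE_DOORBELL_ADDR, val);   /* stored little-endian */
    return ldl_le_phys(EXAMPLE_DOORBELL_ADDR); /* read back, host order */
}
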
/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(io_index, addr, val, 2);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
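
/* Usage sketch (illustrative only): a gdb-stub style peek at a guest
 * virtual address.  The debug accessor walks the guest page tables via
 * cpu_get_phys_page_debug() and, for writes, uses the ROM-tolerant path,
 * so it also works on write-protected pages.  "example_peek_u32" is a
 * hypothetical name. */
static int example_peek_u32(CPUState *env, target_ulong vaddr, uint32_t *out)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0) {
        return -1;  /* no physical page mapped at vaddr */
    }
    *out = ldl_p(buf);  /* assemble in target byte order */
    return 0;
}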
#endif

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred. */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn. */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB. If this is not
       the first instruction in a TB then re-execute the preceding
       branch. */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen. */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception. In practice
       we have already translated the block once so it's probably ok. */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB. */
    cpu_resume_from_signal(env, NULL);
}

#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

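/* Usage sketch (illustrative only): any fprintf()-compatible callback can
 * drive the dump; the monitor's "info jit" command funnels in here in
 * much the same way.  "example_info_jit" is a hypothetical name. */
static void example_info_jit(void)
{
    dump_exec_info(stderr, fprintf);
}
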
/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
   is the offset relative to phys_ram_base */
tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    void *p;

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1);
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
        ldub_code(addr);
    }
    pd = env1->tlb_table[mmu_idx][page_index].addr_code & ~TARGET_PAGE_MASK;
    if (pd != io_mem_ram.ram_addr && pd != io_mem_rom.ram_addr
        && !(pd & IO_MEM_ROMD)) {
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC)
        cpu_unassigned_access(env1, addr, 0, 1, 0, 4);
#else
        cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
#endif
    }
    p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}

#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif