blob: 36b61c91ac38a8279e6b012bc181fb3c266fe320 [file] [log] [blame]
bellard54936002003-05-13 00:25:15 +00001/*
bellardfd6ce8f2003-05-14 19:00:11 +00002 * virtual page mapping and translated block handling
ths5fafdf22007-09-16 21:08:06 +00003 *
bellard54936002003-05-13 00:25:15 +00004 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
Blue Swirl8167ee82009-07-16 20:47:01 +000017 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
bellard54936002003-05-13 00:25:15 +000018 */
bellard67b915a2004-03-31 23:37:16 +000019#include "config.h"
bellardd5a8f072004-09-29 21:15:28 +000020#ifdef _WIN32
21#include <windows.h>
22#else
bellarda98d49b2004-11-14 16:22:05 +000023#include <sys/types.h>
bellardd5a8f072004-09-29 21:15:28 +000024#include <sys/mman.h>
25#endif
bellard54936002003-05-13 00:25:15 +000026
Stefan Weil055403b2010-10-22 23:03:32 +020027#include "qemu-common.h"
bellard6180a182003-09-30 21:04:53 +000028#include "cpu.h"
bellardb67d9a52008-05-23 09:57:34 +000029#include "tcg.h"
pbrookb3c77242008-06-30 16:31:04 +000030#include "hw/hw.h"
Alex Williamsoncc9e98c2010-06-25 11:09:43 -060031#include "hw/qdev.h"
aliguori74576192008-10-06 14:02:03 +000032#include "osdep.h"
aliguori7ba1e612008-11-05 16:04:33 +000033#include "kvm.h"
Jun Nakajima432d2682010-08-31 16:41:25 +010034#include "hw/xen.h"
Blue Swirl29e922b2010-03-29 19:24:00 +000035#include "qemu-timer.h"
Avi Kivity62152b82011-07-26 14:26:14 +030036#include "memory.h"
37#include "exec-memory.h"
pbrook53a59602006-03-25 19:31:22 +000038#if defined(CONFIG_USER_ONLY)
39#include <qemu.h>
Juergen Lockf01576f2010-03-25 22:32:16 +010040#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
41#include <sys/param.h>
42#if __FreeBSD_version >= 700104
43#define HAVE_KINFO_GETVMMAP
44#define sigqueue sigqueue_freebsd /* avoid redefinition */
45#include <sys/time.h>
46#include <sys/proc.h>
47#include <machine/profile.h>
48#define _KERNEL
49#include <sys/user.h>
50#undef _KERNEL
51#undef sigqueue
52#include <libutil.h>
53#endif
54#endif
Jun Nakajima432d2682010-08-31 16:41:25 +010055#else /* !CONFIG_USER_ONLY */
56#include "xen-mapcache.h"
Stefano Stabellini6506e4f2011-05-19 18:35:44 +010057#include "trace.h"
pbrook53a59602006-03-25 19:31:22 +000058#endif
bellard54936002003-05-13 00:25:15 +000059
Avi Kivity67d95c12011-12-15 15:25:22 +020060#define WANT_EXEC_OBSOLETE
61#include "exec-obsolete.h"
62
bellardfd6ce8f2003-05-14 19:00:11 +000063//#define DEBUG_TB_INVALIDATE
bellard66e85a22003-06-24 13:28:12 +000064//#define DEBUG_FLUSH
bellard9fa3e852004-01-04 18:06:42 +000065//#define DEBUG_TLB
pbrook67d3b952006-12-18 05:03:52 +000066//#define DEBUG_UNASSIGNED
bellardfd6ce8f2003-05-14 19:00:11 +000067
68/* make various TB consistency checks */
ths5fafdf22007-09-16 21:08:06 +000069//#define DEBUG_TB_CHECK
70//#define DEBUG_TLB_CHECK
bellardfd6ce8f2003-05-14 19:00:11 +000071
ths1196be32007-03-17 15:17:58 +000072//#define DEBUG_IOPORT
blueswir1db7b5422007-05-26 17:36:03 +000073//#define DEBUG_SUBPAGE
ths1196be32007-03-17 15:17:58 +000074
pbrook99773bd2006-04-16 15:14:59 +000075#if !defined(CONFIG_USER_ONLY)
76/* TB consistency checks only implemented for usermode emulation. */
77#undef DEBUG_TB_CHECK
78#endif
79
bellard9fa3e852004-01-04 18:06:42 +000080#define SMC_BITMAP_USE_THRESHOLD 10
81
blueswir1bdaf78e2008-10-04 07:24:27 +000082static TranslationBlock *tbs;
Stefan Weil24ab68a2010-07-19 18:23:17 +020083static int code_gen_max_blocks;
bellard9fa3e852004-01-04 18:06:42 +000084TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
blueswir1bdaf78e2008-10-04 07:24:27 +000085static int nb_tbs;
bellardeb51d102003-05-14 21:51:13 +000086/* any access to the tbs or the page table must use this lock */
Anthony Liguoric227f092009-10-01 16:12:16 -050087spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
bellardfd6ce8f2003-05-14 19:00:11 +000088
blueswir1141ac462008-07-26 15:05:57 +000089#if defined(__arm__) || defined(__sparc_v9__)
90/* The prologue must be reachable with a direct jump. ARM and Sparc64
91 have limited branch ranges (possibly also PPC) so place it in a
blueswir1d03d8602008-07-10 17:21:31 +000092 section close to code segment. */
93#define code_gen_section \
94 __attribute__((__section__(".gen_code"))) \
95 __attribute__((aligned (32)))
Stefan Weilf8e2af12009-06-18 23:04:48 +020096#elif defined(_WIN32)
97/* Maximum alignment for Win32 is 16. */
98#define code_gen_section \
99 __attribute__((aligned (16)))
blueswir1d03d8602008-07-10 17:21:31 +0000100#else
101#define code_gen_section \
102 __attribute__((aligned (32)))
103#endif
104
105uint8_t code_gen_prologue[1024] code_gen_section;
blueswir1bdaf78e2008-10-04 07:24:27 +0000106static uint8_t *code_gen_buffer;
107static unsigned long code_gen_buffer_size;
bellard26a5f132008-05-28 12:30:31 +0000108/* threshold to flush the translated code buffer */
blueswir1bdaf78e2008-10-04 07:24:27 +0000109static unsigned long code_gen_buffer_max_size;
Stefan Weil24ab68a2010-07-19 18:23:17 +0200110static uint8_t *code_gen_ptr;
bellardfd6ce8f2003-05-14 19:00:11 +0000111
pbrooke2eef172008-06-08 01:09:01 +0000112#if !defined(CONFIG_USER_ONLY)
bellard9fa3e852004-01-04 18:06:42 +0000113int phys_ram_fd;
aliguori74576192008-10-06 14:02:03 +0000114static int in_migration;
pbrook94a6b542009-04-11 17:15:54 +0000115
Paolo Bonzini85d59fe2011-08-12 13:18:14 +0200116RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
Avi Kivity62152b82011-07-26 14:26:14 +0300117
118static MemoryRegion *system_memory;
Avi Kivity309cb472011-08-08 16:09:03 +0300119static MemoryRegion *system_io;
Avi Kivity62152b82011-07-26 14:26:14 +0300120
pbrooke2eef172008-06-08 01:09:01 +0000121#endif
bellard9fa3e852004-01-04 18:06:42 +0000122
bellard6a00d602005-11-21 23:25:50 +0000123CPUState *first_cpu;
124/* current CPU in the current thread. It is only valid inside
125 cpu_exec() */
Paolo Bonzinib3c4bbe2011-10-28 10:52:42 +0100126DEFINE_TLS(CPUState *,cpu_single_env);
pbrook2e70f6e2008-06-29 01:03:05 +0000127/* 0 = Do not count executed instructions.
thsbf20dc02008-06-30 17:22:19 +0000128 1 = Precise instruction counting.
pbrook2e70f6e2008-06-29 01:03:05 +0000129 2 = Adaptive rate instruction counting. */
130int use_icount = 0;
bellard6a00d602005-11-21 23:25:50 +0000131
bellard54936002003-05-13 00:25:15 +0000132typedef struct PageDesc {
bellard92e873b2004-05-21 14:52:29 +0000133 /* list of TBs intersecting this ram page */
bellardfd6ce8f2003-05-14 19:00:11 +0000134 TranslationBlock *first_tb;
bellard9fa3e852004-01-04 18:06:42 +0000135 /* in order to optimize self modifying code, we count the number
136 of lookups we do to a given page to use a bitmap */
137 unsigned int code_write_count;
138 uint8_t *code_bitmap;
139#if defined(CONFIG_USER_ONLY)
140 unsigned long flags;
141#endif
bellard54936002003-05-13 00:25:15 +0000142} PageDesc;
143
Paul Brook41c1b1c2010-03-12 16:54:58 +0000144/* In system mode we want L1_MAP to be based on ram offsets,
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800145 while in user mode we want it to be based on virtual addresses. */
146#if !defined(CONFIG_USER_ONLY)
Paul Brook41c1b1c2010-03-12 16:54:58 +0000147#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
148# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
149#else
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800150# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
Paul Brook41c1b1c2010-03-12 16:54:58 +0000151#endif
j_mayerbedb69e2007-04-05 20:08:21 +0000152#else
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800153# define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
j_mayerbedb69e2007-04-05 20:08:21 +0000154#endif
bellard54936002003-05-13 00:25:15 +0000155
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800156/* Size of the L2 (and L3, etc) page tables. */
157#define L2_BITS 10
bellard54936002003-05-13 00:25:15 +0000158#define L2_SIZE (1 << L2_BITS)
159
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800160/* The bits remaining after N lower levels of page tables. */
161#define P_L1_BITS_REM \
162 ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
163#define V_L1_BITS_REM \
164 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
165
166/* Size of the L1 page table. Avoid silly small sizes. */
167#if P_L1_BITS_REM < 4
168#define P_L1_BITS (P_L1_BITS_REM + L2_BITS)
169#else
170#define P_L1_BITS P_L1_BITS_REM
171#endif
172
173#if V_L1_BITS_REM < 4
174#define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
175#else
176#define V_L1_BITS V_L1_BITS_REM
177#endif
178
179#define P_L1_SIZE ((target_phys_addr_t)1 << P_L1_BITS)
180#define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
181
182#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
183#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
184
bellard83fb7ad2004-07-05 21:25:26 +0000185unsigned long qemu_real_host_page_size;
bellard83fb7ad2004-07-05 21:25:26 +0000186unsigned long qemu_host_page_size;
187unsigned long qemu_host_page_mask;
bellard54936002003-05-13 00:25:15 +0000188
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800189/* This is a multi-level map on the virtual address space.
190 The bottom level has pointers to PageDesc. */
191static void *l1_map[V_L1_SIZE];
bellard54936002003-05-13 00:25:15 +0000192
pbrooke2eef172008-06-08 01:09:01 +0000193#if !defined(CONFIG_USER_ONLY)
Paul Brook41c1b1c2010-03-12 16:54:58 +0000194typedef struct PhysPageDesc {
195 /* offset in host memory of the page + io_index in the low bits */
196 ram_addr_t phys_offset;
197 ram_addr_t region_offset;
198} PhysPageDesc;
199
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800200/* This is a multi-level map on the physical address space.
201 The bottom level has pointers to PhysPageDesc. */
202static void *l1_phys_map[P_L1_SIZE];
Paul Brook6d9a1302010-02-28 23:55:53 +0000203
pbrooke2eef172008-06-08 01:09:01 +0000204static void io_mem_init(void);
Avi Kivity62152b82011-07-26 14:26:14 +0300205static void memory_map_init(void);
pbrooke2eef172008-06-08 01:09:01 +0000206
bellard33417e72003-08-10 21:47:01 +0000207/* io memory support */
bellard33417e72003-08-10 21:47:01 +0000208CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
209CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
bellarda4193c82004-06-03 14:01:43 +0000210void *io_mem_opaque[IO_MEM_NB_ENTRIES];
blueswir1511d2b12009-03-07 15:32:56 +0000211static char io_mem_used[IO_MEM_NB_ENTRIES];
pbrook6658ffb2007-03-16 23:58:11 +0000212static int io_mem_watch;
213#endif
bellard33417e72003-08-10 21:47:01 +0000214
/* log support */
/* NOTE: the rest of this file tests _WIN32 (the standard compiler-defined
   macro); use it here too instead of the non-standard WIN32 spelling. */
#ifdef _WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;
bellard34865132003-10-05 14:28:56 +0000224
bellarde3db7222005-01-26 22:00:47 +0000225/* statistics */
Paul Brookb3755a92010-03-12 16:54:58 +0000226#if !defined(CONFIG_USER_ONLY)
bellarde3db7222005-01-26 22:00:47 +0000227static int tlb_flush_count;
Paul Brookb3755a92010-03-12 16:54:58 +0000228#endif
bellarde3db7222005-01-26 22:00:47 +0000229static int tb_flush_count;
230static int tb_phys_invalidate_count;
231
bellard7cb69ca2008-05-10 10:55:51 +0000232#ifdef _WIN32
233static void map_exec(void *addr, long size)
234{
235 DWORD old_protect;
236 VirtualProtect(addr, size,
237 PAGE_EXECUTE_READWRITE, &old_protect);
238
239}
240#else
241static void map_exec(void *addr, long size)
242{
bellard43694152008-05-29 09:35:57 +0000243 unsigned long start, end, page_size;
bellard7cb69ca2008-05-10 10:55:51 +0000244
bellard43694152008-05-29 09:35:57 +0000245 page_size = getpagesize();
bellard7cb69ca2008-05-10 10:55:51 +0000246 start = (unsigned long)addr;
bellard43694152008-05-29 09:35:57 +0000247 start &= ~(page_size - 1);
bellard7cb69ca2008-05-10 10:55:51 +0000248
249 end = (unsigned long)addr + size;
bellard43694152008-05-29 09:35:57 +0000250 end += page_size - 1;
251 end &= ~(page_size - 1);
bellard7cb69ca2008-05-10 10:55:51 +0000252
253 mprotect((void *)start, end - start,
254 PROT_READ | PROT_WRITE | PROT_EXEC);
255}
256#endif
257
bellardb346ff42003-06-15 20:05:50 +0000258static void page_init(void)
bellard54936002003-05-13 00:25:15 +0000259{
bellard83fb7ad2004-07-05 21:25:26 +0000260 /* NOTE: we can always suppose that qemu_host_page_size >=
bellard54936002003-05-13 00:25:15 +0000261 TARGET_PAGE_SIZE */
aliguoric2b48b62008-11-11 22:06:42 +0000262#ifdef _WIN32
263 {
264 SYSTEM_INFO system_info;
265
266 GetSystemInfo(&system_info);
267 qemu_real_host_page_size = system_info.dwPageSize;
268 }
269#else
270 qemu_real_host_page_size = getpagesize();
271#endif
bellard83fb7ad2004-07-05 21:25:26 +0000272 if (qemu_host_page_size == 0)
273 qemu_host_page_size = qemu_real_host_page_size;
274 if (qemu_host_page_size < TARGET_PAGE_SIZE)
275 qemu_host_page_size = TARGET_PAGE_SIZE;
bellard83fb7ad2004-07-05 21:25:26 +0000276 qemu_host_page_mask = ~(qemu_host_page_size - 1);
balrog50a95692007-12-12 01:16:23 +0000277
Paul Brook2e9a5712010-05-05 16:32:59 +0100278#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
balrog50a95692007-12-12 01:16:23 +0000279 {
Juergen Lockf01576f2010-03-25 22:32:16 +0100280#ifdef HAVE_KINFO_GETVMMAP
281 struct kinfo_vmentry *freep;
282 int i, cnt;
283
284 freep = kinfo_getvmmap(getpid(), &cnt);
285 if (freep) {
286 mmap_lock();
287 for (i = 0; i < cnt; i++) {
288 unsigned long startaddr, endaddr;
289
290 startaddr = freep[i].kve_start;
291 endaddr = freep[i].kve_end;
292 if (h2g_valid(startaddr)) {
293 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
294
295 if (h2g_valid(endaddr)) {
296 endaddr = h2g(endaddr);
Aurelien Jarnofd436902010-04-10 17:20:36 +0200297 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
Juergen Lockf01576f2010-03-25 22:32:16 +0100298 } else {
299#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
300 endaddr = ~0ul;
Aurelien Jarnofd436902010-04-10 17:20:36 +0200301 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
Juergen Lockf01576f2010-03-25 22:32:16 +0100302#endif
303 }
304 }
305 }
306 free(freep);
307 mmap_unlock();
308 }
309#else
balrog50a95692007-12-12 01:16:23 +0000310 FILE *f;
balrog50a95692007-12-12 01:16:23 +0000311
pbrook07765902008-05-31 16:33:53 +0000312 last_brk = (unsigned long)sbrk(0);
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800313
Aurelien Jarnofd436902010-04-10 17:20:36 +0200314 f = fopen("/compat/linux/proc/self/maps", "r");
balrog50a95692007-12-12 01:16:23 +0000315 if (f) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800316 mmap_lock();
317
balrog50a95692007-12-12 01:16:23 +0000318 do {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800319 unsigned long startaddr, endaddr;
320 int n;
321
322 n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
323
324 if (n == 2 && h2g_valid(startaddr)) {
325 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
326
327 if (h2g_valid(endaddr)) {
328 endaddr = h2g(endaddr);
329 } else {
330 endaddr = ~0ul;
331 }
332 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
balrog50a95692007-12-12 01:16:23 +0000333 }
334 } while (!feof(f));
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800335
balrog50a95692007-12-12 01:16:23 +0000336 fclose(f);
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800337 mmap_unlock();
balrog50a95692007-12-12 01:16:23 +0000338 }
Juergen Lockf01576f2010-03-25 22:32:16 +0100339#endif
balrog50a95692007-12-12 01:16:23 +0000340 }
341#endif
bellard54936002003-05-13 00:25:15 +0000342}
343
Paul Brook41c1b1c2010-03-12 16:54:58 +0000344static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
bellard54936002003-05-13 00:25:15 +0000345{
Paul Brook41c1b1c2010-03-12 16:54:58 +0000346 PageDesc *pd;
347 void **lp;
348 int i;
349
pbrook17e23772008-06-09 13:47:45 +0000350#if defined(CONFIG_USER_ONLY)
Anthony Liguori7267c092011-08-20 22:09:37 -0500351 /* We can't use g_malloc because it may recurse into a locked mutex. */
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800352# define ALLOC(P, SIZE) \
353 do { \
354 P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \
355 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800356 } while (0)
pbrook17e23772008-06-09 13:47:45 +0000357#else
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800358# define ALLOC(P, SIZE) \
Anthony Liguori7267c092011-08-20 22:09:37 -0500359 do { P = g_malloc0(SIZE); } while (0)
pbrook17e23772008-06-09 13:47:45 +0000360#endif
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800361
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800362 /* Level 1. Always allocated. */
363 lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
364
365 /* Level 2..N-1. */
366 for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
367 void **p = *lp;
368
369 if (p == NULL) {
370 if (!alloc) {
371 return NULL;
372 }
373 ALLOC(p, sizeof(void *) * L2_SIZE);
374 *lp = p;
375 }
376
377 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
bellard54936002003-05-13 00:25:15 +0000378 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800379
380 pd = *lp;
381 if (pd == NULL) {
382 if (!alloc) {
383 return NULL;
384 }
385 ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
386 *lp = pd;
387 }
388
389#undef ALLOC
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800390
391 return pd + (index & (L2_SIZE - 1));
bellard54936002003-05-13 00:25:15 +0000392}
393
Paul Brook41c1b1c2010-03-12 16:54:58 +0000394static inline PageDesc *page_find(tb_page_addr_t index)
bellard54936002003-05-13 00:25:15 +0000395{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800396 return page_find_alloc(index, 0);
bellard54936002003-05-13 00:25:15 +0000397}
398
Paul Brook6d9a1302010-02-28 23:55:53 +0000399#if !defined(CONFIG_USER_ONLY)
Anthony Liguoric227f092009-10-01 16:12:16 -0500400static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
bellard92e873b2004-05-21 14:52:29 +0000401{
pbrooke3f4e2a2006-04-08 20:02:06 +0000402 PhysPageDesc *pd;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800403 void **lp;
404 int i;
bellard92e873b2004-05-21 14:52:29 +0000405
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800406 /* Level 1. Always allocated. */
407 lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));
bellard108c49b2005-07-24 12:55:09 +0000408
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800409 /* Level 2..N-1. */
410 for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
411 void **p = *lp;
412 if (p == NULL) {
413 if (!alloc) {
414 return NULL;
415 }
Anthony Liguori7267c092011-08-20 22:09:37 -0500416 *lp = p = g_malloc0(sizeof(void *) * L2_SIZE);
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800417 }
418 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
bellard108c49b2005-07-24 12:55:09 +0000419 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800420
pbrooke3f4e2a2006-04-08 20:02:06 +0000421 pd = *lp;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800422 if (pd == NULL) {
pbrooke3f4e2a2006-04-08 20:02:06 +0000423 int i;
Alex Rozenman5ab97b72011-12-13 12:52:08 +0200424 int first_index = index & ~(L2_SIZE - 1);
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800425
426 if (!alloc) {
bellard108c49b2005-07-24 12:55:09 +0000427 return NULL;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800428 }
429
Anthony Liguori7267c092011-08-20 22:09:37 -0500430 *lp = pd = g_malloc(sizeof(PhysPageDesc) * L2_SIZE);
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800431
pbrook67c4d232009-02-23 13:16:07 +0000432 for (i = 0; i < L2_SIZE; i++) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800433 pd[i].phys_offset = IO_MEM_UNASSIGNED;
Alex Rozenman5ab97b72011-12-13 12:52:08 +0200434 pd[i].region_offset = (first_index + i) << TARGET_PAGE_BITS;
pbrook67c4d232009-02-23 13:16:07 +0000435 }
bellard92e873b2004-05-21 14:52:29 +0000436 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800437
438 return pd + (index & (L2_SIZE - 1));
bellard92e873b2004-05-21 14:52:29 +0000439}
440
Anthony Liguoric227f092009-10-01 16:12:16 -0500441static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
bellard92e873b2004-05-21 14:52:29 +0000442{
bellard108c49b2005-07-24 12:55:09 +0000443 return phys_page_find_alloc(index, 0);
bellard92e873b2004-05-21 14:52:29 +0000444}
445
Anthony Liguoric227f092009-10-01 16:12:16 -0500446static void tlb_protect_code(ram_addr_t ram_addr);
447static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
bellard3a7d9292005-08-21 09:26:42 +0000448 target_ulong vaddr);
pbrookc8a706f2008-06-02 16:16:42 +0000449#define mmap_lock() do { } while(0)
450#define mmap_unlock() do { } while(0)
bellard9fa3e852004-01-04 18:06:42 +0000451#endif
bellardfd6ce8f2003-05-14 19:00:11 +0000452
bellard43694152008-05-29 09:35:57 +0000453#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
454
455#if defined(CONFIG_USER_ONLY)
Stuart Bradyccbb4d42009-05-03 12:15:06 +0100456/* Currently it is not recommended to allocate big chunks of data in
bellard43694152008-05-29 09:35:57 +0000457 user mode. It will change when a dedicated libc will be used */
458#define USE_STATIC_CODE_GEN_BUFFER
459#endif
460
461#ifdef USE_STATIC_CODE_GEN_BUFFER
Aurelien Jarnoebf50fb2010-03-29 02:12:51 +0200462static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
463 __attribute__((aligned (CODE_GEN_ALIGN)));
bellard43694152008-05-29 09:35:57 +0000464#endif
465
blueswir18fcd3692008-08-17 20:26:25 +0000466static void code_gen_alloc(unsigned long tb_size)
bellard26a5f132008-05-28 12:30:31 +0000467{
bellard43694152008-05-29 09:35:57 +0000468#ifdef USE_STATIC_CODE_GEN_BUFFER
469 code_gen_buffer = static_code_gen_buffer;
470 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
471 map_exec(code_gen_buffer, code_gen_buffer_size);
472#else
bellard26a5f132008-05-28 12:30:31 +0000473 code_gen_buffer_size = tb_size;
474 if (code_gen_buffer_size == 0) {
bellard43694152008-05-29 09:35:57 +0000475#if defined(CONFIG_USER_ONLY)
bellard43694152008-05-29 09:35:57 +0000476 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
477#else
Stuart Bradyccbb4d42009-05-03 12:15:06 +0100478 /* XXX: needs adjustments */
pbrook94a6b542009-04-11 17:15:54 +0000479 code_gen_buffer_size = (unsigned long)(ram_size / 4);
bellard43694152008-05-29 09:35:57 +0000480#endif
bellard26a5f132008-05-28 12:30:31 +0000481 }
482 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
483 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
484 /* The code gen buffer location may have constraints depending on
485 the host cpu and OS */
486#if defined(__linux__)
487 {
488 int flags;
blueswir1141ac462008-07-26 15:05:57 +0000489 void *start = NULL;
490
bellard26a5f132008-05-28 12:30:31 +0000491 flags = MAP_PRIVATE | MAP_ANONYMOUS;
492#if defined(__x86_64__)
493 flags |= MAP_32BIT;
494 /* Cannot map more than that */
495 if (code_gen_buffer_size > (800 * 1024 * 1024))
496 code_gen_buffer_size = (800 * 1024 * 1024);
blueswir1141ac462008-07-26 15:05:57 +0000497#elif defined(__sparc_v9__)
498 // Map the buffer below 2G, so we can use direct calls and branches
499 flags |= MAP_FIXED;
500 start = (void *) 0x60000000UL;
501 if (code_gen_buffer_size > (512 * 1024 * 1024))
502 code_gen_buffer_size = (512 * 1024 * 1024);
balrog1cb06612008-12-01 02:10:17 +0000503#elif defined(__arm__)
Dr. David Alan Gilbert222f23f2011-12-12 16:37:31 +0100504 /* Keep the buffer no bigger than 16GB to branch between blocks */
balrog1cb06612008-12-01 02:10:17 +0000505 if (code_gen_buffer_size > 16 * 1024 * 1024)
506 code_gen_buffer_size = 16 * 1024 * 1024;
Richard Hendersoneba0b892010-06-04 12:14:14 -0700507#elif defined(__s390x__)
508 /* Map the buffer so that we can use direct calls and branches. */
509 /* We have a +- 4GB range on the branches; leave some slop. */
510 if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
511 code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
512 }
513 start = (void *)0x90000000UL;
bellard26a5f132008-05-28 12:30:31 +0000514#endif
blueswir1141ac462008-07-26 15:05:57 +0000515 code_gen_buffer = mmap(start, code_gen_buffer_size,
516 PROT_WRITE | PROT_READ | PROT_EXEC,
bellard26a5f132008-05-28 12:30:31 +0000517 flags, -1, 0);
518 if (code_gen_buffer == MAP_FAILED) {
519 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
520 exit(1);
521 }
522 }
Bradcbb608a2010-12-20 21:25:40 -0500523#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
Tobias Nygren9f4b09a2011-08-07 09:57:05 +0000524 || defined(__DragonFly__) || defined(__OpenBSD__) \
525 || defined(__NetBSD__)
aliguori06e67a82008-09-27 15:32:41 +0000526 {
527 int flags;
528 void *addr = NULL;
529 flags = MAP_PRIVATE | MAP_ANONYMOUS;
530#if defined(__x86_64__)
531 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
532 * 0x40000000 is free */
533 flags |= MAP_FIXED;
534 addr = (void *)0x40000000;
535 /* Cannot map more than that */
536 if (code_gen_buffer_size > (800 * 1024 * 1024))
537 code_gen_buffer_size = (800 * 1024 * 1024);
Blue Swirl4cd31ad2011-01-16 08:32:27 +0000538#elif defined(__sparc_v9__)
539 // Map the buffer below 2G, so we can use direct calls and branches
540 flags |= MAP_FIXED;
541 addr = (void *) 0x60000000UL;
542 if (code_gen_buffer_size > (512 * 1024 * 1024)) {
543 code_gen_buffer_size = (512 * 1024 * 1024);
544 }
aliguori06e67a82008-09-27 15:32:41 +0000545#endif
546 code_gen_buffer = mmap(addr, code_gen_buffer_size,
547 PROT_WRITE | PROT_READ | PROT_EXEC,
548 flags, -1, 0);
549 if (code_gen_buffer == MAP_FAILED) {
550 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
551 exit(1);
552 }
553 }
bellard26a5f132008-05-28 12:30:31 +0000554#else
Anthony Liguori7267c092011-08-20 22:09:37 -0500555 code_gen_buffer = g_malloc(code_gen_buffer_size);
bellard26a5f132008-05-28 12:30:31 +0000556 map_exec(code_gen_buffer, code_gen_buffer_size);
557#endif
bellard43694152008-05-29 09:35:57 +0000558#endif /* !USE_STATIC_CODE_GEN_BUFFER */
bellard26a5f132008-05-28 12:30:31 +0000559 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
Peter Maydella884da82011-06-22 11:58:25 +0100560 code_gen_buffer_max_size = code_gen_buffer_size -
561 (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
bellard26a5f132008-05-28 12:30:31 +0000562 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
Anthony Liguori7267c092011-08-20 22:09:37 -0500563 tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
bellard26a5f132008-05-28 12:30:31 +0000564}
565
566/* Must be called before using the QEMU cpus. 'tb_size' is the size
567 (in bytes) allocated to the translation buffer. Zero means default
568 size. */
Jan Kiszkad5ab9712011-08-02 16:10:21 +0200569void tcg_exec_init(unsigned long tb_size)
bellard26a5f132008-05-28 12:30:31 +0000570{
bellard26a5f132008-05-28 12:30:31 +0000571 cpu_gen_init();
572 code_gen_alloc(tb_size);
573 code_gen_ptr = code_gen_buffer;
bellard43694152008-05-29 09:35:57 +0000574 page_init();
Richard Henderson9002ec72010-05-06 08:50:41 -0700575#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
576 /* There's no guest base to take into account, so go ahead and
577 initialize the prologue now. */
578 tcg_prologue_init(&tcg_ctx);
579#endif
bellard26a5f132008-05-28 12:30:31 +0000580}
581
Jan Kiszkad5ab9712011-08-02 16:10:21 +0200582bool tcg_enabled(void)
583{
584 return code_gen_buffer != NULL;
585}
586
/* One-time setup of the memory and I/O dispatch tables (system mode only). */
void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}
594
pbrook9656f322008-07-01 20:01:19 +0000595#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
596
Juan Quintelae59fb372009-09-29 22:48:21 +0200597static int cpu_common_post_load(void *opaque, int version_id)
Juan Quintelae7f4eff2009-09-10 03:04:33 +0200598{
599 CPUState *env = opaque;
600
aurel323098dba2009-03-07 21:28:24 +0000601 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
602 version_id is increased. */
603 env->interrupt_request &= ~0x01;
pbrook9656f322008-07-01 20:01:19 +0000604 tlb_flush(env, 1);
605
606 return 0;
607}
Juan Quintelae7f4eff2009-09-10 03:04:33 +0200608
609static const VMStateDescription vmstate_cpu_common = {
610 .name = "cpu_common",
611 .version_id = 1,
612 .minimum_version_id = 1,
613 .minimum_version_id_old = 1,
Juan Quintelae7f4eff2009-09-10 03:04:33 +0200614 .post_load = cpu_common_post_load,
615 .fields = (VMStateField []) {
616 VMSTATE_UINT32(halted, CPUState),
617 VMSTATE_UINT32(interrupt_request, CPUState),
618 VMSTATE_END_OF_LIST()
619 }
620};
pbrook9656f322008-07-01 20:01:19 +0000621#endif
622
Glauber Costa950f1472009-06-09 12:15:18 -0400623CPUState *qemu_get_cpu(int cpu)
624{
625 CPUState *env = first_cpu;
626
627 while (env) {
628 if (env->cpu_index == cpu)
629 break;
630 env = env->next_cpu;
631 }
632
633 return env;
634}
635
bellard6a00d602005-11-21 23:25:50 +0000636void cpu_exec_init(CPUState *env)
bellardfd6ce8f2003-05-14 19:00:11 +0000637{
bellard6a00d602005-11-21 23:25:50 +0000638 CPUState **penv;
639 int cpu_index;
640
pbrookc2764712009-03-07 15:24:59 +0000641#if defined(CONFIG_USER_ONLY)
642 cpu_list_lock();
643#endif
bellard6a00d602005-11-21 23:25:50 +0000644 env->next_cpu = NULL;
645 penv = &first_cpu;
646 cpu_index = 0;
647 while (*penv != NULL) {
Nathan Froyd1e9fa732009-06-03 11:33:08 -0700648 penv = &(*penv)->next_cpu;
bellard6a00d602005-11-21 23:25:50 +0000649 cpu_index++;
650 }
651 env->cpu_index = cpu_index;
aliguori268a3622009-04-21 22:30:27 +0000652 env->numa_node = 0;
Blue Swirl72cf2d42009-09-12 07:36:22 +0000653 QTAILQ_INIT(&env->breakpoints);
654 QTAILQ_INIT(&env->watchpoints);
Jan Kiszkadc7a09c2011-03-15 12:26:31 +0100655#ifndef CONFIG_USER_ONLY
656 env->thread_id = qemu_get_thread_id();
657#endif
bellard6a00d602005-11-21 23:25:50 +0000658 *penv = env;
pbrookc2764712009-03-07 15:24:59 +0000659#if defined(CONFIG_USER_ONLY)
660 cpu_list_unlock();
661#endif
pbrookb3c77242008-06-30 16:31:04 +0000662#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
Alex Williamson0be71e32010-06-25 11:09:07 -0600663 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
664 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
pbrookb3c77242008-06-30 16:31:04 +0000665 cpu_save, cpu_load, env);
666#endif
bellardfd6ce8f2003-05-14 19:00:11 +0000667}
668
Tristan Gingoldd1a1eb72011-02-10 10:04:57 +0100669/* Allocate a new translation block. Flush the translation buffer if
670 too many translation blocks or too much generated code. */
671static TranslationBlock *tb_alloc(target_ulong pc)
672{
673 TranslationBlock *tb;
674
675 if (nb_tbs >= code_gen_max_blocks ||
676 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
677 return NULL;
678 tb = &tbs[nb_tbs++];
679 tb->pc = pc;
680 tb->cflags = 0;
681 return tb;
682}
683
684void tb_free(TranslationBlock *tb)
685{
686 /* In practice this is mostly used for single use temporary TB
687 Ignore the hard cases and just back up if this TB happens to
688 be the last one generated. */
689 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
690 code_gen_ptr = tb->tc_ptr;
691 nb_tbs--;
692 }
693}
694
bellard9fa3e852004-01-04 18:06:42 +0000695static inline void invalidate_page_bitmap(PageDesc *p)
696{
697 if (p->code_bitmap) {
Anthony Liguori7267c092011-08-20 22:09:37 -0500698 g_free(p->code_bitmap);
bellard9fa3e852004-01-04 18:06:42 +0000699 p->code_bitmap = NULL;
700 }
701 p->code_write_count = 0;
702}
703
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800704/* Set to NULL all the 'first_tb' fields in all PageDescs. */
705
706static void page_flush_tb_1 (int level, void **lp)
707{
708 int i;
709
710 if (*lp == NULL) {
711 return;
712 }
713 if (level == 0) {
714 PageDesc *pd = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +0000715 for (i = 0; i < L2_SIZE; ++i) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800716 pd[i].first_tb = NULL;
717 invalidate_page_bitmap(pd + i);
718 }
719 } else {
720 void **pp = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +0000721 for (i = 0; i < L2_SIZE; ++i) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800722 page_flush_tb_1 (level - 1, pp + i);
723 }
724 }
725}
726
bellardfd6ce8f2003-05-14 19:00:11 +0000727static void page_flush_tb(void)
728{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800729 int i;
730 for (i = 0; i < V_L1_SIZE; i++) {
731 page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
bellardfd6ce8f2003-05-14 19:00:11 +0000732 }
733}
734
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    /* code_gen_ptr past the buffer end means generated code overran the
       allocation — unrecoverable, so abort */
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    /* clear every CPU's virtual-PC -> TB lookup cache */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    /* clear the physical-PC hash table and all per-page TB lists */
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    /* rewind the code generation buffer; all old host code is now dead */
    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
763
764#ifdef DEBUG_TB_CHECK
765
j_mayerbc98a7e2007-04-04 07:55:12 +0000766static void tb_invalidate_check(target_ulong address)
bellardfd6ce8f2003-05-14 19:00:11 +0000767{
768 TranslationBlock *tb;
769 int i;
770 address &= TARGET_PAGE_MASK;
pbrook99773bd2006-04-16 15:14:59 +0000771 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
772 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
bellardfd6ce8f2003-05-14 19:00:11 +0000773 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
774 address >= tb->pc + tb->size)) {
Blue Swirl0bf9e312009-07-20 17:19:25 +0000775 printf("ERROR invalidate: address=" TARGET_FMT_lx
776 " PC=%08lx size=%04x\n",
pbrook99773bd2006-04-16 15:14:59 +0000777 address, (long)tb->pc, tb->size);
bellardfd6ce8f2003-05-14 19:00:11 +0000778 }
779 }
780 }
781}
782
783/* verify that all the pages have correct rights for code */
784static void tb_page_check(void)
785{
786 TranslationBlock *tb;
787 int i, flags1, flags2;
ths3b46e622007-09-17 08:09:54 +0000788
pbrook99773bd2006-04-16 15:14:59 +0000789 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
790 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
bellardfd6ce8f2003-05-14 19:00:11 +0000791 flags1 = page_get_flags(tb->pc);
792 flags2 = page_get_flags(tb->pc + tb->size - 1);
793 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
794 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
pbrook99773bd2006-04-16 15:14:59 +0000795 (long)tb->pc, tb->size, flags1, flags2);
bellardfd6ce8f2003-05-14 19:00:11 +0000796 }
797 }
798 }
799}
800
801#endif
802
803/* invalidate one TB */
804static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
805 int next_offset)
806{
807 TranslationBlock *tb1;
808 for(;;) {
809 tb1 = *ptb;
810 if (tb1 == tb) {
811 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
812 break;
813 }
814 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
815 }
816}
817
bellard9fa3e852004-01-04 18:06:42 +0000818static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
819{
820 TranslationBlock *tb1;
821 unsigned int n1;
822
823 for(;;) {
824 tb1 = *ptb;
825 n1 = (long)tb1 & 3;
826 tb1 = (TranslationBlock *)((long)tb1 & ~3);
827 if (tb1 == tb) {
828 *ptb = tb1->page_next[n1];
829 break;
830 }
831 ptb = &tb1->page_next[n1];
832 }
833}
834
/* Remove tb's outgoing jump slot 'n' from the circular list of jumps
   targeting the destination TB.  List entries are tagged pointers: the
   low 2 bits hold the jump-slot index, and the value 2 marks the list
   head (the destination TB itself, reached via jmp_first). */
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                /* reached the list head: continue through the
                   destination's jmp_first pointer */
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
862
863/* reset the jump entry 'n' of a TB so that it is not chained to
864 another TB */
865static inline void tb_reset_jump(TranslationBlock *tb, int n)
866{
867 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
868}
869
/* Remove 'tb' from every lookup structure: the physical-PC hash table,
   the per-page TB lists, each CPU's tb_jmp_cache, and the jump chains of
   any TB that branches into it.  A page equal to 'page_addr' is skipped
   during per-page unlinking (the caller is rewriting that page's list
   itself); pass -1 to unlink from both pages unconditionally. */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    /* page_addr[1] == -1 means the TB fits entirely in one page */
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB: walk the circular list of
       incoming jumps (tag 2 marks the head) and retarget each one */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
925
/* Set bits [start, start+len) in the bit array 'tab'
   (bit i lives in byte i/8 at position i%8). */
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end = start + len;
    int head = start & 7;
    int tail = end & 7;
    uint8_t *p = tab + (start >> 3);

    if ((start >> 3) == (end >> 3)) {
        /* the whole run lies within a single byte */
        if (start < end) {
            *p |= (0xff << head) & ~(0xff << tail);
        }
        return;
    }
    /* partial leading byte */
    *p++ |= 0xff << head;
    /* whole bytes in the middle */
    for (start = (start + 8) & ~7; start < (end & ~7); start += 8) {
        *p++ = 0xff;
    }
    /* partial trailing byte, if any */
    if (tail) {
        *p |= ~(0xff << tail);
    }
}
952
/* Build the page's SMC bitmap: one bit per byte of the page, set where
   translated code lives, so later writes can be checked cheaply in
   tb_invalidate_phys_page_fast(). */
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    /* TARGET_PAGE_SIZE bits, zero-initialised */
    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        /* low 2 bits of the list pointer hold the page index of this TB
           within this list (0 = first page, 1 = second page) */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            /* second page: the TB occupies [0, end-of-TB) of this page */
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
980
/* Translate one guest block starting at (pc, cs_base, flags) into host
   code, allocate its TranslationBlock, and link it into the physical
   page tables.  Returns the new TB.  May flush the entire translation
   cache if the TB array or code buffer is full. */
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    /* host code is emitted at the current end of the code buffer */
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    /* advance the buffer pointer, rounded up to CODE_GEN_ALIGN */
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed: a TB may straddle two guest pages */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
ths3b46e622007-09-17 08:09:54 +00001018
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    /* after enough write faults on this page, switch to bitmap-based
       checking (see tb_invalidate_phys_page_fast) */
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        /* low 2 bits of the list pointer are the page index of this TB */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            /* lazily locate the currently-executing TB, once, on the
               first overlapping TB */
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        /* does not return: restarts execution at the regenerated TB */
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
1128
/* len must be <= 8 and start must be a multiple of len */
/* Fast path for small code-area writes: consult the page's code bitmap
   (when present) and only fall back to the full range invalidation when
   the written bytes actually overlap translated code. */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        /* extract the bits covering [start, start+len) */
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
1155
bellard9fa3e852004-01-04 18:06:42 +00001156#if !defined(CONFIG_SOFTMMU)
/* Invalidate every TB on the page containing 'addr' (user-mode only).
   'pc' is the host PC of the faulting write (0 if unknown) and 'puc' the
   signal context, both used to restart execution when the write came
   from the TB currently running. */
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        /* low 2 bits of the list pointer are the page index of this TB */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        /* does not return: resumes guest execution via the signal context */
        cpu_resume_from_signal(env, puc);
    }
#endif
}
bellard9fa3e852004-01-04 18:06:42 +00001215#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001216
/* add the tb in the target page and protect it if necessary */
/* 'n' (0 or 1) selects which of the TB's two pages is being linked; the
   index is stored in the low bits of the page-list pointer. */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    /* sampled before we insert ourselves below */
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        /* a host page may cover several target pages; accumulate their
           combined protection flags and strip PAGE_WRITE from each */
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
1274
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done. */
    mmap_lock();
    /* add in the physical hash table (push at head of the chain) */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    /* tag 2 marks the head of the (initially empty) incoming-jump list */
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses (0xffff means slot unused) */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
1314
bellarda513fe12003-05-27 23:29:48 +00001315/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1316 tb[1].tc_ptr. Return NULL if not found */
1317TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1318{
1319 int m_min, m_max, m;
1320 unsigned long v;
1321 TranslationBlock *tb;
1322
1323 if (nb_tbs <= 0)
1324 return NULL;
1325 if (tc_ptr < (unsigned long)code_gen_buffer ||
1326 tc_ptr >= (unsigned long)code_gen_ptr)
1327 return NULL;
1328 /* binary search (cf Knuth) */
1329 m_min = 0;
1330 m_max = nb_tbs - 1;
1331 while (m_min <= m_max) {
1332 m = (m_min + m_max) >> 1;
1333 tb = &tbs[m];
1334 v = (unsigned long)tb->tc_ptr;
1335 if (v == tc_ptr)
1336 return tb;
1337 else if (tc_ptr < v) {
1338 m_max = m - 1;
1339 } else {
1340 m_min = m + 1;
1341 }
ths5fafdf22007-09-16 21:08:06 +00001342 }
bellarda513fe12003-05-27 23:29:48 +00001343 return &tbs[m_max];
1344}
bellard75012672003-06-21 13:11:07 +00001345
bellardea041c02003-06-25 16:16:50 +00001346static void tb_reset_jump_recursive(TranslationBlock *tb);
1347
/* Unchain jump slot 'n' of 'tb': remove tb from the destination's
   circular incoming-jump list, retarget the jump, then recursively
   unchain the destination TB as well. */
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list (the entry tagged with 2 is the destination
           TB itself) */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure now that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}
1386
1387static void tb_reset_jump_recursive(TranslationBlock *tb)
1388{
1389 tb_reset_jump_recursive2(tb, 0);
1390 tb_reset_jump_recursive2(tb, 1);
1391}
1392
bellard1fddef42005-04-17 19:16:13 +00001393#if defined(TARGET_HAS_ICE)
Paul Brook94df27f2010-02-28 23:47:45 +00001394#if defined(CONFIG_USER_ONLY)
/* User-mode: guest PCs map directly, so invalidate the TBs covering the
   breakpoint address itself. */
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
1399#else
/* System emulation: translate the breakpoint's virtual PC to a ram
   address before invalidating the TBs that cover it. */
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    /* combine the physical page's ram offset with the intra-page offset */
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
bellardc27004e2005-01-03 23:35:10 +00001417#endif
Paul Brook94df27f2010-02-28 23:47:45 +00001418#endif /* TARGET_HAS_ICE */
bellardd720b932004-04-25 17:57:43 +00001419
Paul Brookc527ee82010-03-01 03:31:14 +00001420#if defined(CONFIG_USER_ONLY)
1421void cpu_watchpoint_remove_all(CPUState *env, int mask)
1422
1423{
1424}
1425
/* User-mode stub: watchpoints are not supported here. */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
1431#else
/* Add a watchpoint. */
/* 'len' must be a power of two in {1,2,4,8} and 'addr' aligned to it.
   On success the new watchpoint is optionally returned via *watchpoint.
   Returns 0, or -EINVAL for an invalid length/alignment. */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    /* force the slow path for accesses to this page */
    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
1463
/* Remove a specific watchpoint. */
/* Matches on address, length mask, and flags (ignoring the transient
   BP_WATCHPOINT_HIT bit).  Returns 0 or -ENOENT when not found. */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}
1480
aliguoria1d1bb32008-11-18 20:07:32 +00001481/* Remove a specific watchpoint by reference. */
1482void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1483{
Blue Swirl72cf2d42009-09-12 07:36:22 +00001484 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
edgar_igl7d03f822008-05-17 18:58:29 +00001485
aliguoria1d1bb32008-11-18 20:07:32 +00001486 tlb_flush_page(env, watchpoint->vaddr);
1487
Anthony Liguori7267c092011-08-20 22:09:37 -05001488 g_free(watchpoint);
edgar_igl7d03f822008-05-17 18:58:29 +00001489}
1490
aliguoria1d1bb32008-11-18 20:07:32 +00001491/* Remove all matching watchpoints. */
1492void cpu_watchpoint_remove_all(CPUState *env, int mask)
1493{
aliguoric0ce9982008-11-25 22:13:57 +00001494 CPUWatchpoint *wp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001495
Blue Swirl72cf2d42009-09-12 07:36:22 +00001496 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001497 if (wp->flags & mask)
1498 cpu_watchpoint_remove_by_ref(env, wp);
aliguoric0ce9982008-11-25 22:13:57 +00001499 }
aliguoria1d1bb32008-11-18 20:07:32 +00001500}
Paul Brookc527ee82010-03-01 03:31:14 +00001501#endif
aliguoria1d1bb32008-11-18 20:07:32 +00001502
/* Add a breakpoint.
 *
 * env:        CPU whose breakpoint list is modified.
 * pc:         guest virtual address to break at.
 * flags:      BP_* flags; BP_GDB entries are kept at the list head.
 * breakpoint: if non-NULL, receives the new entry on success.
 *
 * Returns 0 on success, -ENOSYS when the target lacks ICE support.
 */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    /* Discard the translated block containing pc so it is retranslated
       with the breakpoint in effect. */
    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}
1530
1531/* Remove a specific breakpoint. */
1532int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1533{
1534#if defined(TARGET_HAS_ICE)
1535 CPUBreakpoint *bp;
1536
Blue Swirl72cf2d42009-09-12 07:36:22 +00001537 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00001538 if (bp->pc == pc && bp->flags == flags) {
1539 cpu_breakpoint_remove_by_ref(env, bp);
bellard4c3a88a2003-07-26 12:06:08 +00001540 return 0;
aliguoria1d1bb32008-11-18 20:07:32 +00001541 }
bellard4c3a88a2003-07-26 12:06:08 +00001542 }
aliguoria1d1bb32008-11-18 20:07:32 +00001543 return -ENOENT;
bellard4c3a88a2003-07-26 12:06:08 +00001544#else
aliguoria1d1bb32008-11-18 20:07:32 +00001545 return -ENOSYS;
bellard4c3a88a2003-07-26 12:06:08 +00001546#endif
1547}
1548
aliguoria1d1bb32008-11-18 20:07:32 +00001549/* Remove a specific breakpoint by reference. */
1550void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001551{
bellard1fddef42005-04-17 19:16:13 +00001552#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001553 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
bellardd720b932004-04-25 17:57:43 +00001554
aliguoria1d1bb32008-11-18 20:07:32 +00001555 breakpoint_invalidate(env, breakpoint->pc);
1556
Anthony Liguori7267c092011-08-20 22:09:37 -05001557 g_free(breakpoint);
aliguoria1d1bb32008-11-18 20:07:32 +00001558#endif
1559}
1560
1561/* Remove all matching breakpoints. */
1562void cpu_breakpoint_remove_all(CPUState *env, int mask)
1563{
1564#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001565 CPUBreakpoint *bp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001566
Blue Swirl72cf2d42009-09-12 07:36:22 +00001567 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001568 if (bp->flags & mask)
1569 cpu_breakpoint_remove_by_ref(env, bp);
aliguoric0ce9982008-11-25 22:13:57 +00001570 }
bellard4c3a88a2003-07-26 12:06:08 +00001571#endif
1572}
1573
bellardc33a3462003-07-29 20:50:33 +00001574/* enable or disable single step mode. EXCP_DEBUG is returned by the
1575 CPU loop after each instruction */
1576void cpu_single_step(CPUState *env, int enabled)
1577{
bellard1fddef42005-04-17 19:16:13 +00001578#if defined(TARGET_HAS_ICE)
bellardc33a3462003-07-29 20:50:33 +00001579 if (env->singlestep_enabled != enabled) {
1580 env->singlestep_enabled = enabled;
aliguorie22a25c2009-03-12 20:12:48 +00001581 if (kvm_enabled())
1582 kvm_update_guest_debug(env, 0);
1583 else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01001584 /* must flush all the translated code to avoid inconsistencies */
aliguorie22a25c2009-03-12 20:12:48 +00001585 /* XXX: only flush what is necessary */
1586 tb_flush(env);
1587 }
bellardc33a3462003-07-29 20:50:33 +00001588 }
1589#endif
1590}
1591
/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    /* Open the log file lazily on first enable.  log_append decides
       between truncate (first open) and append (reopen). */
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);  /* _exit: skip atexit/stdio teardown on failure */
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif defined(_WIN32)
        /* Win32 doesn't support line-buffering, so use unbuffered output. */
        setvbuf(logfile, NULL, _IONBF, 0);
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        /* From now on any reopen must append, not truncate. */
        log_append = 1;
    }
    /* Disabling logging closes the file. */
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}
1621
/* Change the log file name and re-apply the current loglevel, which
 * closes the old file and reopens under the new name.
 * NOTE(review): the previous logfilename is not freed (it may be the
 * default string literal, so freeing is not safe here) and the strdup()
 * result is unchecked -- confirm this is acceptable for callers. */
void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}
bellardc33a3462003-07-29 20:50:33 +00001631
/* Break the TB chaining of the CPU's currently-executing block so the
 * execution loop regains control soon.  Safe to call when no TB is
 * running (current_tb == NULL: no-op). */
static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    /* Serialize concurrent unlink attempts against each other. */
    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}
1651
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001652#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUState *env, int mask)
{
    int old_mask;

    /* Remember the previous request bits so we can tell below whether
       this call raised a genuinely new interrupt. */
    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case its halted.
     */
    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
        return;
    }

    if (use_icount) {
        /* Saturate the instruction-count decrementer so the running TB
           stops at the next boundary (icount mode). */
        env->icount_decr.u16.high = 0xffff;
        /* A new interrupt raised outside an I/O instruction would make
           icount execution non-deterministic -- treat it as fatal. */
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu_unlink_tb(env);
    }
}
1680
/* Dispatch hook behind cpu_interrupt(); defaults to the TCG
   implementation above.  Non-static so it can be replaced. */
CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1682
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001683#else /* CONFIG_USER_ONLY */
1684
/* User-mode variant: record the pending interrupt and unchain the
   current TB.  No cross-thread kick is done here (compare the softmmu
   path in tcg_handle_interrupt). */
void cpu_interrupt(CPUState *env, int mask)
{
    env->interrupt_request |= mask;
    cpu_unlink_tb(env);
}
1690#endif /* CONFIG_USER_ONLY */
1691
/* Clear the given bits from the CPU's pending interrupt mask. */
void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}
1696
/* Request that the CPU leave its execution loop: set exit_request and
   unchain the current TB so the request is noticed promptly. */
void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}
1702
/* Table mapping log-flag names to their CPU_LOG_* masks and help text;
   terminated by a zero-mask sentinel.  Parsed by cpu_str_to_log_mask(). */
const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      /* help string is assembled from target-dependent pieces */
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
1734
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001735#ifndef CONFIG_USER_ONLY
/* List of registered CPUPhysMemoryClients, most recently added first
   (registration inserts at the head). */
static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
    = QLIST_HEAD_INITIALIZER(memory_client_list);
1738
1739static void cpu_notify_set_memory(target_phys_addr_t start_addr,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001740 ram_addr_t size,
Michael S. Tsirkin0fd542f2011-04-06 22:25:38 +03001741 ram_addr_t phys_offset,
1742 bool log_dirty)
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001743{
1744 CPUPhysMemoryClient *client;
1745 QLIST_FOREACH(client, &memory_client_list, list) {
Michael S. Tsirkin0fd542f2011-04-06 22:25:38 +03001746 client->set_memory(client, start_addr, size, phys_offset, log_dirty);
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001747 }
1748}
1749
1750static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001751 target_phys_addr_t end)
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001752{
1753 CPUPhysMemoryClient *client;
1754 QLIST_FOREACH(client, &memory_client_list, list) {
1755 int r = client->sync_dirty_bitmap(client, start, end);
1756 if (r < 0)
1757 return r;
1758 }
1759 return 0;
1760}
1761
/* Toggle global dirty logging and forward the change to every client.
 * Returns the first client error, else 0.
 * NOTE(review): if a client fails, the global dirty-log state has
 * already been switched and is not rolled back. */
static int cpu_notify_migration_log(int enable)
{
    CPUPhysMemoryClient *client;
    if (enable) {
        memory_global_dirty_log_start();
    } else {
        memory_global_dirty_log_stop();
    }
    QLIST_FOREACH(client, &memory_client_list, list) {
        int r = client->migration_log(client, enable);
        if (r < 0)
            return r;
    }
    return 0;
}
1777
/* Accumulator for the physical page-table walk below: adjacent pages
   with contiguous phys_offset are merged into one range before being
   reported to a client (see phys_page_for_each_1). */
struct last_map {
    target_phys_addr_t start_addr;
    ram_addr_t size;        /* 0 means no range is pending */
    ram_addr_t phys_offset;
};
1783
/* The l1_phys_map provides the upper P_L1_BITs of the guest physical
 * address.  Each intermediate table provides the next L2_BITs of guest
 * physical address space.  The number of levels vary based on host and
 * guest configuration, making it efficient to build the final guest
 * physical address by seeding the L1 offset and shifting and adding in
 * each L2 offset as we recurse through them. */
static void phys_page_for_each_1(CPUPhysMemoryClient *client, int level,
                                 void **lp, target_phys_addr_t addr,
                                 struct last_map *map)
{
    int i;

    if (*lp == NULL) {
        /* Hole in the page table: nothing mapped under this entry. */
        return;
    }
    if (level == 0) {
        /* Leaf level: *lp is an array of PhysPageDescs. */
        PhysPageDesc *pd = *lp;
        addr <<= L2_BITS + TARGET_PAGE_BITS;
        for (i = 0; i < L2_SIZE; ++i) {
            if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
                target_phys_addr_t start_addr = addr | i << TARGET_PAGE_BITS;

                /* Extend the pending range while both the guest address
                   and the backing offset stay contiguous; otherwise
                   flush it to the client and start a new range. */
                if (map->size &&
                    start_addr == map->start_addr + map->size &&
                    pd[i].phys_offset == map->phys_offset + map->size) {

                    map->size += TARGET_PAGE_SIZE;
                    continue;
                } else if (map->size) {
                    client->set_memory(client, map->start_addr,
                                       map->size, map->phys_offset, false);
                }

                map->start_addr = start_addr;
                map->size = TARGET_PAGE_SIZE;
                map->phys_offset = pd[i].phys_offset;
            }
        }
    } else {
        /* Interior level: recurse, accumulating address bits. */
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            phys_page_for_each_1(client, level - 1, pp + i,
                                 (addr << L2_BITS) | i, map);
        }
    }
}
1830
/* Walk the entire physical page table, reporting every mapped range to
   'client' via set_memory; the trailing pending range is flushed last. */
static void phys_page_for_each(CPUPhysMemoryClient *client)
{
    int i;
    struct last_map map = { };  /* size == 0: nothing pending yet */

    for (i = 0; i < P_L1_SIZE; ++i) {
        phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
                             l1_phys_map + i, i, &map);
    }
    if (map.size) {
        client->set_memory(client, map.start_addr, map.size, map.phys_offset,
                           false);
    }
}
1845
/* Register a new physical-memory client and immediately replay the
   current memory map to it through its set_memory callback. */
void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_INSERT_HEAD(&memory_client_list, client, list);
    phys_page_for_each(client);
}
1851
/* Remove a client from the notification list; no callbacks are made. */
void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_REMOVE(client, list);
}
1856#endif
1857
/* Return non-zero iff the first n bytes of s1 equal the NUL-terminated
 * string s2 exactly (same length and same bytes).  s1 need not be
 * NUL-terminated; callers pass a slice between separators.
 */
static int cmp1(const char *s1, int n, const char *s2)
{
    /* Explicit guard + cast: strlen() returns size_t, so comparing it
       against a plain int silently promotes n to unsigned; make the
       non-negative assumption and the conversion explicit. */
    if (n < 0 || strlen(s2) != (size_t)n) {
        return 0;
    }
    return memcmp(s1, s2, n) == 0;
}
ths3b46e622007-09-17 08:09:54 +00001864
bellardf193c792004-03-21 17:06:25 +00001865/* takes a comma separated list of log masks. Return 0 if error. */
1866int cpu_str_to_log_mask(const char *str)
1867{
blueswir1c7cd6a32008-10-02 18:27:46 +00001868 const CPULogItem *item;
bellardf193c792004-03-21 17:06:25 +00001869 int mask;
1870 const char *p, *p1;
1871
1872 p = str;
1873 mask = 0;
1874 for(;;) {
1875 p1 = strchr(p, ',');
1876 if (!p1)
1877 p1 = p + strlen(p);
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001878 if(cmp1(p,p1-p,"all")) {
1879 for(item = cpu_log_items; item->mask != 0; item++) {
1880 mask |= item->mask;
1881 }
1882 } else {
1883 for(item = cpu_log_items; item->mask != 0; item++) {
1884 if (cmp1(p, p1 - p, item->name))
1885 goto found;
1886 }
1887 return 0;
bellardf193c792004-03-21 17:06:25 +00001888 }
bellardf193c792004-03-21 17:06:25 +00001889 found:
1890 mask |= item->mask;
1891 if (*p1 != ',')
1892 break;
1893 p = p1 + 1;
1894 }
1895 return mask;
1896}
bellardea041c02003-06-25 16:16:50 +00001897
/* Report a fatal emulation error: print the formatted message and a
 * CPU state dump to stderr (and to the qemu log when enabled), then
 * abort().  Never returns.
 */
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);  /* ap is consumed by vfprintf; ap2 feeds the log */
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        /* Restore the default SIGABRT disposition so a guest-installed
           handler cannot intercept the abort() below. */
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
1937
thsc5be9f02007-02-28 20:20:53 +00001938CPUState *cpu_copy(CPUState *env)
1939{
ths01ba9812007-12-09 02:22:57 +00001940 CPUState *new_env = cpu_init(env->cpu_model_str);
thsc5be9f02007-02-28 20:20:53 +00001941 CPUState *next_cpu = new_env->next_cpu;
1942 int cpu_index = new_env->cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001943#if defined(TARGET_HAS_ICE)
1944 CPUBreakpoint *bp;
1945 CPUWatchpoint *wp;
1946#endif
1947
thsc5be9f02007-02-28 20:20:53 +00001948 memcpy(new_env, env, sizeof(CPUState));
aliguori5a38f082009-01-15 20:16:51 +00001949
1950 /* Preserve chaining and index. */
thsc5be9f02007-02-28 20:20:53 +00001951 new_env->next_cpu = next_cpu;
1952 new_env->cpu_index = cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001953
1954 /* Clone all break/watchpoints.
1955 Note: Once we support ptrace with hw-debug register access, make sure
1956 BP_CPU break/watchpoints are handled correctly on clone. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00001957 QTAILQ_INIT(&env->breakpoints);
1958 QTAILQ_INIT(&env->watchpoints);
aliguori5a38f082009-01-15 20:16:51 +00001959#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001960 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001961 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1962 }
Blue Swirl72cf2d42009-09-12 07:36:22 +00001963 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001964 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1965 wp->flags, NULL);
1966 }
1967#endif
1968
thsc5be9f02007-02-28 20:20:53 +00001969 return new_env;
1970}
1971
bellard01243112004-01-04 15:48:17 +00001972#if !defined(CONFIG_USER_ONLY)
1973
edgar_igl5c751e92008-05-06 08:44:21 +00001974static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1975{
1976 unsigned int i;
1977
1978 /* Discard jump cache entries for any tb which might potentially
1979 overlap the flushed page. */
1980 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1981 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001982 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001983
1984 i = tb_jmp_cache_hash_page(addr);
1985 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001986 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001987}
1988
/* Prototype of an invalid TLB entry: all comparators set to -1 so no
   masked guest address can ever match it. */
static CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read = -1,
    .addr_write = -1,
    .addr_code = -1,
    .addend = -1,
};
1995
/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    /* Invalidate every slot in every MMU mode. */
    for(i = 0; i < CPU_TLB_SIZE; i++) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
        }
    }

    /* Cached TB lookups are stale now as well. */
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    /* Forget any pending large-page flush range. */
    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
    tlb_flush_count++;
}
2022
bellard274da6b2004-05-20 21:56:27 +00002023static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
bellard61382a52003-10-27 21:22:23 +00002024{
ths5fafdf22007-09-16 21:08:06 +00002025 if (addr == (tlb_entry->addr_read &
bellard84b7b8e2005-11-28 21:19:04 +00002026 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00002027 addr == (tlb_entry->addr_write &
bellard84b7b8e2005-11-28 21:19:04 +00002028 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00002029 addr == (tlb_entry->addr_code &
bellard84b7b8e2005-11-28 21:19:04 +00002030 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
Igor Kovalenko08738982009-07-12 02:15:40 +04002031 *tlb_entry = s_cputlb_empty_entry;
bellard84b7b8e2005-11-28 21:19:04 +00002032 }
bellard61382a52003-10-27 21:22:23 +00002033}
2034
/* Flush all TLB entries (in every MMU mode) covering the page that
   contains 'addr', plus the matching jump-cache pages. */
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        /* A previously-inserted large page overlaps this address; a
           single-slot flush cannot remove it, so flush everything. */
        tlb_flush(env, 1);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    /* One direct-mapped slot per MMU mode covers this page. */
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

    tlb_flush_jmp_cache(env, addr);
}
2064
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    /* Clearing CODE_DIRTY_FLAG routes writes to this page through the
       TLB_NOTDIRTY slow path, where self-modifying code is caught. */
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}
2073
/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    /* env and vaddr are unused here; the signature mirrors the
       protect-side helpers. */
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
}
2081
ths5fafdf22007-09-16 21:08:06 +00002082static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
bellard1ccde1c2004-02-06 19:46:14 +00002083 unsigned long start, unsigned long length)
2084{
2085 unsigned long addr;
bellard84b7b8e2005-11-28 21:19:04 +00002086 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2087 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
bellard1ccde1c2004-02-06 19:46:14 +00002088 if ((addr - start) < length) {
pbrook0f459d12008-06-09 00:20:13 +00002089 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
bellard1ccde1c2004-02-06 19:46:14 +00002090 }
2091 }
2092}
2093
/* Note: start and end must be within the same ram block. */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }

    /* Re-arm dirty tracking in every CPU's TLB for the host range. */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for(i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
        }
    }
}
2129
aliguori74576192008-10-06 14:02:03 +00002130int cpu_physical_memory_set_dirty_tracking(int enable)
2131{
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002132 int ret = 0;
aliguori74576192008-10-06 14:02:03 +00002133 in_migration = enable;
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002134 ret = cpu_notify_migration_log(!!enable);
2135 return ret;
aliguori74576192008-10-06 14:02:03 +00002136}
2137
/* Return non-zero while migration dirty tracking is active. */
int cpu_physical_memory_get_dirty_tracking(void)
{
    return in_migration;
}
2142
Anthony Liguoric227f092009-10-01 16:12:16 -05002143int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
2144 target_phys_addr_t end_addr)
aliguori2bec46d2008-11-24 20:21:41 +00002145{
Michael S. Tsirkin7b8f3b72010-01-27 22:07:21 +02002146 int ret;
Jan Kiszka151f7742009-05-01 20:52:47 +02002147
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002148 ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
Jan Kiszka151f7742009-05-01 20:52:47 +02002149 return ret;
aliguori2bec46d2008-11-24 20:21:41 +00002150}
2151
Anthony PERARDe5896b12011-02-07 12:19:23 +01002152int cpu_physical_log_start(target_phys_addr_t start_addr,
2153 ram_addr_t size)
2154{
2155 CPUPhysMemoryClient *client;
2156 QLIST_FOREACH(client, &memory_client_list, list) {
2157 if (client->log_start) {
2158 int r = client->log_start(client, start_addr, size);
2159 if (r < 0) {
2160 return r;
2161 }
2162 }
2163 }
2164 return 0;
2165}
2166
2167int cpu_physical_log_stop(target_phys_addr_t start_addr,
2168 ram_addr_t size)
2169{
2170 CPUPhysMemoryClient *client;
2171 QLIST_FOREACH(client, &memory_client_list, list) {
2172 if (client->log_stop) {
2173 int r = client->log_stop(client, start_addr, size);
2174 if (r < 0) {
2175 return r;
2176 }
2177 }
2178 }
2179 return 0;
2180}
2181
bellard3a7d9292005-08-21 09:26:42 +00002182static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2183{
Anthony Liguoric227f092009-10-01 16:12:16 -05002184 ram_addr_t ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00002185 void *p;
bellard3a7d9292005-08-21 09:26:42 +00002186
bellard84b7b8e2005-11-28 21:19:04 +00002187 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
pbrook5579c7f2009-04-11 14:47:08 +00002188 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2189 + tlb_entry->addend);
Marcelo Tosattie8902612010-10-11 15:31:19 -03002190 ram_addr = qemu_ram_addr_from_host_nofail(p);
bellard3a7d9292005-08-21 09:26:42 +00002191 if (!cpu_physical_memory_is_dirty(ram_addr)) {
pbrook0f459d12008-06-09 00:20:13 +00002192 tlb_entry->addr_write |= TLB_NOTDIRTY;
bellard3a7d9292005-08-21 09:26:42 +00002193 }
2194 }
2195}
2196
2197/* update the TLB according to the current state of the dirty bits */
2198void cpu_tlb_update_dirty(CPUState *env)
2199{
2200 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002201 int mmu_idx;
2202 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2203 for(i = 0; i < CPU_TLB_SIZE; i++)
2204 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2205 }
bellard3a7d9292005-08-21 09:26:42 +00002206}
2207
pbrook0f459d12008-06-09 00:20:13 +00002208static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002209{
pbrook0f459d12008-06-09 00:20:13 +00002210 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2211 tlb_entry->addr_write = vaddr;
bellard1ccde1c2004-02-06 19:46:14 +00002212}
2213
pbrook0f459d12008-06-09 00:20:13 +00002214/* update the TLB corresponding to virtual page vaddr
2215 so that it is no longer dirty */
2216static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002217{
bellard1ccde1c2004-02-06 19:46:14 +00002218 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002219 int mmu_idx;
bellard1ccde1c2004-02-06 19:46:14 +00002220
pbrook0f459d12008-06-09 00:20:13 +00002221 vaddr &= TARGET_PAGE_MASK;
bellard1ccde1c2004-02-06 19:46:14 +00002222 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002223 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2224 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
bellard9fa3e852004-01-04 18:06:42 +00002225}
2226
/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated. */
static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
                               target_ulong size)
{
    /* 'size' is a power of two (checked by callers via the page-size
       machinery), so ~(size - 1) is the alignment mask for the page. */
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        /* No large page tracked yet: start tracking this one. */
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB. */
    mask &= env->tlb_flush_mask;
    /* Widen the mask one bit at a time until the tracked region and the
       new page fall inside the same aligned window. */
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}
2249
/* Add a new TLB entry. At most one entry for a given virtual address
   is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
   supplied size is only used by tlb_flush_page.

   vaddr   - guest virtual address of the page (page aligned)
   paddr   - guest physical address it maps to
   prot    - PAGE_READ/PAGE_WRITE/PAGE_EXEC permission bits
   mmu_idx - MMU mode whose TLB receives the entry
   size    - page size; >TARGET_PAGE_SIZE triggers large-page tracking */
void tlb_set_page(CPUState *env, target_ulong vaddr,
                  target_phys_addr_t paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    unsigned long addend;
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    target_phys_addr_t iotlb;

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }
    /* Look up the physical page; unmapped addresses fall back to the
       unassigned-memory handler. */
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
           " prot=%x idx=%d pd=0x%08lx\n",
           vaddr, paddr, prot, mmu_idx, pd);
#endif

    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM. */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region. This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address. */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        if (p) {
            iotlb += p->region_offset;
        } else {
            iotlb += paddr;
        }
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines. */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = io_mem_watch + paddr;
                address |= TLB_MMIO;
                break;
            }
        }
    }

    /* Fill the TLB slot; iotlb/addend are stored relative to vaddr so
       the fast path can add the guest address directly. */
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;     /* -1 never matches a page-aligned address */
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback. */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            /* Clean RAM page: trap the first write to mark it dirty. */
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}
2355
bellard01243112004-01-04 15:48:17 +00002356#else
2357
/* User-mode emulation (CONFIG_USER_ONLY) has no softmmu TLB, so a
   flush is a no-op; the real implementation is in the softmmu branch. */
void tlb_flush(CPUState *env, int flush_global)
{
}
2361
/* No-op in user-mode emulation: there is no softmmu TLB to flush. */
void tlb_flush_page(CPUState *env, target_ulong addr)
{
}
2365
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */

/* Accumulator used while walking the page tables: consecutive pages
   with identical protection are merged into one region before being
   reported to the callback. */
struct walk_memory_regions_data
{
    walk_memory_regions_fn fn;  /* callback invoked for each merged region */
    void *priv;                 /* opaque pointer passed through to fn */
    unsigned long start;        /* start of the open region, or -1ul if none */
    int prot;                   /* protection flags of the open region */
};
bellard9fa3e852004-01-04 18:06:42 +00002378
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002379static int walk_memory_regions_end(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00002380 abi_ulong end, int new_prot)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002381{
2382 if (data->start != -1ul) {
2383 int rc = data->fn(data->priv, data->start, end, data->prot);
2384 if (rc != 0) {
2385 return rc;
bellard9fa3e852004-01-04 18:06:42 +00002386 }
bellard33417e72003-08-10 21:47:01 +00002387 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002388
2389 data->start = (new_prot ? end : -1ul);
2390 data->prot = new_prot;
2391
2392 return 0;
2393}
2394
/* Recursively walk one level of the radix page table.  'base' is the
   guest address covered by *lp and 'level' the remaining tree depth;
   protection changes are reported through walk_memory_regions_end.
   Returns the first non-zero callback result, or 0. */
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        /* Empty subtree: close any open region at this address. */
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        /* Leaf: *lp points at an array of PageDesc entries. */
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                /* Protection changed: flush the current region. */
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        /* Interior node: *lp points at an array of child pointers. */
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}
2432
2433int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2434{
2435 struct walk_memory_regions_data data;
2436 unsigned long i;
2437
2438 data.fn = fn;
2439 data.priv = priv;
2440 data.start = -1ul;
2441 data.prot = 0;
2442
2443 for (i = 0; i < V_L1_SIZE; i++) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002444 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002445 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2446 if (rc != 0) {
2447 return rc;
2448 }
2449 }
2450
2451 return walk_memory_regions_end(&data, 0, 0);
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002452}
2453
Paul Brookb480d9b2010-03-12 23:23:29 +00002454static int dump_region(void *priv, abi_ulong start,
2455 abi_ulong end, unsigned long prot)
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002456{
2457 FILE *f = (FILE *)priv;
2458
Paul Brookb480d9b2010-03-12 23:23:29 +00002459 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2460 " "TARGET_ABI_FMT_lx" %c%c%c\n",
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002461 start, end, end - start,
2462 ((prot & PAGE_READ) ? 'r' : '-'),
2463 ((prot & PAGE_WRITE) ? 'w' : '-'),
2464 ((prot & PAGE_EXEC) ? 'x' : '-'));
2465
2466 return (0);
2467}
2468
2469/* dump memory mappings */
2470void page_dump(FILE *f)
2471{
2472 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2473 "start", "end", "size", "prot");
2474 walk_memory_regions(f, dump_region);
bellard33417e72003-08-10 21:47:01 +00002475}
2476
pbrook53a59602006-03-25 19:31:22 +00002477int page_get_flags(target_ulong address)
bellard33417e72003-08-10 21:47:01 +00002478{
bellard9fa3e852004-01-04 18:06:42 +00002479 PageDesc *p;
2480
2481 p = page_find(address >> TARGET_PAGE_BITS);
bellard33417e72003-08-10 21:47:01 +00002482 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00002483 return 0;
2484 return p->flags;
bellard33417e72003-08-10 21:47:01 +00002485}
2486
Richard Henderson376a7902010-03-10 15:57:04 -08002487/* Modify the flags of a page and invalidate the code if necessary.
2488 The flag PAGE_WRITE_ORG is positioned automatically depending
2489 on PAGE_WRITE. The mmap_lock should already be held. */
pbrook53a59602006-03-25 19:31:22 +00002490void page_set_flags(target_ulong start, target_ulong end, int flags)
bellard9fa3e852004-01-04 18:06:42 +00002491{
Richard Henderson376a7902010-03-10 15:57:04 -08002492 target_ulong addr, len;
bellard9fa3e852004-01-04 18:06:42 +00002493
Richard Henderson376a7902010-03-10 15:57:04 -08002494 /* This function should never be called with addresses outside the
2495 guest address space. If this assert fires, it probably indicates
2496 a missing call to h2g_valid. */
Paul Brookb480d9b2010-03-12 23:23:29 +00002497#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2498 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002499#endif
2500 assert(start < end);
2501
bellard9fa3e852004-01-04 18:06:42 +00002502 start = start & TARGET_PAGE_MASK;
2503 end = TARGET_PAGE_ALIGN(end);
Richard Henderson376a7902010-03-10 15:57:04 -08002504
2505 if (flags & PAGE_WRITE) {
bellard9fa3e852004-01-04 18:06:42 +00002506 flags |= PAGE_WRITE_ORG;
Richard Henderson376a7902010-03-10 15:57:04 -08002507 }
2508
2509 for (addr = start, len = end - start;
2510 len != 0;
2511 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2512 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2513
2514 /* If the write protection bit is set, then we invalidate
2515 the code inside. */
ths5fafdf22007-09-16 21:08:06 +00002516 if (!(p->flags & PAGE_WRITE) &&
bellard9fa3e852004-01-04 18:06:42 +00002517 (flags & PAGE_WRITE) &&
2518 p->first_tb) {
bellardd720b932004-04-25 17:57:43 +00002519 tb_invalidate_phys_page(addr, 0, NULL);
bellard9fa3e852004-01-04 18:06:42 +00002520 }
2521 p->flags = flags;
2522 }
bellard9fa3e852004-01-04 18:06:42 +00002523}
2524
ths3d97b402007-11-02 19:02:07 +00002525int page_check_range(target_ulong start, target_ulong len, int flags)
2526{
2527 PageDesc *p;
2528 target_ulong end;
2529 target_ulong addr;
2530
Richard Henderson376a7902010-03-10 15:57:04 -08002531 /* This function should never be called with addresses outside the
2532 guest address space. If this assert fires, it probably indicates
2533 a missing call to h2g_valid. */
Blue Swirl338e9e62010-03-13 09:48:08 +00002534#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2535 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002536#endif
2537
Richard Henderson3e0650a2010-03-29 10:54:42 -07002538 if (len == 0) {
2539 return 0;
2540 }
Richard Henderson376a7902010-03-10 15:57:04 -08002541 if (start + len - 1 < start) {
2542 /* We've wrapped around. */
balrog55f280c2008-10-28 10:24:11 +00002543 return -1;
Richard Henderson376a7902010-03-10 15:57:04 -08002544 }
balrog55f280c2008-10-28 10:24:11 +00002545
ths3d97b402007-11-02 19:02:07 +00002546 end = TARGET_PAGE_ALIGN(start+len); /* must do before we loose bits in the next step */
2547 start = start & TARGET_PAGE_MASK;
2548
Richard Henderson376a7902010-03-10 15:57:04 -08002549 for (addr = start, len = end - start;
2550 len != 0;
2551 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
ths3d97b402007-11-02 19:02:07 +00002552 p = page_find(addr >> TARGET_PAGE_BITS);
2553 if( !p )
2554 return -1;
2555 if( !(p->flags & PAGE_VALID) )
2556 return -1;
2557
bellarddae32702007-11-14 10:51:00 +00002558 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
ths3d97b402007-11-02 19:02:07 +00002559 return -1;
bellarddae32702007-11-14 10:51:00 +00002560 if (flags & PAGE_WRITE) {
2561 if (!(p->flags & PAGE_WRITE_ORG))
2562 return -1;
2563 /* unprotect the page if it was put read-only because it
2564 contains translated code */
2565 if (!(p->flags & PAGE_WRITE)) {
2566 if (!page_unprotect(addr, 0, NULL))
2567 return -1;
2568 }
2569 return 0;
2570 }
ths3d97b402007-11-02 19:02:07 +00002571 }
2572 return 0;
2573}
2574
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled.

   address - faulting guest address
   pc/puc  - host PC and signal context, forwarded to the TB
             invalidation machinery so the current TB can be aborted */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler. However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok. */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        /* One host page can span several target pages when the host
           page size exceeds the target's, so restore write access to
           the whole host page. */
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        /* Apply the accumulated protection of all target pages to the
           host page. */
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
2622
/* No-op in user-mode emulation: there is no softmmu TLB whose dirty
   state needs updating. */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
bellard9fa3e852004-01-04 18:06:42 +00002627#endif /* defined(CONFIG_USER_ONLY) */
2628
pbrooke2eef172008-06-08 01:09:01 +00002629#if !defined(CONFIG_USER_ONLY)
pbrook8da3ff12008-12-01 18:59:50 +00002630
/* Byte offset of 'addr' within its target page. */
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
/* A physical page subdivided into byte-granular I/O regions: one
   handler index and region offset per byte offset within the page. */
typedef struct subpage_t {
    target_phys_addr_t base;                    /* base physical address of the page */
    ram_addr_t sub_io_index[TARGET_PAGE_SIZE];  /* I/O handler index per byte offset */
    ram_addr_t region_offset[TARGET_PAGE_SIZE]; /* offset into the I/O region per byte offset */
} subpage_t;

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset);
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset);
/* Given the target page at 'addr' and a registration request
   [start_addr, start_addr + orig_size), compute the in-page range
   [start_addr2, end_addr2] covered by the request and set need_subpage
   when the request does not cover the entire page. */
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
2662
/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page. The address used when calling the IO function is
   the offset from the start of the region, plus region_offset. Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset. This should not be a problem unless
   the low bits of start_addr and region_offset differ.
   'log_dirty' requests dirty logging and is forwarded to the memory
   clients. */
void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
                                      ram_addr_t size,
                                      ram_addr_t phys_offset,
                                      ram_addr_t region_offset,
                                      bool log_dirty)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;    /* unrounded size, used by CHECK_SUBPAGE */
    subpage_t *subpage;

    assert(size);
    cpu_notify_set_memory(start_addr, size, phys_offset, log_dirty);

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    /* Round the size up to a whole number of target pages. */
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;

    addr = start_addr;
    do {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            /* Page already mapped: it may need to be (re)split into a
               subpage if this request covers it only partially. */
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    /* Already a subpage: reuse it. */
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                /* RAM/ROM pages advance phys_offset so each page maps a
                   distinct ram_addr; I/O pages all share one handler. */
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            /* Page not mapped yet: allocate its descriptor. */
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
        addr += TARGET_PAGE_SIZE;
    } while (addr != end_addr);

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
2756
bellardba863452006-09-24 18:41:10 +00002757/* XXX: temporary until new memory mapping API */
Anthony Liguoric227f092009-10-01 16:12:16 -05002758ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
bellardba863452006-09-24 18:41:10 +00002759{
2760 PhysPageDesc *p;
2761
2762 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2763 if (!p)
2764 return IO_MEM_UNASSIGNED;
2765 return p->phys_offset;
2766}
2767
Anthony Liguoric227f092009-10-01 16:12:16 -05002768void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002769{
2770 if (kvm_enabled())
2771 kvm_coalesce_mmio_region(addr, size);
2772}
2773
Anthony Liguoric227f092009-10-01 16:12:16 -05002774void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002775{
2776 if (kvm_enabled())
2777 kvm_uncoalesce_mmio_region(addr, size);
2778}
2779
/* Drain any pending coalesced MMIO writes.  KVM-only; a no-op otherwise. */
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (!kvm_enabled()) {
        return;
    }
    kvm_flush_coalesced_mmio_buffer();
}
2785
Marcelo Tosattic9027602010-03-01 20:25:08 -03002786#if defined(__linux__) && !defined(TARGET_S390X)
2787
2788#include <sys/vfs.h>
2789
2790#define HUGETLBFS_MAGIC 0x958458f6
2791
2792static long gethugepagesize(const char *path)
2793{
2794 struct statfs fs;
2795 int ret;
2796
2797 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002798 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002799 } while (ret != 0 && errno == EINTR);
2800
2801 if (ret != 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002802 perror(path);
2803 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002804 }
2805
2806 if (fs.f_type != HUGETLBFS_MAGIC)
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002807 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002808
2809 return fs.f_bsize;
2810}
2811
/* Allocate the backing store for a RAM block from a temporary file on
   the hugetlbfs mount at 'path'.  On success the open file descriptor
   is stored in block->fd and the mmapped area is returned; NULL is
   returned on any failure. */
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    /* Regions smaller than one huge page cannot use huge pages. */
    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    /* Unlink immediately: the mapping keeps the file alive, and it is
       cleaned up automatically when the fd is closed or QEMU exits. */
    unlink(filename);
    free(filename);

    /* Round the size up to a whole number of huge pages. */
    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }
    /* Ownership of fd passes to the caller via the RAMBlock. */
    block->fd = fd;
    return area;
}
2880#endif
2881
/* Find the lowest-addressed gap in the ram offset space that can hold
 * @size bytes.  Offsets are allocated out of a flat space shared by all
 * RAMBlocks; blocks are not kept sorted, so for every block we scan the
 * whole list again to find the nearest block that starts after it
 * (O(n^2), acceptable for the small number of RAM blocks in practice).
 * Among all gaps large enough, the smallest one is chosen (best fit).
 * Aborts if no gap of the requested size exists.
 */
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    if (QLIST_EMPTY(&ram_list.blocks))
        return 0;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        /* End of this block; the candidate gap starts here. */
        end = block->offset + block->length;

        /* Find the start of the closest block at or above 'end'. */
        QLIST_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        /* Keep this gap if it fits and is tighter than the best so far. */
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}
2914
2915static ram_addr_t last_ram_offset(void)
2916{
Alex Williamsond17b5282010-06-25 11:08:38 -06002917 RAMBlock *block;
2918 ram_addr_t last = 0;
2919
2920 QLIST_FOREACH(block, &ram_list.blocks, next)
2921 last = MAX(last, block->offset + block->length);
2922
2923 return last;
2924}
2925
/* Allocate a new RAMBlock of @size bytes (page-aligned up) and register it
 * in ram_list under a unique id built from the device path and @name.
 *
 * If @host is non-NULL the caller provides (and owns) the backing memory
 * and RAM_PREALLOC_MASK is set so it is never freed/remapped by us.
 * Otherwise backing memory comes from, in order of configuration:
 * hugetlbfs (-mem-path), a fixed S390/KVM mmap, Xen's allocator, or plain
 * qemu_vmalloc().  Returns the block's offset in the ram address space.
 */
ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
                                   ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *new_block, *block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    /* Prefix the id with the qdev bus path so identically-named regions
     * on different devices stay distinct. */
    if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
        char *id = dev->parent_bus->info->get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* Duplicate ids would break migration; treat as a fatal bug. */
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (!strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }

    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                /* hugetlbfs failed; fall back to anonymous memory. */
                new_block->host = qemu_vmalloc(size);
                qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
            /* S390 KVM requires the topmost vma of the RAM to be smaller than
               a system defined value, which is at least 256GB. Larger systems
               have larger values. We put the guest between the end of data
               segment (system break) and this value. We use 32GB as a base to
               have enough room for the system break to grow. */
            new_block->host = mmap((void*)0x800000000, size,
                                   PROT_EXEC|PROT_READ|PROT_WRITE,
                                   MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
            if (new_block->host == MAP_FAILED) {
                fprintf(stderr, "Allocating RAM failed\n");
                abort();
            }
#else
            if (xen_enabled()) {
                /* Xen owns the memory; host stays NULL and is mapped
                 * lazily via the mapcache in qemu_get_ram_ptr(). */
                xen_ram_alloc(new_block->offset, size, mr);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
#endif
            qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
        }
    }
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    /* Grow the dirty bitmap and mark the new range fully dirty. */
    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}
3006
/* Convenience wrapper: allocate a RAMBlock with QEMU-owned backing memory
 * (host == NULL selects internal allocation in qemu_ram_alloc_from_ptr). */
ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size,
                          MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(dev, name, size, NULL, mr);
}
bellarde9a1ab12007-02-08 23:08:38 +00003012
/* Unregister the RAMBlock at @addr without releasing its backing memory.
 * Counterpart of qemu_ram_alloc_from_ptr(): the host memory belongs to
 * the caller, so only the bookkeeping structure is freed.  Silently does
 * nothing if no block starts exactly at @addr. */
void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            g_free(block);
            return;
        }
    }
}
3025
/* Unregister the RAMBlock at @addr and release its backing memory using
 * the inverse of whichever mechanism allocated it: nothing for
 * caller-preallocated blocks, munmap/close for -mem-path file backing,
 * munmap for the fixed S390/KVM mapping, mapcache invalidation for Xen,
 * qemu_vfree otherwise.  Silently does nothing if no block starts
 * exactly at @addr. */
void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (block->flags & RAM_PREALLOC_MASK) {
                ;   /* caller owns the memory; nothing to free */
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                /* fd == 0 is used as "no file backing" here (mkstemp in
                 * file_ram_alloc never legitimately returns 0 in QEMU). */
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            g_free(block);
            return;
        }
    }

}
3063
#ifndef _WIN32
/* Throw away and re-create the host mapping of guest RAM range
 * [@addr, @addr + @length) — used e.g. to recover from hardware memory
 * errors (MCE poisoning).  The fresh mapping is made with MAP_FIXED at
 * the same virtual address, using the same flags/backing the block was
 * originally allocated with.  Preallocated (caller-owned) blocks are
 * left untouched.  Exits on remap failure since the old mapping is
 * already gone at that point. */
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;   /* caller-owned memory; we must not remap it */
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
                        /* Same MAP_POPULATE/MAP_SHARED choice as the
                         * original file_ram_alloc mapping. */
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    /* MAP_FIXED mmap failed; the range is unmapped now,
                     * so we cannot safely continue running the guest. */
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */
3124
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* Unsigned wrap makes this a single-comparison range check:
         * addr < block->offset yields a huge value and fails it. */
        if (addr - block->offset < block->length) {
            /* Move this entry to the start of the list: cheap MRU
             * heuristic so hot blocks are found first next time. */
            if (block != QLIST_FIRST(&ram_list.blocks)) {
                QLIST_REMOVE(block, next);
                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
            }
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}
3165
/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * Same as qemu_get_ram_ptr but avoid reordering ramblocks
 * (safe to call while another thread iterates the list).
 */
void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}
3196
/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument.  On return *size is clamped to the number
 * of contiguous bytes actually addressable from the returned pointer
 * (i.e. up to the end of the containing RAMBlock).  Returns NULL when
 * *size == 0. */
void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QLIST_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                /* Clamp *size so the mapping never crosses the block end. */
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}
3221
/* Release a pointer obtained from qemu_get_ram_ptr()/qemu_ram_ptr_length().
 * Currently only emits a trace event; kept as a hook so mapping schemes
 * that need reference counting (e.g. Xen mapcache) can plug in. */
void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);
}
3226
/* Translate a host pointer back to a ram offset.  Stores the offset in
 * *ram_addr and returns 0 on success, -1 if @ptr does not fall inside
 * any mapped RAMBlock.  Under Xen the mapcache does the reverse lookup. */
int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped
         * (Xen blocks mapped lazily have host == NULL). */
        if (block->host == NULL) {
            continue;
        }
        /* NOTE(review): relies on the pointer difference converting to an
         * out-of-range unsigned value when host < block->host — confirm
         * block->length's type makes this comparison safe. */
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}
Alex Williamsonf471a172010-06-11 11:11:42 -06003250
Marcelo Tosattie8902612010-10-11 15:31:19 -03003251/* Some of the softmmu routines need to translate from a host pointer
3252 (typically a TLB entry) back to a ram offset. */
3253ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3254{
3255 ram_addr_t ram_addr;
Alex Williamsonf471a172010-06-11 11:11:42 -06003256
Marcelo Tosattie8902612010-10-11 15:31:19 -03003257 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3258 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3259 abort();
3260 }
3261 return ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00003262}
3263
/* Handlers for accesses to physical addresses with nothing mapped.
 * Reads return 0 and writes are discarded; targets that fault on
 * unassigned accesses (Alpha/SPARC/MicroBlaze) get a cpu_unassigned_access
 * callback with the access size (last argument: 1, 2 or 4 bytes). */
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 1);
#endif
    return 0;
}

/* 16-bit variant of unassigned_mem_readb. */
static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 2);
#endif
    return 0;
}

/* 32-bit variant of unassigned_mem_readb. */
static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 4);
#endif
    return 0;
}

/* 8-bit write to an unassigned address: value is dropped. */
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 1);
#endif
}

/* 16-bit variant of unassigned_mem_writeb. */
static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 2);
#endif
}

/* 32-bit variant of unassigned_mem_writeb. */
static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 4);
#endif
}

/* Dispatch tables indexed by log2 of access size (0=byte, 1=word, 2=long). */
static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};

static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};
3338
/* Write handlers installed on RAM pages whose dirty tracking has not yet
 * recorded a write (the "notdirty" slow path).  Each write first
 * invalidates any translated code on the page (keeping generated code
 * coherent with self-modifying guests), performs the store, then marks
 * the page dirty.  Once all dirty bits are set (0xff) the TLB entry is
 * switched back to the fast direct-RAM path via tlb_set_dirty(). */
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        /* Translated code lives here: invalidate it, then re-read the
         * flags which the invalidation may have updated. */
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stb_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

/* 16-bit variant of notdirty_mem_writeb. */
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stw_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

/* 32-bit variant of notdirty_mem_writeb. */
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stl_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

/* Placeholder read table: notdirty pages read through the fast path,
 * so these slots must never be invoked. */
static CPUReadMemoryFunc * const error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

/* Write dispatch table indexed by log2 of access size. */
static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
3410
/* Generate a debug exception if a watchpoint has been hit.
 * @offset is the in-page offset of the access, @len_mask encodes the
 * access width (~0x0 byte, ~0x1 word, ~0x3 long), @flags selects
 * BP_MEM_READ / BP_MEM_WRITE.  On a hit, the current TB is invalidated
 * and regenerated so execution can stop precisely at the access. */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        /* Match either the access covering the watchpoint or the
         * watchpoint range covering the access. */
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                /* Restore precise CPU state, then invalidate the TB so it
                 * is regenerated (stopping before or after the access). */
                cpu_restore_state(tb, env, env->mem_io_pc);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                /* longjmps back to the cpu loop; does not return here. */
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
3455
/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines. */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}

/* 16-bit watched read. */
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}

/* 32-bit watched read. */
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}

/* 8-bit watched write. */
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}

/* 16-bit watched write. */
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}

/* 32-bit watched write. */
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}

/* Dispatch tables indexed by log2 of access size. */
static CPUReadMemoryFunc * const watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc * const watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
pbrook6658ffb2007-03-16 23:58:11 +00003509
/* Common read path for subpage regions (pages split among multiple I/O
 * handlers).  Looks up the handler registered for the sub-page slot of
 * @addr, applies that slot's region offset, and forwards the read.
 * @len is log2 of the access size (0=byte, 1=word, 2=long). */
static inline uint32_t subpage_readlen (subpage_t *mmio,
                                        target_phys_addr_t addr,
                                        unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    return io_mem_read[idx][len](io_mem_opaque[idx], addr);
}
3524
/* Common write path for subpage regions; mirror of subpage_readlen.
 * @len is log2 of the access size (0=byte, 1=word, 2=long). */
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
           __func__, mmio, len, addr, idx, value);
#endif

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
}
3538
Anthony Liguoric227f092009-10-01 16:12:16 -05003539static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
blueswir1db7b5422007-05-26 17:36:03 +00003540{
blueswir1db7b5422007-05-26 17:36:03 +00003541 return subpage_readlen(opaque, addr, 0);
3542}
3543
/* Byte-sized (len index 0) write entry point for subpage regions. */
static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 0);
}
3549
/* Word-sized (len index 1) read entry point for subpage regions. */
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 1);
}
3554
/* Word-sized (len index 1) write entry point for subpage regions. */
static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 1);
}
3560
/* Long-sized (len index 2) read entry point for subpage regions. */
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 2);
}
3565
/* Long-sized (len index 2) write entry point for subpage regions. */
static void subpage_writel (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 2);
}
3571
Blue Swirld60efc62009-08-25 18:29:31 +00003572static CPUReadMemoryFunc * const subpage_read[] = {
blueswir1db7b5422007-05-26 17:36:03 +00003573 &subpage_readb,
3574 &subpage_readw,
3575 &subpage_readl,
3576};
3577
Blue Swirld60efc62009-08-25 18:29:31 +00003578static CPUWriteMemoryFunc * const subpage_write[] = {
blueswir1db7b5422007-05-26 17:36:03 +00003579 &subpage_writeb,
3580 &subpage_writew,
3581 &subpage_writel,
3582};
3583
Andreas Färber56384e82011-11-30 16:26:21 +01003584static uint32_t subpage_ram_readb(void *opaque, target_phys_addr_t addr)
3585{
3586 ram_addr_t raddr = addr;
3587 void *ptr = qemu_get_ram_ptr(raddr);
3588 return ldub_p(ptr);
3589}
3590
3591static void subpage_ram_writeb(void *opaque, target_phys_addr_t addr,
3592 uint32_t value)
3593{
3594 ram_addr_t raddr = addr;
3595 void *ptr = qemu_get_ram_ptr(raddr);
3596 stb_p(ptr, value);
3597}
3598
3599static uint32_t subpage_ram_readw(void *opaque, target_phys_addr_t addr)
3600{
3601 ram_addr_t raddr = addr;
3602 void *ptr = qemu_get_ram_ptr(raddr);
3603 return lduw_p(ptr);
3604}
3605
3606static void subpage_ram_writew(void *opaque, target_phys_addr_t addr,
3607 uint32_t value)
3608{
3609 ram_addr_t raddr = addr;
3610 void *ptr = qemu_get_ram_ptr(raddr);
3611 stw_p(ptr, value);
3612}
3613
3614static uint32_t subpage_ram_readl(void *opaque, target_phys_addr_t addr)
3615{
3616 ram_addr_t raddr = addr;
3617 void *ptr = qemu_get_ram_ptr(raddr);
3618 return ldl_p(ptr);
3619}
3620
3621static void subpage_ram_writel(void *opaque, target_phys_addr_t addr,
3622 uint32_t value)
3623{
3624 ram_addr_t raddr = addr;
3625 void *ptr = qemu_get_ram_ptr(raddr);
3626 stl_p(ptr, value);
3627}
3628
3629static CPUReadMemoryFunc * const subpage_ram_read[] = {
3630 &subpage_ram_readb,
3631 &subpage_ram_readw,
3632 &subpage_ram_readl,
3633};
3634
3635static CPUWriteMemoryFunc * const subpage_ram_write[] = {
3636 &subpage_ram_writeb,
3637 &subpage_ram_writew,
3638 &subpage_ram_writel,
3639};
3640
/* Point the slots covering [start, end] (inclusive byte offsets within
 * the subpage) at the io-mem entry 'memory', applying 'region_offset'.
 * Returns 0 on success, -1 if the range lies outside the page. */
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    /* Plain RAM has no io-mem handlers of its own; route it through the
       dedicated subpage-RAM pseudo-device instead. */
    if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        memory = IO_MEM_SUBPAGE_RAM;
    }
    /* Strip the phys_offset-style token down to a bare io-mem table index. */
    memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    for (; idx <= eidx; idx++) {
        mmio->sub_io_index[idx] = memory;
        mmio->region_offset[idx] = region_offset;
    }

    return 0;
}
3665
/* Allocate and register a subpage container for the page at 'base'.
 * On return *phys holds the io-mem token (tagged IO_MEM_SUBPAGE) to be
 * stored in the page's phys_offset; the whole page is initially mapped
 * to 'orig_memory'/'region_offset'. */
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio,
                                            DEVICE_NATIVE_ENDIAN);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    /* Cover the entire page with the original mapping by default. */
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);

    return mmio;
}
3687
aliguori88715652009-02-11 15:20:58 +00003688static int get_free_io_mem_idx(void)
3689{
3690 int i;
3691
3692 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3693 if (!io_mem_used[i]) {
3694 io_mem_used[i] = 1;
3695 return i;
3696 }
Riku Voipioc6703b42009-12-03 15:56:05 +02003697 fprintf(stderr, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
aliguori88715652009-02-11 15:20:58 +00003698 return -1;
3699}
3700
Alexander Grafdd310532010-12-08 12:05:36 +01003701/*
3702 * Usually, devices operate in little endian mode. There are devices out
3703 * there that operate in big endian too. Each device gets byte swapped
3704 * mmio if plugged onto a CPU that does the other endianness.
3705 *
3706 * CPU Device swap?
3707 *
3708 * little little no
3709 * little big yes
3710 * big little yes
3711 * big big no
3712 */
3713
/* Saved copy of a device's original mmio callbacks and opaque pointer,
 * installed as the io-mem opaque when byte-swapping wrappers are in
 * place (index 0/1/2 = byte/word/long handlers). */
typedef struct SwapEndianContainer {
    CPUReadMemoryFunc *read[3];
    CPUWriteMemoryFunc *write[3];
    void *opaque;
} SwapEndianContainer;
3719
3720static uint32_t swapendian_mem_readb (void *opaque, target_phys_addr_t addr)
3721{
3722 uint32_t val;
3723 SwapEndianContainer *c = opaque;
3724 val = c->read[0](c->opaque, addr);
3725 return val;
3726}
3727
3728static uint32_t swapendian_mem_readw(void *opaque, target_phys_addr_t addr)
3729{
3730 uint32_t val;
3731 SwapEndianContainer *c = opaque;
3732 val = bswap16(c->read[1](c->opaque, addr));
3733 return val;
3734}
3735
3736static uint32_t swapendian_mem_readl(void *opaque, target_phys_addr_t addr)
3737{
3738 uint32_t val;
3739 SwapEndianContainer *c = opaque;
3740 val = bswap32(c->read[2](c->opaque, addr));
3741 return val;
3742}
3743
/* Read handlers installed over a device's own table by swapendian_init. */
static CPUReadMemoryFunc * const swapendian_readfn[3]={
    swapendian_mem_readb,
    swapendian_mem_readw,
    swapendian_mem_readl
};
3749
3750static void swapendian_mem_writeb(void *opaque, target_phys_addr_t addr,
3751 uint32_t val)
3752{
3753 SwapEndianContainer *c = opaque;
3754 c->write[0](c->opaque, addr, val);
3755}
3756
3757static void swapendian_mem_writew(void *opaque, target_phys_addr_t addr,
3758 uint32_t val)
3759{
3760 SwapEndianContainer *c = opaque;
3761 c->write[1](c->opaque, addr, bswap16(val));
3762}
3763
3764static void swapendian_mem_writel(void *opaque, target_phys_addr_t addr,
3765 uint32_t val)
3766{
3767 SwapEndianContainer *c = opaque;
3768 c->write[2](c->opaque, addr, bswap32(val));
3769}
3770
/* Write handlers installed over a device's own table by swapendian_init. */
static CPUWriteMemoryFunc * const swapendian_writefn[3]={
    swapendian_mem_writeb,
    swapendian_mem_writew,
    swapendian_mem_writel
};
3776
/* Interpose byte-swapping wrappers on io-mem slot io_index: the device's
 * original handlers and opaque are stashed in a SwapEndianContainer,
 * which then becomes the slot's opaque.  Allocation is freed by
 * swapendian_del(). */
static void swapendian_init(int io_index)
{
    SwapEndianContainer *c = g_malloc(sizeof(SwapEndianContainer));
    int i;

    /* Swap mmio for big endian targets */
    c->opaque = io_mem_opaque[io_index];
    for (i = 0; i < 3; i++) {
        /* Save the device's handler, then replace it with the wrapper. */
        c->read[i] = io_mem_read[io_index][i];
        c->write[i] = io_mem_write[io_index][i];

        io_mem_read[io_index][i] = swapendian_readfn[i];
        io_mem_write[io_index][i] = swapendian_writefn[i];
    }
    io_mem_opaque[io_index] = c;
}
3793
/* Free the SwapEndianContainer for io_index, if one was installed.
 * The read-table check detects whether swapendian_init() ran for this
 * slot; callers (cpu_unregister_io_memory) reset the tables afterwards. */
static void swapendian_del(int io_index)
{
    if (io_mem_read[io_index][0] == swapendian_readfn[0]) {
        g_free(io_mem_opaque[io_index]);
    }
}
3800
bellard33417e72003-08-10 21:47:01 +00003801/* mem_read and mem_write are arrays of functions containing the
3802 function to access byte (index 0), word (index 1) and dword (index
Paul Brook0b4e6e32009-04-30 18:37:55 +01003803 2). Functions can be omitted with a NULL function pointer.
blueswir13ee89922008-01-02 19:45:26 +00003804 If io_index is non zero, the corresponding io zone is
blueswir14254fab2008-01-01 16:57:19 +00003805 modified. If it is zero, a new io zone is allocated. The return
3806 value can be used with cpu_register_physical_memory(). (-1) is
3807 returned if error. */
/* Install mem_read/mem_write handler tables in the io-mem slot given by
 * io_index (a shifted token; 0 means "allocate a fresh slot").  NULL
 * entries fall back to the unassigned-memory handlers.  Returns the
 * shifted slot token for use with cpu_register_physical_memory(), or -1
 * on exhaustion/overflow. */
static int cpu_register_io_memory_fixed(int io_index,
                                        CPUReadMemoryFunc * const *mem_read,
                                        CPUWriteMemoryFunc * const *mem_write,
                                        void *opaque, enum device_endian endian)
{
    int i;

    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        /* Caller passed a shifted token; recover the raw table index. */
        io_index >>= IO_MEM_SHIFT;
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for (i = 0; i < 3; ++i) {
        io_mem_read[io_index][i]
            = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
    }
    for (i = 0; i < 3; ++i) {
        io_mem_write[io_index][i]
            = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
    }
    io_mem_opaque[io_index] = opaque;

    /* Interpose byte-swapping wrappers when the device's endianness
       differs from the target CPU's (see the table above SwapEndianContainer). */
    switch (endian) {
    case DEVICE_BIG_ENDIAN:
#ifndef TARGET_WORDS_BIGENDIAN
        swapendian_init(io_index);
#endif
        break;
    case DEVICE_LITTLE_ENDIAN:
#ifdef TARGET_WORDS_BIGENDIAN
        swapendian_init(io_index);
#endif
        break;
    case DEVICE_NATIVE_ENDIAN:
    default:
        break;
    }

    return (io_index << IO_MEM_SHIFT);
}
bellard61382a52003-10-27 21:22:23 +00003853
/* Public entry point: register mmio handlers in a freshly allocated
 * io-mem slot (io_index 0 = allocate).  See cpu_register_io_memory_fixed. */
int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
                           CPUWriteMemoryFunc * const *mem_write,
                           void *opaque, enum device_endian endian)
{
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque, endian);
}
3860
aliguori88715652009-02-11 15:20:58 +00003861void cpu_unregister_io_memory(int io_table_address)
3862{
3863 int i;
3864 int io_index = io_table_address >> IO_MEM_SHIFT;
3865
Alexander Grafdd310532010-12-08 12:05:36 +01003866 swapendian_del(io_index);
3867
aliguori88715652009-02-11 15:20:58 +00003868 for (i=0;i < 3; i++) {
3869 io_mem_read[io_index][i] = unassigned_mem_read[i];
3870 io_mem_write[io_index][i] = unassigned_mem_write[i];
3871 }
3872 io_mem_opaque[io_index] = NULL;
3873 io_mem_used[io_index] = 0;
3874}
3875
/* Populate the fixed io-mem slots (ROM, unassigned, not-dirty,
 * subpage-RAM) and the watchpoint slot at startup. */
static void io_mem_init(void)
{
    int i;

    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
                                 unassigned_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
                                 unassigned_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
                                 notdirty_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    cpu_register_io_memory_fixed(IO_MEM_SUBPAGE_RAM, subpage_ram_read,
                                 subpage_ram_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    /* Reserve slots 0..4 so get_free_io_mem_idx() never hands out a
       fixed slot.  NOTE(review): the constant 5 presumably tracks the
       number of fixed IO_MEM_* slots above — confirm if more are added. */
    for (i=0; i<5; i++)
        io_mem_used[i] = 1;

    io_mem_watch = cpu_register_io_memory(watch_mem_read,
                                          watch_mem_write, NULL,
                                          DEVICE_NATIVE_ENDIAN);
}
3899
/* Create the root "system" (RAM/MMIO) and "io" (port I/O, 64K) memory
 * regions and install them as the global address maps. */
static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, "system", INT64_MAX);
    set_system_memory_map(system_memory);

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init(system_io, "io", 65536);
    set_system_io_map(system_io);
}
3910
/* Accessor for the root system memory region created by memory_map_init. */
MemoryRegion *get_system_memory(void)
{
    return system_memory;
}
3915
/* Accessor for the root I/O-port region created by memory_map_init. */
MemoryRegion *get_system_io(void)
{
    return system_io;
}
3920
pbrooke2eef172008-06-08 01:09:01 +00003921#endif /* !defined(CONFIG_USER_ONLY) */
3922
bellard13eb76e2004-01-24 15:23:36 +00003923/* physical memory access (slow version, mainly for debug) */
3924#if defined(CONFIG_USER_ONLY)
/* Debugger access to guest memory (user-mode emulation variant): copy
 * 'len' bytes between 'buf' and guest virtual address 'addr', honouring
 * per-page PAGE_VALID/PAGE_READ/PAGE_WRITE flags.  Returns 0 on success,
 * -1 on any invalid or inaccessible page (a partial copy may have
 * happened by then). */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        /* Process at most one guest page per iteration. */
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            /* Read-only access: nothing to copy back on unlock. */
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
bellard8df1cd02005-01-28 22:37:22 +00003963
bellard13eb76e2004-01-24 15:23:36 +00003964#else
/* Copy 'len' bytes between 'buf' and guest physical address 'addr'
 * (system emulation).  RAM pages are accessed via memcpy with dirty
 * tracking / TB invalidation on writes; MMIO pages are split into the
 * widest aligned 32/16/8-bit accesses and routed through the io-mem
 * handler tables. */
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    ram_addr_t pd;
    PhysPageDesc *p;

    while (len > 0) {
        /* Clamp each chunk to the remainder of the current page. */
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                /* MMIO write: rebase into the device region, then issue
                   the widest naturally-aligned access possible. */
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                ram_addr_t addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                qemu_put_ram_ptr(ptr);
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
                memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
                qemu_put_ram_ptr(ptr);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
bellard8df1cd02005-01-28 22:37:22 +00004062
bellardd0ecd2a2006-04-23 17:14:48 +00004063/* used for ROM loading : can write in RAM and ROM */
/* used for ROM loading : can write in RAM and ROM */
/* Like the write side of cpu_physical_memory_rw(), but also writes into
 * ROM/ROMD pages (for firmware/kernel loading).  Pages that are neither
 * RAM nor ROM are silently skipped; no dirty tracking is performed. */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            qemu_put_ram_ptr(ptr);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
4102
/* Bounce buffer used by cpu_physical_memory_map() when the requested
 * range is not directly-mappable RAM.  A non-NULL 'buffer' marks it in
 * use; only one bounce mapping can exist at a time. */
typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;
4110
/* A client waiting to be notified when the (single) bounce buffer is
 * released, so a failed cpu_physical_memory_map() can be retried. */
typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00004119
4120void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
4121{
Anthony Liguori7267c092011-08-20 22:09:37 -05004122 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00004123
4124 client->opaque = opaque;
4125 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00004126 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00004127 return client;
4128}
4129
4130void cpu_unregister_map_client(void *_client)
4131{
4132 MapClient *client = (MapClient *)_client;
4133
Blue Swirl72cf2d42009-09-12 07:36:22 +00004134 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05004135 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00004136}
4137
4138static void cpu_notify_map_clients(void)
4139{
4140 MapClient *client;
4141
Blue Swirl72cf2d42009-09-12 07:36:22 +00004142 while (!QLIST_EMPTY(&map_client_list)) {
4143 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00004144 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09004145 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00004146 }
4147}
4148
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t todo = 0;   /* bytes of contiguous RAM mapped so far */
    int l;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    ram_addr_t raddr = RAM_ADDR_MAX;
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            /* Non-RAM page: fall back to the single global bounce buffer,
               unless some RAM was already mapped (then return just that)
               or the bounce buffer is busy. */
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                /* Pre-fill the bounce buffer for read mappings. */
                cpu_physical_memory_read(addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            /* Remember the ram address of the first page. */
            raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        }

        len -= l;
        addr += l;
        todo += l;
    }
    /* Direct-map the contiguous RAM run (rlen may come back shorter). */
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}
4209
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        /* Direct RAM mapping: update dirty tracking page by page. */
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    /* Bounce-buffer path: flush writes back, release the buffer and
       wake any clients waiting to map. */
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
bellardd0ecd2a2006-04-23 17:14:48 +00004248
/* warning: addr must be aligned */
/* Load a 32-bit value from guest physical memory in the requested
 * device endianness, going through the io-mem handlers for MMIO pages
 * and straight to host memory for RAM/ROMD pages. */
static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        /* The handler returns target-endian data; swap if the caller
           asked for the other order. */
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}
4300
/* 32-bit physical load in target-native byte order. */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}
4305
4306uint32_t ldl_le_phys(target_phys_addr_t addr)
4307{
4308 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4309}
4310
4311uint32_t ldl_be_phys(target_phys_addr_t addr)
4312{
4313 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
4314}
4315
bellard84b7b8e2005-11-28 21:19:04 +00004316/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004317static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
4318 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00004319{
4320 int io_index;
4321 uint8_t *ptr;
4322 uint64_t val;
4323 unsigned long pd;
4324 PhysPageDesc *p;
4325
4326 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4327 if (!p) {
4328 pd = IO_MEM_UNASSIGNED;
4329 } else {
4330 pd = p->phys_offset;
4331 }
ths3b46e622007-09-17 08:09:54 +00004332
bellard2a4188a2006-06-25 21:54:59 +00004333 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
4334 !(pd & IO_MEM_ROMD)) {
bellard84b7b8e2005-11-28 21:19:04 +00004335 /* I/O case */
4336 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00004337 if (p)
4338 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004339
4340 /* XXX This is broken when device endian != cpu endian.
4341 Fix and add "endian" variable check */
bellard84b7b8e2005-11-28 21:19:04 +00004342#ifdef TARGET_WORDS_BIGENDIAN
4343 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
4344 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
4345#else
4346 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
4347 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
4348#endif
4349 } else {
4350 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00004351 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bellard84b7b8e2005-11-28 21:19:04 +00004352 (addr & ~TARGET_PAGE_MASK);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004353 switch (endian) {
4354 case DEVICE_LITTLE_ENDIAN:
4355 val = ldq_le_p(ptr);
4356 break;
4357 case DEVICE_BIG_ENDIAN:
4358 val = ldq_be_p(ptr);
4359 break;
4360 default:
4361 val = ldq_p(ptr);
4362 break;
4363 }
bellard84b7b8e2005-11-28 21:19:04 +00004364 }
4365 return val;
4366}
4367
/* Load a 64-bit value at physical @addr in target-native byte order. */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

/* Load a 64-bit little-endian value at physical @addr. */
uint64_t ldq_le_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

/* Load a 64-bit big-endian value at physical @addr. */
uint64_t ldq_be_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
4382
/* XXX: optimize */
/* Load a single byte at physical @addr (no alignment or endianness
   concerns); routed through the generic rw path rather than a fast path. */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}
4390
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004391/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004392static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
4393 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00004394{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004395 int io_index;
4396 uint8_t *ptr;
4397 uint64_t val;
4398 unsigned long pd;
4399 PhysPageDesc *p;
4400
4401 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4402 if (!p) {
4403 pd = IO_MEM_UNASSIGNED;
4404 } else {
4405 pd = p->phys_offset;
4406 }
4407
4408 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
4409 !(pd & IO_MEM_ROMD)) {
4410 /* I/O case */
4411 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4412 if (p)
4413 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4414 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004415#if defined(TARGET_WORDS_BIGENDIAN)
4416 if (endian == DEVICE_LITTLE_ENDIAN) {
4417 val = bswap16(val);
4418 }
4419#else
4420 if (endian == DEVICE_BIG_ENDIAN) {
4421 val = bswap16(val);
4422 }
4423#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004424 } else {
4425 /* RAM case */
4426 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4427 (addr & ~TARGET_PAGE_MASK);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004428 switch (endian) {
4429 case DEVICE_LITTLE_ENDIAN:
4430 val = lduw_le_p(ptr);
4431 break;
4432 case DEVICE_BIG_ENDIAN:
4433 val = lduw_be_p(ptr);
4434 break;
4435 default:
4436 val = lduw_p(ptr);
4437 break;
4438 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004439 }
4440 return val;
bellardaab33092005-10-30 20:48:42 +00004441}
4442
/* Load a 16-bit value at physical @addr in target-native byte order. */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

/* Load a 16-bit little-endian value at physical @addr. */
uint32_t lduw_le_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

/* Load a 16-bit big-endian value at physical @addr. */
uint32_t lduw_be_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
4457
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
/* Store a 32-bit value at physical @addr without the usual dirty-bit
   bookkeeping.  Device memory is forwarded to the MMIO write callback;
   RAM is written directly. */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    /* Look up the page descriptor; unmapped pages go to unassigned I/O. */
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* I/O (or ROM) case: [2] selects the 4-byte write handler. */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        /* Exception: while migration is live the dirty bitmap must stay
           accurate so the page is re-sent, so do the usual invalidate +
           mark-dirty dance (CODE_DIRTY_FLAG excluded) after all. */
        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
4496
/* Store a 64-bit value at physical @addr without dirty-bit bookkeeping
   (companion to stl_phys_notdirty; see its warning about alignment and
   dirty tracking).  There is no 64-bit MMIO slot, so the I/O path issues
   two 32-bit writes in target byte order.
   NOTE(review): unlike the ld*/st*_phys_internal family this helper has
   no device_endian parameter, so the split assumes device order matches
   the target — presumably fine for its PTE-tracking users; verify if new
   callers appear. */
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    /* Look up the page descriptor; unmapped pages go to unassigned I/O. */
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        /* High half first on big-endian targets, low half first otherwise. */
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        /* RAM case: direct host store, deliberately no dirty marking. */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
4528
/* warning: addr must be aligned */
/* Store a 32-bit value at guest physical address @addr.
 *
 * @endian selects the byte order in which the value is written (native /
 * LE / BE).  Device memory is forwarded to the MMIO write callback; RAM
 * is written directly and the dirty bitmap / TB cache are updated.
 */
static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    /* Look up the page descriptor; unmapped pages go to unassigned I/O. */
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        /* MMIO callbacks take data in target byte order; pre-swap when the
           caller requested the opposite order. */
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        /* Writing RAM may clobber translated code: invalidate overlapping
           TBs and mark the page dirty (all flags except CODE_DIRTY_FLAG). */
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}
4584
/* Store a 32-bit value at physical @addr in target-native byte order. */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

/* Store a 32-bit value at physical @addr in little-endian byte order. */
void stl_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

/* Store a 32-bit value at physical @addr in big-endian byte order. */
void stl_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
4599
/* XXX: optimize */
/* Store a single byte at physical @addr (no alignment or endianness
   concerns); routed through the generic rw path rather than a fast path. */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}
4606
/* warning: addr must be aligned */
/* Store a 16-bit value at guest physical address @addr.
 *
 * @endian selects the byte order in which the value is written (native /
 * LE / BE).  Device memory goes to the MMIO write callback ([1] = 2-byte
 * handler); RAM is written directly with dirty/TB bookkeeping.
 */
static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    /* Look up the page descriptor; unmapped pages go to unassigned I/O. */
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        /* MMIO callbacks take data in target byte order; pre-swap when the
           caller requested the opposite order. */
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        /* Writing RAM may clobber translated code: invalidate overlapping
           TBs and mark the page dirty (all flags except CODE_DIRTY_FLAG). */
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}
4662
/* Store a 16-bit value at physical @addr in target-native byte order. */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

/* Store a 16-bit value at physical @addr in little-endian byte order. */
void stw_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

/* Store a 16-bit value at physical @addr in big-endian byte order. */
void stw_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
4677
/* XXX: optimize */
/* Store a 64-bit value at physical @addr in target-native byte order;
   the swap-then-write goes through the generic rw path. */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

/* Store a 64-bit value at physical @addr in little-endian byte order. */
void stq_le_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

/* Store a 64-bit value at physical @addr in big-endian byte order. */
void stq_be_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}
4696
aliguori5e2972f2009-03-28 17:51:36 +00004697/* virtual memory access for debug (includes writing to ROM) */
ths5fafdf22007-09-16 21:08:06 +00004698int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00004699 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00004700{
4701 int l;
Anthony Liguoric227f092009-10-01 16:12:16 -05004702 target_phys_addr_t phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00004703 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00004704
4705 while (len > 0) {
4706 page = addr & TARGET_PAGE_MASK;
4707 phys_addr = cpu_get_phys_page_debug(env, page);
4708 /* if no physical page mapped, return an error */
4709 if (phys_addr == -1)
4710 return -1;
4711 l = (page + TARGET_PAGE_SIZE) - addr;
4712 if (l > len)
4713 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00004714 phys_addr += (addr & ~TARGET_PAGE_MASK);
aliguori5e2972f2009-03-28 17:51:36 +00004715 if (is_write)
4716 cpu_physical_memory_write_rom(phys_addr, buf, l);
4717 else
aliguori5e2972f2009-03-28 17:51:36 +00004718 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
bellard13eb76e2004-01-24 15:23:36 +00004719 len -= l;
4720 buf += l;
4721 addr += l;
4722 }
4723 return 0;
4724}
Paul Brooka68fe892010-03-01 00:08:59 +00004725#endif
bellard13eb76e2004-01-24 15:23:36 +00004726
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
/* Called from an MMIO access that happened mid-TB while icount is
   active: retranslate the current TB so that it ends exactly on the I/O
   instruction, then restart execution.  @retaddr is the host return
   address inside the generated code, used to locate the TB and the
   faulting guest instruction. */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p", 
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    /* Roll the CPU state back to the faulting instruction. */
    cpu_restore_state(tb, env, (unsigned long)retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred. */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn. */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch. */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    /* CF_LAST_IO forces the I/O instruction to be the last in the TB. */
    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    /* Drop the old TB and regenerate it with the new instruction count. */
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB. */
    cpu_resume_from_signal(env, NULL);
}
4785
Paul Brookb3755a92010-03-12 16:54:58 +00004786#if !defined(CONFIG_USER_ONLY)
4787
/* Print translation-buffer statistics (TB counts, code-size expansion
   ratio, cross-page and direct-jump percentages, flush counters) to @f
   using @cpu_fprintf, then append the TCG backend's own stats.  Used by
   the monitor's "info jit" command. */
void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    /* Accumulate per-TB statistics over every live translation block. */
    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        /* a second page address means the TB spans a page boundary */
        if (tb->page_addr[1] != -1)
            cross_page++;
        /* tb_next_offset != 0xffff marks a patched direct-jump slot */
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n", 
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    /* Let the code generator report its own counters too. */
    tcg_dump_info(f, cpu_fprintf);
}
4839
bellard61382a52003-10-27 21:22:23 +00004840#define MMUSUFFIX _cmmu
Blue Swirl39171492011-09-21 18:13:16 +00004841#undef GETPC
bellard61382a52003-10-27 21:22:23 +00004842#define GETPC() NULL
4843#define env cpu_single_env
bellardb769d8f2004-10-03 15:07:13 +00004844#define SOFTMMU_CODE_ACCESS
bellard61382a52003-10-27 21:22:23 +00004845
4846#define SHIFT 0
4847#include "softmmu_template.h"
4848
4849#define SHIFT 1
4850#include "softmmu_template.h"
4851
4852#define SHIFT 2
4853#include "softmmu_template.h"
4854
4855#define SHIFT 3
4856#include "softmmu_template.h"
4857
4858#undef env
4859
4860#endif