/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump.  ARM and Sparc64
   have limited branch ranges (possibly also PPC), so place it in a
   section close to the code segment. */
#define code_gen_section \
    __attribute__((__section__(".gen_code"))) \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section \
    __attribute__((aligned (16)))
#else
#define code_gen_section \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed. */
int64_t qemu_icount;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self-modifying code, we count the number
       of lookups we do to a given page to decide when to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define P_L1_BITS_REM \
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

/* Size of the L1 page table.  Avoid silly small sizes.  */
#if P_L1_BITS_REM < 4
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
#else
#define P_L1_BITS  P_L1_BITS_REM
#endif

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
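/* Illustrative example (the actual values are target-dependent): with
   TARGET_PAGE_BITS == 12, L2_BITS == 10 and a 36-bit physical address
   space, P_L1_BITS_REM == (36 - 12) % 10 == 4, so P_L1_BITS == 4 and
   P_L1_SHIFT == 20: a physical page index is split into a 4-bit L1
   index, one 10-bit intermediate level and a 10-bit bottom level. */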

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to PhysPageDesc.  */
static void *l1_phys_map[P_L1_SIZE];

static void io_mem_init(void);
static void memory_map_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE) \
    do { \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
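/* Example use (illustrative only, not part of the original file;
   'example_addr' is a hypothetical guest page address): */
#if 0
static void page_find_example(tb_page_addr_t example_addr)
{
    PageDesc *pd = page_find(example_addr >> TARGET_PAGE_BITS);
    if (pd && pd->first_tb) {
        /* at least one TB has been translated from this guest page */
    }
}
#endif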

#if !defined(CONFIG_USER_ONLY)
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            *lp = p = g_malloc0(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        int i;

        if (!alloc) {
            return NULL;
        }

        *lp = pd = g_malloc(sizeof(PhysPageDesc) * L2_SIZE);

        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode; this will change once a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
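    /* Reserve room for one maximum-size TB's worth of generated code at
       the end of the buffer, so that a translation in progress cannot
       overrun it (see the overflow check in tb_alloc below). */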
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now. */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block.  Returns NULL when there are too
   many translation blocks or too much generated code; the caller must
   then flush the translation buffer (see tb_gen_code). */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

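/* The TB lists below store tagged pointers: the low two bits of each
   link select which of a TB's (at most two) pages the link belongs to
   (0 or 1), while the value 2 marks the head of a circular jump list
   (tb->jmp_first is the TB itself ORed with 2). */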
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the per-CPU tb_jmp_cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

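/* Worked example for set_bits() below: set_bits(tab, 3, 7) marks
   bits 3..9 of the bitmap, i.e. tab[0] |= 0xf8 and tab[1] |= 0x03. */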
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size +
                             CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

/* invalidate all TBs which intersect with the target physical address
   range [start;end[.  NOTE: start and end must refer to the same
   physical page.  'is_cpu_write_access' should be true if called from
   a real cpu write access: the virtual CPU will exit the current TB if
   code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution.  We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

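/* Worked example for the fast path below: a 4-byte write at offset
   0x100 of a page that has a code bitmap tests
   (code_bitmap[0x20] >> 0) & 0xf, i.e. the bits covering byte
   offsets 0x100..0x103 of the page. */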
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution.  We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done. */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
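
/* A TB whose guest code crosses a page boundary is registered in both
   pages' lists above, so invalidating either page removes the TB.
   This is what keeps self-modifying-code detection correct for
   cross-page blocks. */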

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
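
/* The binary search above is valid because tbs[] is filled in
   allocation order from the linearly bumped code_gen_buffer, so
   tc_ptr values increase monotonically with the array index. */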

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for (;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for (;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
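
/* jmp_first and the jmp_next[] entries form a circular singly linked
   list of all TBs that jump into a given TB. The low two bits of each
   stored pointer encode which outgoing jump slot (0 or 1) of the
   pointed-to TB is on the list; the value 2 tags the head kept in
   jmp_first, which is why the loops above mask with ~3 and stop on
   n1 == 2. */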

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint. */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
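
/* Example of the sanity check above: len = 4 gives len_mask = ~3, so
   (addr & ~len_mask) == (addr & 3) must be zero, i.e. a 4-byte
   watchpoint must be 4-byte aligned. The same len_mask value is what
   cpu_watchpoint_remove() matches against below. */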

/* Remove a specific watchpoint. */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference. */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints. */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint. */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint. */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference. */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

/* enable or disable low-level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif !defined(_WIN32)
        /* Win32 doesn't support line-buffering and requires size >= 2 */
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
       problem and hope the cpu will stop of its own accord. For userspace
       emulation this often isn't actually as bad as it sounds. Often
       signals are used primarily to interrupt blocking syscalls. */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}

#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
        return;
    }

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu_unlink_tb(env);
    }
}
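
/* Note: storing 0xffff in icount_decr.u16.high drives the combined
   32-bit icount decrementer negative, so the next icount check in
   generated code exits the CPU loop without having to unchain TBs. */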

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *env, int mask)
{
    env->interrupt_request |= mask;
    cpu_unlink_tb(env);
}
#endif /* CONFIG_USER_ONLY */

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}

const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

#ifndef CONFIG_USER_ONLY
static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
    = QLIST_HEAD_INITIALIZER(memory_client_list);

static void cpu_notify_set_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset,
                                  bool log_dirty)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        client->set_memory(client, start_addr, size, phys_offset, log_dirty);
    }
}

static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
                                        target_phys_addr_t end)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        int r = client->sync_dirty_bitmap(client, start, end);
        if (r < 0)
            return r;
    }
    return 0;
}

static int cpu_notify_migration_log(int enable)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        int r = client->migration_log(client, enable);
        if (r < 0)
            return r;
    }
    return 0;
}

struct last_map {
    target_phys_addr_t start_addr;
    ram_addr_t size;
    ram_addr_t phys_offset;
};

/* The l1_phys_map provides the upper P_L1_BITs of the guest physical
 * address. Each intermediate table provides the next L2_BITs of guest
 * physical address space. The number of levels varies based on host and
 * guest configuration, making it efficient to build the final guest
 * physical address by seeding the L1 offset and shifting and adding in
 * each L2 offset as we recurse through them. */
static void phys_page_for_each_1(CPUPhysMemoryClient *client, int level,
                                 void **lp, target_phys_addr_t addr,
                                 struct last_map *map)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PhysPageDesc *pd = *lp;
        addr <<= L2_BITS + TARGET_PAGE_BITS;
        for (i = 0; i < L2_SIZE; ++i) {
            if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
                target_phys_addr_t start_addr = addr | i << TARGET_PAGE_BITS;

                if (map->size &&
                    start_addr == map->start_addr + map->size &&
                    pd[i].phys_offset == map->phys_offset + map->size) {

                    map->size += TARGET_PAGE_SIZE;
                    continue;
                } else if (map->size) {
                    client->set_memory(client, map->start_addr,
                                       map->size, map->phys_offset, false);
                }

                map->start_addr = start_addr;
                map->size = TARGET_PAGE_SIZE;
                map->phys_offset = pd[i].phys_offset;
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            phys_page_for_each_1(client, level - 1, pp + i,
                                 (addr << L2_BITS) | i, map);
        }
    }
}

static void phys_page_for_each(CPUPhysMemoryClient *client)
{
    int i;
    struct last_map map = { };

    for (i = 0; i < P_L1_SIZE; ++i) {
        phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
                             l1_phys_map + i, i, &map);
    }
    if (map.size) {
        client->set_memory(client, map.start_addr, map.size, map.phys_offset,
                           false);
    }
}
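
/* The struct last_map accumulator above lets the walk merge runs of
   pages that are contiguous in both guest physical address and
   phys_offset into a single set_memory() callback, rather than
   notifying the client once per TARGET_PAGE_SIZE page; the final
   pending run is flushed after the loop. */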

void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_INSERT_HEAD(&memory_client_list, client, list);
    phys_page_for_each(client);
}

void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_REMOVE(client, list);
}
#endif

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma-separated list of log masks. Returns 0 on error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for (;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for (item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for (item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
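
/* Usage example: cpu_str_to_log_mask("in_asm,op") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_OP, while "all" selects the mask of
   every entry in cpu_log_items[]. */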

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints. The list heads copied by the memcpy
       above still alias the parent's lists, so reset the clone's heads
       and re-insert copies of the parent's entries.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&new_env->breakpoints);
    QTAILQ_INIT(&new_env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page. */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
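
/* Two bucket ranges are cleared above because the jump cache is hashed
   by page: a TB that starts on the preceding page can extend into the
   flushed page, so that page's entries must be discarded as well. */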

static CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read  = -1,
    .addr_write = -1,
    .addr_code  = -1,
    .addend     = -1,
};

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for (i = 0; i < CPU_TLB_SIZE; i++) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
        }
    }

    memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));

    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        *tlb_entry = s_cputlb_empty_entry;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* Check if we need to flush due to large pages. */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        tlb_flush(env, 1);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

    tlb_flush_jmp_cache(env, addr);
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self-modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}

/* Note: start and end must be within the same ram block. */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below. */
    if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
        != (end - 1) - start) {
        abort();
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for (i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
        }
    }
}

int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    ret = cpu_notify_migration_log(!!enable);
    return ret;
}

int cpu_physical_memory_get_dirty_tracking(void)
{
    return in_migration;
}

int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
                                   target_phys_addr_t end_addr)
{
    int ret;

    ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
    return ret;
}

int cpu_physical_log_start(target_phys_addr_t start_addr,
                           ram_addr_t size)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        if (client->log_start) {
            int r = client->log_start(client, start_addr, size);
            if (r < 0) {
                return r;
            }
        }
    }
    return 0;
}

int cpu_physical_log_stop(target_phys_addr_t start_addr,
                          ram_addr_t size)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        if (client->log_stop) {
            int r = client->log_stop(client, start_addr, size);
            if (r < 0) {
                return r;
            }
        }
    }
    return 0;
}

static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;
    void *p;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
            + tlb_entry->addend);
        ram_addr = qemu_ram_addr_from_host_nofail(p);
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    int mmu_idx;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        for (i = 0; i < CPU_TLB_SIZE; i++)
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
    }
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated. */
static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB. */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}
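
/* Worked example: with an existing tracked region at 0x200000 under
   mask 0xffff0000 (64 KiB), adding another 64 KiB page at 0x280000
   widens the mask one bit at a time until both addresses agree under
   it, ending with mask 0xfff00000 and region base 0x200000, so any
   flush within that 1 MiB span now forces the conservative full
   flush. */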

/* Add a new TLB entry. At most one entry for a given virtual address
   is permitted. Only a single TARGET_PAGE_SIZE region is mapped; the
   supplied size is only used by tlb_flush_page. */
void tlb_set_page(CPUState *env, target_ulong vaddr,
                  target_phys_addr_t paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    unsigned long addend;
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    target_phys_addr_t iotlb;

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
           " prot=%x idx=%d pd=0x%08lx\n",
           vaddr, paddr, prot, mmu_idx, pd);
#endif

    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM. */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region. This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address. */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        if (p) {
            iotlb += p->region_offset;
        } else {
            iotlb += paddr;
        }
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines. */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = io_mem_watch + paddr;
                address |= TLB_MMIO;
                break;
            }
        }
    }

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback. */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}
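
/* Summary of the iotlb encoding chosen above: for RAM and ROM the
   entry holds the page-aligned ram address, tagged with
   IO_MEM_NOTDIRTY or IO_MEM_ROM where writes must be intercepted; for
   everything else it holds the I/O handler index plus the region
   offset (or the physical address), which the softmmu slow path uses
   to dispatch device callbacks. */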

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */

struct walk_memory_regions_data
{
    walk_memory_regions_fn fn;
    void *priv;
    unsigned long start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   abi_ulong end, int new_prot)
{
    if (data->start != -1ul) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1ul);
    data->prot = new_prot;

    return 0;
}

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    unsigned long i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}
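
/* walk_memory_regions_end() turns the per-page scan above into
   run-length output: fn fires once per maximal run of pages sharing
   the same protection flags, and unmapped stretches (prot == 0) end a
   run without being reported themselves. */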

static int dump_region(void *priv, abi_ulong start,
    abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
        " "TARGET_ABI_FMT_lx" %c%c%c\n",
        start, end, end - start,
        ((prot & PAGE_READ) ? 'r' : '-'),
        ((prot & PAGE_WRITE) ? 'w' : '-'),
        ((prot & PAGE_EXEC) ? 'x' : '-'));

    return (0);
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE. The mmap_lock should already be held. */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space. If this assert fires, it probably indicates
       a missing call to h2g_valid. */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside. */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space. If this assert fires, it probably indicates
       a missing call to h2g_valid. */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around. */
        return -1;
    }

    end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p)
            return -1;
        if (!(p->flags & PAGE_VALID))
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            return 0;
        }
    }
    return 0;
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler. However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok. */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
    ram_addr_t region_offset[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset);
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
blueswir1db7b5422007-05-26 17:36:03 +00002654 end_addr2 = TARGET_PAGE_SIZE - 1; \
2655 else { \
2656 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2657 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2658 need_subpage = 1; \
2659 } \
2660 } while (0)
2661
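/* Worked example (illustrative numbers, assuming TARGET_PAGE_SIZE == 0x1000):
   registering orig_size = 0x800 bytes at start_addr = 0x20000400 runs one
   loop iteration with addr == start_addr, and the macro yields
       start_addr2  = 0x400   (offset of the region within its page)
       end_addr2    = 0xbff   (last covered byte within the page)
       need_subpage = 1
   so the page is routed through a subpage_t rather than mapped wholesale. */
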
Michael S. Tsirkin8f2498f2009-09-29 18:53:16 +02002662/* register physical memory.
2663 For RAM, 'size' must be a multiple of the target page size.
2664 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
pbrook8da3ff12008-12-01 18:59:50 +00002665 io memory page. The address used when calling the IO function is
2666 the offset from the start of the region, plus region_offset. Both
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002667 start_addr and region_offset are rounded down to a page boundary
pbrook8da3ff12008-12-01 18:59:50 +00002668 before calculating this offset. This should not be a problem unless
2669 the low bits of start_addr and region_offset differ. */
Michael S. Tsirkin0fd542f2011-04-06 22:25:38 +03002670void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
Anthony Liguoric227f092009-10-01 16:12:16 -05002671 ram_addr_t size,
2672 ram_addr_t phys_offset,
Michael S. Tsirkin0fd542f2011-04-06 22:25:38 +03002673 ram_addr_t region_offset,
2674 bool log_dirty)
bellard33417e72003-08-10 21:47:01 +00002675{
Anthony Liguoric227f092009-10-01 16:12:16 -05002676 target_phys_addr_t addr, end_addr;
bellard92e873b2004-05-21 14:52:29 +00002677 PhysPageDesc *p;
bellard9d420372006-06-25 22:25:22 +00002678 CPUState *env;
Anthony Liguoric227f092009-10-01 16:12:16 -05002679 ram_addr_t orig_size = size;
Richard Hendersonf6405242010-04-22 16:47:31 -07002680 subpage_t *subpage;
bellard33417e72003-08-10 21:47:01 +00002681
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002682 assert(size);
Michael S. Tsirkin0fd542f2011-04-06 22:25:38 +03002683 cpu_notify_set_memory(start_addr, size, phys_offset, log_dirty);
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002684
pbrook67c4d232009-02-23 13:16:07 +00002685 if (phys_offset == IO_MEM_UNASSIGNED) {
2686 region_offset = start_addr;
2687 }
pbrook8da3ff12008-12-01 18:59:50 +00002688 region_offset &= TARGET_PAGE_MASK;
bellard5fd386f2004-05-23 21:11:22 +00002689 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
Anthony Liguoric227f092009-10-01 16:12:16 -05002690 end_addr = start_addr + (target_phys_addr_t)size;
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002691
2692 addr = start_addr;
2693 do {
blueswir1db7b5422007-05-26 17:36:03 +00002694 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2695 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
Anthony Liguoric227f092009-10-01 16:12:16 -05002696 ram_addr_t orig_memory = p->phys_offset;
2697 target_phys_addr_t start_addr2, end_addr2;
blueswir1db7b5422007-05-26 17:36:03 +00002698 int need_subpage = 0;
2699
2700 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2701 need_subpage);
Richard Hendersonf6405242010-04-22 16:47:31 -07002702 if (need_subpage) {
blueswir1db7b5422007-05-26 17:36:03 +00002703 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2704 subpage = subpage_init((addr & TARGET_PAGE_MASK),
pbrook8da3ff12008-12-01 18:59:50 +00002705 &p->phys_offset, orig_memory,
2706 p->region_offset);
blueswir1db7b5422007-05-26 17:36:03 +00002707 } else {
2708 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2709 >> IO_MEM_SHIFT];
2710 }
pbrook8da3ff12008-12-01 18:59:50 +00002711 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2712 region_offset);
2713 p->region_offset = 0;
blueswir1db7b5422007-05-26 17:36:03 +00002714 } else {
2715 p->phys_offset = phys_offset;
2716 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2717 (phys_offset & IO_MEM_ROMD))
2718 phys_offset += TARGET_PAGE_SIZE;
2719 }
2720 } else {
2721 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2722 p->phys_offset = phys_offset;
pbrook8da3ff12008-12-01 18:59:50 +00002723 p->region_offset = region_offset;
blueswir1db7b5422007-05-26 17:36:03 +00002724 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
pbrook8da3ff12008-12-01 18:59:50 +00002725 (phys_offset & IO_MEM_ROMD)) {
blueswir1db7b5422007-05-26 17:36:03 +00002726 phys_offset += TARGET_PAGE_SIZE;
pbrook0e8f0962008-12-02 09:02:15 +00002727 } else {
Anthony Liguoric227f092009-10-01 16:12:16 -05002728 target_phys_addr_t start_addr2, end_addr2;
blueswir1db7b5422007-05-26 17:36:03 +00002729 int need_subpage = 0;
2730
2731 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2732 end_addr2, need_subpage);
2733
Richard Hendersonf6405242010-04-22 16:47:31 -07002734 if (need_subpage) {
blueswir1db7b5422007-05-26 17:36:03 +00002735 subpage = subpage_init((addr & TARGET_PAGE_MASK),
pbrook8da3ff12008-12-01 18:59:50 +00002736 &p->phys_offset, IO_MEM_UNASSIGNED,
pbrook67c4d232009-02-23 13:16:07 +00002737 addr & TARGET_PAGE_MASK);
blueswir1db7b5422007-05-26 17:36:03 +00002738 subpage_register(subpage, start_addr2, end_addr2,
pbrook8da3ff12008-12-01 18:59:50 +00002739 phys_offset, region_offset);
2740 p->region_offset = 0;
blueswir1db7b5422007-05-26 17:36:03 +00002741 }
2742 }
2743 }
pbrook8da3ff12008-12-01 18:59:50 +00002744 region_offset += TARGET_PAGE_SIZE;
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002745 addr += TARGET_PAGE_SIZE;
2746 } while (addr != end_addr);
ths3b46e622007-09-17 08:09:54 +00002747
bellard9d420372006-06-25 22:25:22 +00002748 /* since each CPU stores ram addresses in its TLB cache, we must
2749 reset the modified entries */
2750 /* XXX: slow ! */
2751 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2752 tlb_flush(env, 1);
2753 }
bellard33417e72003-08-10 21:47:01 +00002754}
2755
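/* Usage sketch (illustrative, not from the original source): how a board
   model might back guest-physical ranges. 'example_mmio_index' stands for
   a value previously returned by cpu_register_io_memory(); callers more
   commonly go through the cpu_register_physical_memory() wrapper. */
#if 0
static void example_map_board_memory(int example_mmio_index)
{
    /* 1 MiB of RAM at guest-physical 0x00000000; a page-aligned
       phys_offset with clear low bits denotes RAM. */
    ram_addr_t ram_off = qemu_ram_alloc(NULL, "example.ram", 0x100000);
    cpu_register_physical_memory_log(0x00000000, 0x100000, ram_off, 0, false);

    /* One page of MMIO at 0x10000000; the io index sits in the low bits
       of phys_offset and selects the handler table slot. */
    cpu_register_physical_memory_log(0x10000000, TARGET_PAGE_SIZE,
                                     example_mmio_index, 0, false);
}
#endif
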
bellardba863452006-09-24 18:41:10 +00002756/* XXX: temporary until new memory mapping API */
Anthony Liguoric227f092009-10-01 16:12:16 -05002757ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
bellardba863452006-09-24 18:41:10 +00002758{
2759 PhysPageDesc *p;
2760
2761 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2762 if (!p)
2763 return IO_MEM_UNASSIGNED;
2764 return p->phys_offset;
2765}
2766
Anthony Liguoric227f092009-10-01 16:12:16 -05002767void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002768{
2769 if (kvm_enabled())
2770 kvm_coalesce_mmio_region(addr, size);
2771}
2772
Anthony Liguoric227f092009-10-01 16:12:16 -05002773void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002774{
2775 if (kvm_enabled())
2776 kvm_uncoalesce_mmio_region(addr, size);
2777}
2778
Sheng Yang62a27442010-01-26 19:21:16 +08002779void qemu_flush_coalesced_mmio_buffer(void)
2780{
2781 if (kvm_enabled())
2782 kvm_flush_coalesced_mmio_buffer();
2783}
2784
Marcelo Tosattic9027602010-03-01 20:25:08 -03002785#if defined(__linux__) && !defined(TARGET_S390X)
2786
2787#include <sys/vfs.h>
2788
2789#define HUGETLBFS_MAGIC 0x958458f6
2790
2791static long gethugepagesize(const char *path)
2792{
2793 struct statfs fs;
2794 int ret;
2795
2796 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002797 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002798 } while (ret != 0 && errno == EINTR);
2799
2800 if (ret != 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002801 perror(path);
2802 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002803 }
2804
2805 if (fs.f_type != HUGETLBFS_MAGIC)
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002806 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002807
2808 return fs.f_bsize;
2809}
2810
Alex Williamson04b16652010-07-02 11:13:17 -06002811static void *file_ram_alloc(RAMBlock *block,
2812 ram_addr_t memory,
2813 const char *path)
Marcelo Tosattic9027602010-03-01 20:25:08 -03002814{
2815 char *filename;
2816 void *area;
2817 int fd;
2818#ifdef MAP_POPULATE
2819 int flags;
2820#endif
2821 unsigned long hpagesize;
2822
2823 hpagesize = gethugepagesize(path);
2824 if (!hpagesize) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002825 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002826 }
2827
2828 if (memory < hpagesize) {
2829 return NULL;
2830 }
2831
2832 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2833 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2834 return NULL;
2835 }
2836
2837 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002838 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002839 }
2840
2841 fd = mkstemp(filename);
2842 if (fd < 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002843 perror("unable to create backing store for hugepages");
2844 free(filename);
2845 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002846 }
2847 unlink(filename);
2848 free(filename);
2849
2850 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2851
2852 /*
2853 * ftruncate is not supported by hugetlbfs in older
2854 * hosts, so don't bother bailing out on errors.
2855 * If anything goes wrong with it under other filesystems,
2856 * mmap will fail.
2857 */
2858 if (ftruncate(fd, memory))
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002859 perror("ftruncate");
Marcelo Tosattic9027602010-03-01 20:25:08 -03002860
2861#ifdef MAP_POPULATE
2862 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2863 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2864 * to sidestep this quirk.
2865 */
2866 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2867 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2868#else
2869 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2870#endif
2871 if (area == MAP_FAILED) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002872 perror("file_ram_alloc: can't mmap RAM pages");
2873 close(fd);
2874 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002875 }
Alex Williamson04b16652010-07-02 11:13:17 -06002876 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002877 return area;
2878}
2879#endif
2880
Alex Williamsond17b5282010-06-25 11:08:38 -06002881static ram_addr_t find_ram_offset(ram_addr_t size)
2882{
Alex Williamson04b16652010-07-02 11:13:17 -06002883 RAMBlock *block, *next_block;
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002884 ram_addr_t offset = 0, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002885
2886 if (QLIST_EMPTY(&ram_list.blocks))
2887 return 0;
2888
2889 QLIST_FOREACH(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002890 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002891
2892 end = block->offset + block->length;
2893
2894 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2895 if (next_block->offset >= end) {
2896 next = MIN(next, next_block->offset);
2897 }
2898 }
2899 if (next - end >= size && next - end < mingap) {
2900 offset = end;
2901 mingap = next - end;
2902 }
2903 }
2904 return offset;
2905}
2906
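/* Worked example (illustrative): with existing blocks [0x0, 0x100000) and
   [0x300000, 0x400000), a request for size 0x100000 returns 0x100000: the
   gap after the first block is the smallest one that still fits, and
   'mingap' keeps the tightest such candidate to limit fragmentation. */
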
2907static ram_addr_t last_ram_offset(void)
2908{
Alex Williamsond17b5282010-06-25 11:08:38 -06002909 RAMBlock *block;
2910 ram_addr_t last = 0;
2911
2912 QLIST_FOREACH(block, &ram_list.blocks, next)
2913 last = MAX(last, block->offset + block->length);
2914
2915 return last;
2916}
2917
Cam Macdonell84b89d72010-07-26 18:10:57 -06002918ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002919 ram_addr_t size, void *host)
Cam Macdonell84b89d72010-07-26 18:10:57 -06002920{
2921 RAMBlock *new_block, *block;
2922
2923 size = TARGET_PAGE_ALIGN(size);
Anthony Liguori7267c092011-08-20 22:09:37 -05002924 new_block = g_malloc0(sizeof(*new_block));
Cam Macdonell84b89d72010-07-26 18:10:57 -06002925
2926 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2927 char *id = dev->parent_bus->info->get_dev_path(dev);
2928 if (id) {
2929 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05002930 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002931 }
2932 }
2933 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2934
2935 QLIST_FOREACH(block, &ram_list.blocks, next) {
2936 if (!strcmp(block->idstr, new_block->idstr)) {
2937 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2938 new_block->idstr);
2939 abort();
2940 }
2941 }
2942
Jun Nakajima432d2682010-08-31 16:41:25 +01002943 new_block->offset = find_ram_offset(size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002944 if (host) {
2945 new_block->host = host;
Huang Yingcd19cfa2011-03-02 08:56:19 +01002946 new_block->flags |= RAM_PREALLOC_MASK;
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002947 } else {
2948 if (mem_path) {
2949#if defined (__linux__) && !defined(TARGET_S390X)
2950 new_block->host = file_ram_alloc(new_block, size, mem_path);
2951 if (!new_block->host) {
2952 new_block->host = qemu_vmalloc(size);
Andreas Färbere78815a2010-09-25 11:26:05 +00002953 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002954 }
2955#else
2956 fprintf(stderr, "-mem-path option unsupported\n");
2957 exit(1);
2958#endif
2959 } else {
2960#if defined(TARGET_S390X) && defined(CONFIG_KVM)
Christian Borntraegerff836782011-05-10 14:49:10 +02002961 /* S390 KVM requires the topmost vma of the RAM to be smaller than
2962 a system-defined value, which is at least 256GB. Larger systems
2963 have larger values. We put the guest between the end of data
2964 segment (system break) and this value. We use 32GB as a base to
2965 have enough room for the system break to grow. */
2966 new_block->host = mmap((void*)0x800000000, size,
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002967 PROT_EXEC|PROT_READ|PROT_WRITE,
Christian Borntraegerff836782011-05-10 14:49:10 +02002968 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
Alexander Graffb8b2732011-05-20 17:33:28 +02002969 if (new_block->host == MAP_FAILED) {
2970 fprintf(stderr, "Allocating RAM failed\n");
2971 abort();
2972 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002973#else
Jan Kiszka868bb332011-06-21 22:59:09 +02002974 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01002975 xen_ram_alloc(new_block->offset, size);
2976 } else {
2977 new_block->host = qemu_vmalloc(size);
2978 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002979#endif
Andreas Färbere78815a2010-09-25 11:26:05 +00002980 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002981 }
2982 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06002983 new_block->length = size;
2984
2985 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2986
Anthony Liguori7267c092011-08-20 22:09:37 -05002987 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
Cam Macdonell84b89d72010-07-26 18:10:57 -06002988 last_ram_offset() >> TARGET_PAGE_BITS);
2989 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2990 0xff, size >> TARGET_PAGE_BITS);
2991
2992 if (kvm_enabled())
2993 kvm_setup_guest_memory(new_block->host, size);
2994
2995 return new_block->offset;
2996}
2997
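/* Usage sketch (illustrative, not from the original source): a device that
   already owns host memory (say, a shared-memory mapping) can register it
   directly instead of letting QEMU allocate the backing store. */
#if 0
static ram_addr_t example_register_existing_buffer(DeviceState *dev,
                                                   void *host_buf,
                                                   ram_addr_t size)
{
    /* 'host_buf' must stay valid for the life of the block; passing a
       non-NULL host sets RAM_PREALLOC_MASK, so teardown will not try to
       free memory it does not own. */
    return qemu_ram_alloc_from_ptr(dev, "example.shmem", size, host_buf);
}
#endif
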
Alex Williamson1724f042010-06-25 11:09:35 -06002998ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
pbrook94a6b542009-04-11 17:15:54 +00002999{
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09003000 return qemu_ram_alloc_from_ptr(dev, name, size, NULL);
pbrook94a6b542009-04-11 17:15:54 +00003001}
bellarde9a1ab12007-02-08 23:08:38 +00003002
Alex Williamson1f2e98b2011-05-03 12:48:09 -06003003void qemu_ram_free_from_ptr(ram_addr_t addr)
3004{
3005 RAMBlock *block;
3006
3007 QLIST_FOREACH(block, &ram_list.blocks, next) {
3008 if (addr == block->offset) {
3009 QLIST_REMOVE(block, next);
Anthony Liguori7267c092011-08-20 22:09:37 -05003010 g_free(block);
Alex Williamson1f2e98b2011-05-03 12:48:09 -06003011 return;
3012 }
3013 }
3014}
3015
Anthony Liguoric227f092009-10-01 16:12:16 -05003016void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00003017{
Alex Williamson04b16652010-07-02 11:13:17 -06003018 RAMBlock *block;
3019
3020 QLIST_FOREACH(block, &ram_list.blocks, next) {
3021 if (addr == block->offset) {
3022 QLIST_REMOVE(block, next);
Huang Yingcd19cfa2011-03-02 08:56:19 +01003023 if (block->flags & RAM_PREALLOC_MASK) {
3024 ;
3025 } else if (mem_path) {
Alex Williamson04b16652010-07-02 11:13:17 -06003026#if defined (__linux__) && !defined(TARGET_S390X)
3027 if (block->fd) {
3028 munmap(block->host, block->length);
3029 close(block->fd);
3030 } else {
3031 qemu_vfree(block->host);
3032 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01003033#else
3034 abort();
Alex Williamson04b16652010-07-02 11:13:17 -06003035#endif
3036 } else {
3037#if defined(TARGET_S390X) && defined(CONFIG_KVM)
3038 munmap(block->host, block->length);
3039#else
Jan Kiszka868bb332011-06-21 22:59:09 +02003040 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003041 xen_invalidate_map_cache_entry(block->host);
Jun Nakajima432d2682010-08-31 16:41:25 +01003042 } else {
3043 qemu_vfree(block->host);
3044 }
Alex Williamson04b16652010-07-02 11:13:17 -06003045#endif
3046 }
Anthony Liguori7267c092011-08-20 22:09:37 -05003047 g_free(block);
Alex Williamson04b16652010-07-02 11:13:17 -06003048 return;
3049 }
3050 }
3051
bellarde9a1ab12007-02-08 23:08:38 +00003052}
3053
Huang Yingcd19cfa2011-03-02 08:56:19 +01003054#ifndef _WIN32
3055void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
3056{
3057 RAMBlock *block;
3058 ram_addr_t offset;
3059 int flags;
3060 void *area, *vaddr;
3061
3062 QLIST_FOREACH(block, &ram_list.blocks, next) {
3063 offset = addr - block->offset;
3064 if (offset < block->length) {
3065 vaddr = block->host + offset;
3066 if (block->flags & RAM_PREALLOC_MASK) {
3067 ;
3068 } else {
3069 flags = MAP_FIXED;
3070 munmap(vaddr, length);
3071 if (mem_path) {
3072#if defined(__linux__) && !defined(TARGET_S390X)
3073 if (block->fd) {
3074#ifdef MAP_POPULATE
3075 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
3076 MAP_PRIVATE;
3077#else
3078 flags |= MAP_PRIVATE;
3079#endif
3080 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3081 flags, block->fd, offset);
3082 } else {
3083 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3084 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3085 flags, -1, 0);
3086 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01003087#else
3088 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01003089#endif
3090 } else {
3091#if defined(TARGET_S390X) && defined(CONFIG_KVM)
3092 flags |= MAP_SHARED | MAP_ANONYMOUS;
3093 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
3094 flags, -1, 0);
3095#else
3096 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3097 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3098 flags, -1, 0);
3099#endif
3100 }
3101 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00003102 fprintf(stderr, "Could not remap addr: "
3103 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01003104 length, addr);
3105 exit(1);
3106 }
3107 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
3108 }
3109 return;
3110 }
3111 }
3112}
3113#endif /* !_WIN32 */
3114
pbrookdc828ca2009-04-09 22:21:07 +00003115/* Return a host pointer to ram allocated with qemu_ram_alloc.
pbrook5579c7f2009-04-11 14:47:08 +00003116 With the exception of the softmmu code in this file, this should
3117 only be used for local memory (e.g. video ram) that the device owns,
3118 and knows it isn't going to access beyond the end of the block.
3119
3120 It should not be used for general purpose DMA.
3121 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
3122 */
Anthony Liguoric227f092009-10-01 16:12:16 -05003123void *qemu_get_ram_ptr(ram_addr_t addr)
pbrookdc828ca2009-04-09 22:21:07 +00003124{
pbrook94a6b542009-04-11 17:15:54 +00003125 RAMBlock *block;
3126
Alex Williamsonf471a172010-06-11 11:11:42 -06003127 QLIST_FOREACH(block, &ram_list.blocks, next) {
3128 if (addr - block->offset < block->length) {
Vincent Palatin7d82af32011-03-10 15:47:46 -05003129 /* Move this entry to the start of the list. */
3130 if (block != QLIST_FIRST(&ram_list.blocks)) {
3131 QLIST_REMOVE(block, next);
3132 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
3133 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003134 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003135 /* We need to check if the requested address is in RAM
3136 * because we don't want to map the entire memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003137 * In that case just map until the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01003138 */
3139 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003140 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01003141 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003142 block->host =
3143 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01003144 }
3145 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003146 return block->host + (addr - block->offset);
3147 }
pbrook94a6b542009-04-11 17:15:54 +00003148 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003149
3150 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3151 abort();
3152
3153 return NULL;
pbrookdc828ca2009-04-09 22:21:07 +00003154}
3155
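/* Usage sketch (illustrative, not from the original source): per the
   comment above, this suits device-local memory such as video RAM, where
   the device owns the block and never touches past its end. */
#if 0
static void example_clear_vram(ram_addr_t vram_offset, size_t len)
{
    /* 'vram_offset' would come from qemu_ram_alloc(). General-purpose
       DMA must go through cpu_physical_memory_map()/_rw() instead. */
    uint8_t *vram = qemu_get_ram_ptr(vram_offset);
    memset(vram, 0, len);
}
#endif
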
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02003156/* Return a host pointer to ram allocated with qemu_ram_alloc.
3157 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3158 */
3159void *qemu_safe_ram_ptr(ram_addr_t addr)
3160{
3161 RAMBlock *block;
3162
3163 QLIST_FOREACH(block, &ram_list.blocks, next) {
3164 if (addr - block->offset < block->length) {
Jan Kiszka868bb332011-06-21 22:59:09 +02003165 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003166 /* We need to check if the requested address is in RAM
3167 * because we don't want to map the entire memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003168 * In that case just map until the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01003169 */
3170 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003171 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01003172 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003173 block->host =
3174 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01003175 }
3176 }
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02003177 return block->host + (addr - block->offset);
3178 }
3179 }
3180
3181 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3182 abort();
3183
3184 return NULL;
3185}
3186
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003187/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
3188 * but takes a size argument */
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003189void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003190{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003191 if (*size == 0) {
3192 return NULL;
3193 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003194 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003195 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02003196 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003197 RAMBlock *block;
3198
3199 QLIST_FOREACH(block, &ram_list.blocks, next) {
3200 if (addr - block->offset < block->length) {
3201 if (addr - block->offset + *size > block->length)
3202 *size = block->length - addr + block->offset;
3203 return block->host + (addr - block->offset);
3204 }
3205 }
3206
3207 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3208 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003209 }
3210}
3211
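/* Usage sketch (illustrative, not from the original source): a caller that
   must not run past the end of a RAMBlock passes the size by reference and
   honours however much was actually granted. */
#if 0
static void example_bounded_fill(ram_addr_t addr, ram_addr_t len)
{
    ram_addr_t granted = len;
    void *host = qemu_ram_ptr_length(addr, &granted);
    if (host) {
        memset(host, 0, granted);   /* 'granted' may be less than 'len' */
    }
}
#endif
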
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003212void qemu_put_ram_ptr(void *addr)
3213{
3214 trace_qemu_put_ram_ptr(addr);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003215}
3216
Marcelo Tosattie8902612010-10-11 15:31:19 -03003217int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00003218{
pbrook94a6b542009-04-11 17:15:54 +00003219 RAMBlock *block;
3220 uint8_t *host = ptr;
3221
Jan Kiszka868bb332011-06-21 22:59:09 +02003222 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003223 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003224 return 0;
3225 }
3226
Alex Williamsonf471a172010-06-11 11:11:42 -06003227 QLIST_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003228 /* This case happens when the block is not mapped. */
3229 if (block->host == NULL) {
3230 continue;
3231 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003232 if (host - block->host < block->length) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03003233 *ram_addr = block->offset + (host - block->host);
3234 return 0;
Alex Williamsonf471a172010-06-11 11:11:42 -06003235 }
pbrook94a6b542009-04-11 17:15:54 +00003236 }
Jun Nakajima432d2682010-08-31 16:41:25 +01003237
Marcelo Tosattie8902612010-10-11 15:31:19 -03003238 return -1;
3239}
Alex Williamsonf471a172010-06-11 11:11:42 -06003240
Marcelo Tosattie8902612010-10-11 15:31:19 -03003241/* Some of the softmmu routines need to translate from a host pointer
3242 (typically a TLB entry) back to a ram offset. */
3243ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3244{
3245 ram_addr_t ram_addr;
Alex Williamsonf471a172010-06-11 11:11:42 -06003246
Marcelo Tosattie8902612010-10-11 15:31:19 -03003247 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3248 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3249 abort();
3250 }
3251 return ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00003252}
3253
Anthony Liguoric227f092009-10-01 16:12:16 -05003254static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
bellard33417e72003-08-10 21:47:01 +00003255{
pbrook67d3b952006-12-18 05:03:52 +00003256#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00003257 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
pbrook67d3b952006-12-18 05:03:52 +00003258#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003259#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Blue Swirlb14ef7c2011-07-03 08:53:46 +00003260 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 1);
blueswir1e18231a2008-10-06 18:46:28 +00003261#endif
3262 return 0;
3263}
3264
Anthony Liguoric227f092009-10-01 16:12:16 -05003265static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
blueswir1e18231a2008-10-06 18:46:28 +00003266{
3267#ifdef DEBUG_UNASSIGNED
3268 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3269#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003270#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Blue Swirlb14ef7c2011-07-03 08:53:46 +00003271 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 2);
blueswir1e18231a2008-10-06 18:46:28 +00003272#endif
3273 return 0;
3274}
3275
Anthony Liguoric227f092009-10-01 16:12:16 -05003276static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
blueswir1e18231a2008-10-06 18:46:28 +00003277{
3278#ifdef DEBUG_UNASSIGNED
3279 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3280#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003281#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Blue Swirlb14ef7c2011-07-03 08:53:46 +00003282 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 4);
blueswir1b4f0a312007-05-06 17:59:24 +00003283#endif
bellard33417e72003-08-10 21:47:01 +00003284 return 0;
3285}
3286
Anthony Liguoric227f092009-10-01 16:12:16 -05003287static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
bellard33417e72003-08-10 21:47:01 +00003288{
pbrook67d3b952006-12-18 05:03:52 +00003289#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00003290 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
pbrook67d3b952006-12-18 05:03:52 +00003291#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003292#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Blue Swirlb14ef7c2011-07-03 08:53:46 +00003293 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 1);
blueswir1e18231a2008-10-06 18:46:28 +00003294#endif
3295}
3296
Anthony Liguoric227f092009-10-01 16:12:16 -05003297static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
blueswir1e18231a2008-10-06 18:46:28 +00003298{
3299#ifdef DEBUG_UNASSIGNED
3300 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3301#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003302#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Blue Swirlb14ef7c2011-07-03 08:53:46 +00003303 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 2);
blueswir1e18231a2008-10-06 18:46:28 +00003304#endif
3305}
3306
Anthony Liguoric227f092009-10-01 16:12:16 -05003307static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
blueswir1e18231a2008-10-06 18:46:28 +00003308{
3309#ifdef DEBUG_UNASSIGNED
3310 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3311#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003312#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Blue Swirlb14ef7c2011-07-03 08:53:46 +00003313 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 4);
blueswir1b4f0a312007-05-06 17:59:24 +00003314#endif
bellard33417e72003-08-10 21:47:01 +00003315}
3316
Blue Swirld60efc62009-08-25 18:29:31 +00003317static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
bellard33417e72003-08-10 21:47:01 +00003318 unassigned_mem_readb,
blueswir1e18231a2008-10-06 18:46:28 +00003319 unassigned_mem_readw,
3320 unassigned_mem_readl,
bellard33417e72003-08-10 21:47:01 +00003321};
3322
Blue Swirld60efc62009-08-25 18:29:31 +00003323static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
bellard33417e72003-08-10 21:47:01 +00003324 unassigned_mem_writeb,
blueswir1e18231a2008-10-06 18:46:28 +00003325 unassigned_mem_writew,
3326 unassigned_mem_writel,
bellard33417e72003-08-10 21:47:01 +00003327};
3328
Anthony Liguoric227f092009-10-01 16:12:16 -05003329static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
pbrook0f459d12008-06-09 00:20:13 +00003330 uint32_t val)
bellard1ccde1c2004-02-06 19:46:14 +00003331{
bellard3a7d9292005-08-21 09:26:42 +00003332 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003333 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003334 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3335#if !defined(CONFIG_USER_ONLY)
3336 tb_invalidate_phys_page_fast(ram_addr, 1);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003337 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003338#endif
3339 }
pbrook5579c7f2009-04-11 14:47:08 +00003340 stb_p(qemu_get_ram_ptr(ram_addr), val);
bellardf23db162005-08-21 19:12:28 +00003341 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003342 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00003343 /* we remove the notdirty callback only if the code has been
3344 flushed */
3345 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00003346 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00003347}
3348
Anthony Liguoric227f092009-10-01 16:12:16 -05003349static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
pbrook0f459d12008-06-09 00:20:13 +00003350 uint32_t val)
bellard1ccde1c2004-02-06 19:46:14 +00003351{
bellard3a7d9292005-08-21 09:26:42 +00003352 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003353 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003354 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3355#if !defined(CONFIG_USER_ONLY)
3356 tb_invalidate_phys_page_fast(ram_addr, 2);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003357 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003358#endif
3359 }
pbrook5579c7f2009-04-11 14:47:08 +00003360 stw_p(qemu_get_ram_ptr(ram_addr), val);
bellardf23db162005-08-21 19:12:28 +00003361 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003362 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00003363 /* we remove the notdirty callback only if the code has been
3364 flushed */
3365 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00003366 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00003367}
3368
Anthony Liguoric227f092009-10-01 16:12:16 -05003369static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
pbrook0f459d12008-06-09 00:20:13 +00003370 uint32_t val)
bellard1ccde1c2004-02-06 19:46:14 +00003371{
bellard3a7d9292005-08-21 09:26:42 +00003372 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003373 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003374 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3375#if !defined(CONFIG_USER_ONLY)
3376 tb_invalidate_phys_page_fast(ram_addr, 4);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003377 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003378#endif
3379 }
pbrook5579c7f2009-04-11 14:47:08 +00003380 stl_p(qemu_get_ram_ptr(ram_addr), val);
bellardf23db162005-08-21 19:12:28 +00003381 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003382 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00003383 /* we remove the notdirty callback only if the code has been
3384 flushed */
3385 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00003386 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00003387}
3388
Blue Swirld60efc62009-08-25 18:29:31 +00003389static CPUReadMemoryFunc * const error_mem_read[3] = {
bellard3a7d9292005-08-21 09:26:42 +00003390 NULL, /* never used */
3391 NULL, /* never used */
3392 NULL, /* never used */
3393};
3394
Blue Swirld60efc62009-08-25 18:29:31 +00003395static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
bellard1ccde1c2004-02-06 19:46:14 +00003396 notdirty_mem_writeb,
3397 notdirty_mem_writew,
3398 notdirty_mem_writel,
3399};
3400
pbrook0f459d12008-06-09 00:20:13 +00003401/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00003402static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00003403{
3404 CPUState *env = cpu_single_env;
aliguori06d55cc2008-11-18 20:24:06 +00003405 target_ulong pc, cs_base;
3406 TranslationBlock *tb;
pbrook0f459d12008-06-09 00:20:13 +00003407 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00003408 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00003409 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00003410
aliguori06d55cc2008-11-18 20:24:06 +00003411 if (env->watchpoint_hit) {
3412 /* We re-entered the check after replacing the TB. Now raise
3413 * the debug interrupt so that it will trigger after the
3414 * current instruction. */
3415 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3416 return;
3417 }
pbrook2e70f6e2008-06-29 01:03:05 +00003418 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003419 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00003420 if ((vaddr == (wp->vaddr & len_mask) ||
3421 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00003422 wp->flags |= BP_WATCHPOINT_HIT;
3423 if (!env->watchpoint_hit) {
3424 env->watchpoint_hit = wp;
3425 tb = tb_find_pc(env->mem_io_pc);
3426 if (!tb) {
3427 cpu_abort(env, "check_watchpoint: could not find TB for "
3428 "pc=%p", (void *)env->mem_io_pc);
3429 }
Stefan Weil618ba8e2011-04-18 06:39:53 +00003430 cpu_restore_state(tb, env, env->mem_io_pc);
aliguori6e140f22008-11-18 20:37:55 +00003431 tb_phys_invalidate(tb, -1);
3432 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3433 env->exception_index = EXCP_DEBUG;
3434 } else {
3435 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3436 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3437 }
3438 cpu_resume_from_signal(env, NULL);
aliguori06d55cc2008-11-18 20:24:06 +00003439 }
aliguori6e140f22008-11-18 20:37:55 +00003440 } else {
3441 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00003442 }
3443 }
3444}
3445
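/* Usage sketch (illustrative, not from the original source): the path above
   fires once a watchpoint registered via cpu_watchpoint_insert() (defined
   earlier in this file) covers the faulting access. */
#if 0
static void example_watch_guest_word(CPUState *env, target_ulong vaddr)
{
    CPUWatchpoint *wp;

    /* Watch 4 bytes for stores; guest writes to that page then reach
       watch_mem_write*() below, which call check_watchpoint(). */
    cpu_watchpoint_insert(env, vaddr, 4, BP_MEM_WRITE, &wp);
}
#endif
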
pbrook6658ffb2007-03-16 23:58:11 +00003446/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3447 so these check for a hit then pass through to the normal out-of-line
3448 phys routines. */
Anthony Liguoric227f092009-10-01 16:12:16 -05003449static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
pbrook6658ffb2007-03-16 23:58:11 +00003450{
aliguorib4051332008-11-18 20:14:20 +00003451 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
pbrook6658ffb2007-03-16 23:58:11 +00003452 return ldub_phys(addr);
3453}
3454
Anthony Liguoric227f092009-10-01 16:12:16 -05003455static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
pbrook6658ffb2007-03-16 23:58:11 +00003456{
aliguorib4051332008-11-18 20:14:20 +00003457 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
pbrook6658ffb2007-03-16 23:58:11 +00003458 return lduw_phys(addr);
3459}
3460
Anthony Liguoric227f092009-10-01 16:12:16 -05003461static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
pbrook6658ffb2007-03-16 23:58:11 +00003462{
aliguorib4051332008-11-18 20:14:20 +00003463 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
pbrook6658ffb2007-03-16 23:58:11 +00003464 return ldl_phys(addr);
3465}
3466
Anthony Liguoric227f092009-10-01 16:12:16 -05003467static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
pbrook6658ffb2007-03-16 23:58:11 +00003468 uint32_t val)
3469{
aliguorib4051332008-11-18 20:14:20 +00003470 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
pbrook6658ffb2007-03-16 23:58:11 +00003471 stb_phys(addr, val);
3472}
3473
Anthony Liguoric227f092009-10-01 16:12:16 -05003474static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
pbrook6658ffb2007-03-16 23:58:11 +00003475 uint32_t val)
3476{
aliguorib4051332008-11-18 20:14:20 +00003477 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
pbrook6658ffb2007-03-16 23:58:11 +00003478 stw_phys(addr, val);
3479}
3480
Anthony Liguoric227f092009-10-01 16:12:16 -05003481static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
pbrook6658ffb2007-03-16 23:58:11 +00003482 uint32_t val)
3483{
aliguorib4051332008-11-18 20:14:20 +00003484 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
pbrook6658ffb2007-03-16 23:58:11 +00003485 stl_phys(addr, val);
3486}
3487
Blue Swirld60efc62009-08-25 18:29:31 +00003488static CPUReadMemoryFunc * const watch_mem_read[3] = {
pbrook6658ffb2007-03-16 23:58:11 +00003489 watch_mem_readb,
3490 watch_mem_readw,
3491 watch_mem_readl,
3492};
3493
Blue Swirld60efc62009-08-25 18:29:31 +00003494static CPUWriteMemoryFunc * const watch_mem_write[3] = {
pbrook6658ffb2007-03-16 23:58:11 +00003495 watch_mem_writeb,
3496 watch_mem_writew,
3497 watch_mem_writel,
3498};
pbrook6658ffb2007-03-16 23:58:11 +00003499
Richard Hendersonf6405242010-04-22 16:47:31 -07003500static inline uint32_t subpage_readlen (subpage_t *mmio,
3501 target_phys_addr_t addr,
3502 unsigned int len)
blueswir1db7b5422007-05-26 17:36:03 +00003503{
Richard Hendersonf6405242010-04-22 16:47:31 -07003504 unsigned int idx = SUBPAGE_IDX(addr);
blueswir1db7b5422007-05-26 17:36:03 +00003505#if defined(DEBUG_SUBPAGE)
3506 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3507 mmio, len, addr, idx);
3508#endif
blueswir1db7b5422007-05-26 17:36:03 +00003509
Richard Hendersonf6405242010-04-22 16:47:31 -07003510 addr += mmio->region_offset[idx];
3511 idx = mmio->sub_io_index[idx];
3512 return io_mem_read[idx][len](io_mem_opaque[idx], addr);
blueswir1db7b5422007-05-26 17:36:03 +00003513}
3514
Anthony Liguoric227f092009-10-01 16:12:16 -05003515static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
Richard Hendersonf6405242010-04-22 16:47:31 -07003516 uint32_t value, unsigned int len)
blueswir1db7b5422007-05-26 17:36:03 +00003517{
Richard Hendersonf6405242010-04-22 16:47:31 -07003518 unsigned int idx = SUBPAGE_IDX(addr);
blueswir1db7b5422007-05-26 17:36:03 +00003519#if defined(DEBUG_SUBPAGE)
Richard Hendersonf6405242010-04-22 16:47:31 -07003520 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
3521 __func__, mmio, len, addr, idx, value);
blueswir1db7b5422007-05-26 17:36:03 +00003522#endif
Richard Hendersonf6405242010-04-22 16:47:31 -07003523
3524 addr += mmio->region_offset[idx];
3525 idx = mmio->sub_io_index[idx];
3526 io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00003527}
3528
Anthony Liguoric227f092009-10-01 16:12:16 -05003529static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
blueswir1db7b5422007-05-26 17:36:03 +00003530{
blueswir1db7b5422007-05-26 17:36:03 +00003531 return subpage_readlen(opaque, addr, 0);
3532}
3533
Anthony Liguoric227f092009-10-01 16:12:16 -05003534static void subpage_writeb (void *opaque, target_phys_addr_t addr,
blueswir1db7b5422007-05-26 17:36:03 +00003535 uint32_t value)
3536{
blueswir1db7b5422007-05-26 17:36:03 +00003537 subpage_writelen(opaque, addr, value, 0);
3538}
3539
Anthony Liguoric227f092009-10-01 16:12:16 -05003540static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
blueswir1db7b5422007-05-26 17:36:03 +00003541{
blueswir1db7b5422007-05-26 17:36:03 +00003542 return subpage_readlen(opaque, addr, 1);
3543}
3544
Anthony Liguoric227f092009-10-01 16:12:16 -05003545static void subpage_writew (void *opaque, target_phys_addr_t addr,
blueswir1db7b5422007-05-26 17:36:03 +00003546 uint32_t value)
3547{
blueswir1db7b5422007-05-26 17:36:03 +00003548 subpage_writelen(opaque, addr, value, 1);
3549}
3550
Anthony Liguoric227f092009-10-01 16:12:16 -05003551static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
blueswir1db7b5422007-05-26 17:36:03 +00003552{
blueswir1db7b5422007-05-26 17:36:03 +00003553 return subpage_readlen(opaque, addr, 2);
3554}
3555
Richard Hendersonf6405242010-04-22 16:47:31 -07003556static void subpage_writel (void *opaque, target_phys_addr_t addr,
3557 uint32_t value)
blueswir1db7b5422007-05-26 17:36:03 +00003558{
blueswir1db7b5422007-05-26 17:36:03 +00003559 subpage_writelen(opaque, addr, value, 2);
3560}
3561
Blue Swirld60efc62009-08-25 18:29:31 +00003562static CPUReadMemoryFunc * const subpage_read[] = {
blueswir1db7b5422007-05-26 17:36:03 +00003563 &subpage_readb,
3564 &subpage_readw,
3565 &subpage_readl,
3566};
3567
Blue Swirld60efc62009-08-25 18:29:31 +00003568static CPUWriteMemoryFunc * const subpage_write[] = {
blueswir1db7b5422007-05-26 17:36:03 +00003569 &subpage_writeb,
3570 &subpage_writew,
3571 &subpage_writel,
3572};
3573
Anthony Liguoric227f092009-10-01 16:12:16 -05003574static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3575 ram_addr_t memory, ram_addr_t region_offset)
blueswir1db7b5422007-05-26 17:36:03 +00003576{
3577 int idx, eidx;
3578
3579 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3580 return -1;
3581 idx = SUBPAGE_IDX(start);
3582 eidx = SUBPAGE_IDX(end);
3583#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00003584 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
blueswir1db7b5422007-05-26 17:36:03 +00003585 mmio, start, end, idx, eidx, memory);
3586#endif
Gleb Natapov95c318f2010-07-29 10:41:45 +03003587 if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
3588 memory = IO_MEM_UNASSIGNED;
Richard Hendersonf6405242010-04-22 16:47:31 -07003589 memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
blueswir1db7b5422007-05-26 17:36:03 +00003590 for (; idx <= eidx; idx++) {
Richard Hendersonf6405242010-04-22 16:47:31 -07003591 mmio->sub_io_index[idx] = memory;
3592 mmio->region_offset[idx] = region_offset;
blueswir1db7b5422007-05-26 17:36:03 +00003593 }
3594
3595 return 0;
3596}
3597
Richard Hendersonf6405242010-04-22 16:47:31 -07003598static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3599 ram_addr_t orig_memory,
3600 ram_addr_t region_offset)
blueswir1db7b5422007-05-26 17:36:03 +00003601{
Anthony Liguoric227f092009-10-01 16:12:16 -05003602 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00003603 int subpage_memory;
3604
Anthony Liguori7267c092011-08-20 22:09:37 -05003605 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00003606
3607 mmio->base = base;
Alexander Graf2507c122010-12-08 12:05:37 +01003608 subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio,
3609 DEVICE_NATIVE_ENDIAN);
blueswir1db7b5422007-05-26 17:36:03 +00003610#if defined(DEBUG_SUBPAGE)
aliguori1eec6142009-02-05 22:06:18 +00003611 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3612 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
blueswir1db7b5422007-05-26 17:36:03 +00003613#endif
aliguori1eec6142009-02-05 22:06:18 +00003614 *phys = subpage_memory | IO_MEM_SUBPAGE;
Richard Hendersonf6405242010-04-22 16:47:31 -07003615 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
blueswir1db7b5422007-05-26 17:36:03 +00003616
3617 return mmio;
3618}
3619
aliguori88715652009-02-11 15:20:58 +00003620static int get_free_io_mem_idx(void)
3621{
3622 int i;
3623
3624 for (i = 0; i < IO_MEM_NB_ENTRIES; i++)
3625 if (!io_mem_used[i]) {
3626 io_mem_used[i] = 1;
3627 return i;
3628 }
Riku Voipioc6703b42009-12-03 15:56:05 +02003629 fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
aliguori88715652009-02-11 15:20:58 +00003630 return -1;
3631}
3632
Alexander Grafdd310532010-12-08 12:05:36 +01003633/*
3634 * Usually, devices operate in little endian mode. There are devices out
3635 * there that operate in big endian too. Each device gets byte swapped
3636 * mmio if plugged onto a CPU that does the other endianness.
3637 *
3638 * CPU Device swap?
3639 *
3640 * little little no
3641 * little big yes
3642 * big little yes
3643 * big big no
3644 */
3645
3646typedef struct SwapEndianContainer {
3647 CPUReadMemoryFunc *read[3];
3648 CPUWriteMemoryFunc *write[3];
3649 void *opaque;
3650} SwapEndianContainer;
3651
3652static uint32_t swapendian_mem_readb (void *opaque, target_phys_addr_t addr)
3653{
3654 uint32_t val;
3655 SwapEndianContainer *c = opaque;
3656 val = c->read[0](c->opaque, addr);
3657 return val;
3658}
3659
3660static uint32_t swapendian_mem_readw(void *opaque, target_phys_addr_t addr)
3661{
3662 uint32_t val;
3663 SwapEndianContainer *c = opaque;
3664 val = bswap16(c->read[1](c->opaque, addr));
3665 return val;
3666}
3667
3668static uint32_t swapendian_mem_readl(void *opaque, target_phys_addr_t addr)
3669{
3670 uint32_t val;
3671 SwapEndianContainer *c = opaque;
3672 val = bswap32(c->read[2](c->opaque, addr));
3673 return val;
3674}
3675
3676static CPUReadMemoryFunc * const swapendian_readfn[3] = {
3677 swapendian_mem_readb,
3678 swapendian_mem_readw,
3679 swapendian_mem_readl
3680};
3681
3682static void swapendian_mem_writeb(void *opaque, target_phys_addr_t addr,
3683 uint32_t val)
3684{
3685 SwapEndianContainer *c = opaque;
3686 c->write[0](c->opaque, addr, val);
3687}
3688
3689static void swapendian_mem_writew(void *opaque, target_phys_addr_t addr,
3690 uint32_t val)
3691{
3692 SwapEndianContainer *c = opaque;
3693 c->write[1](c->opaque, addr, bswap16(val));
3694}
3695
3696static void swapendian_mem_writel(void *opaque, target_phys_addr_t addr,
3697 uint32_t val)
3698{
3699 SwapEndianContainer *c = opaque;
3700 c->write[2](c->opaque, addr, bswap32(val));
3701}
3702
3703static CPUWriteMemoryFunc * const swapendian_writefn[3] = {
3704 swapendian_mem_writeb,
3705 swapendian_mem_writew,
3706 swapendian_mem_writel
3707};
3708
3709static void swapendian_init(int io_index)
3710{
Anthony Liguori7267c092011-08-20 22:09:37 -05003711 SwapEndianContainer *c = g_malloc(sizeof(SwapEndianContainer));
Alexander Grafdd310532010-12-08 12:05:36 +01003712 int i;
3713
3714 /* Swap mmio for big endian targets */
3715 c->opaque = io_mem_opaque[io_index];
3716 for (i = 0; i < 3; i++) {
3717 c->read[i] = io_mem_read[io_index][i];
3718 c->write[i] = io_mem_write[io_index][i];
3719
3720 io_mem_read[io_index][i] = swapendian_readfn[i];
3721 io_mem_write[io_index][i] = swapendian_writefn[i];
3722 }
3723 io_mem_opaque[io_index] = c;
3724}
3725
3726static void swapendian_del(int io_index)
3727{
3728 if (io_mem_read[io_index][0] == swapendian_readfn[0]) {
Anthony Liguori7267c092011-08-20 22:09:37 -05003729 g_free(io_mem_opaque[io_index]);
Alexander Grafdd310532010-12-08 12:05:36 +01003730 }
3731}
3732
bellard33417e72003-08-10 21:47:01 +00003733/* mem_read and mem_write are arrays of function pointers for byte
3734 (index 0), word (index 1) and dword (index
Paul Brook0b4e6e32009-04-30 18:37:55 +01003735 2). Functions can be omitted with a NULL function pointer.
blueswir13ee89922008-01-02 19:45:26 +00003736 If io_index is non-zero, the corresponding io zone is
blueswir14254fab2008-01-01 16:57:19 +00003737 modified. If it is zero, a new io zone is allocated. The return
3738 value can be used with cpu_register_physical_memory(). (-1) is
3739 returned on error. */
Avi Kivity1eed09c2009-06-14 11:38:51 +03003740static int cpu_register_io_memory_fixed(int io_index,
Blue Swirld60efc62009-08-25 18:29:31 +00003741 CPUReadMemoryFunc * const *mem_read,
3742 CPUWriteMemoryFunc * const *mem_write,
Alexander Grafdd310532010-12-08 12:05:36 +01003743 void *opaque, enum device_endian endian)
bellard33417e72003-08-10 21:47:01 +00003744{
Richard Henderson3cab7212010-05-07 09:52:51 -07003745 int i;
3746
bellard33417e72003-08-10 21:47:01 +00003747 if (io_index <= 0) {
aliguori88715652009-02-11 15:20:58 +00003748 io_index = get_free_io_mem_idx();
3749 if (io_index == -1)
3750 return io_index;
bellard33417e72003-08-10 21:47:01 +00003751 } else {
Avi Kivity1eed09c2009-06-14 11:38:51 +03003752 io_index >>= IO_MEM_SHIFT;
bellard33417e72003-08-10 21:47:01 +00003753 if (io_index >= IO_MEM_NB_ENTRIES)
3754 return -1;
3755 }
bellardb5ff1b32005-11-26 10:38:39 +00003756
Richard Henderson3cab7212010-05-07 09:52:51 -07003757 for (i = 0; i < 3; ++i) {
3758 io_mem_read[io_index][i]
3759 = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
3760 }
3761 for (i = 0; i < 3; ++i) {
3762 io_mem_write[io_index][i]
3763 = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
3764 }
bellarda4193c82004-06-03 14:01:43 +00003765 io_mem_opaque[io_index] = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003766
Alexander Grafdd310532010-12-08 12:05:36 +01003767 switch (endian) {
3768 case DEVICE_BIG_ENDIAN:
3769#ifndef TARGET_WORDS_BIGENDIAN
3770 swapendian_init(io_index);
3771#endif
3772 break;
3773 case DEVICE_LITTLE_ENDIAN:
3774#ifdef TARGET_WORDS_BIGENDIAN
3775 swapendian_init(io_index);
3776#endif
3777 break;
3778 case DEVICE_NATIVE_ENDIAN:
3779 default:
3780 break;
3781 }
3782
Richard Hendersonf6405242010-04-22 16:47:31 -07003783 return (io_index << IO_MEM_SHIFT);
bellard33417e72003-08-10 21:47:01 +00003784}
bellard61382a52003-10-27 21:22:23 +00003785
Blue Swirld60efc62009-08-25 18:29:31 +00003786int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3787 CPUWriteMemoryFunc * const *mem_write,
Alexander Grafdd310532010-12-08 12:05:36 +01003788 void *opaque, enum device_endian endian)
Avi Kivity1eed09c2009-06-14 11:38:51 +03003789{
Alexander Graf2507c122010-12-08 12:05:37 +01003790 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque, endian);
Avi Kivity1eed09c2009-06-14 11:38:51 +03003791}
3792
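/* Usage sketch (illustrative, not from the original source): a
   little-endian device model registers its handlers; on a big-endian
   target the swapendian wrappers above are interposed automatically.
   All names here are hypothetical. */
#if 0
static uint32_t example_dev_readl(void *opaque, target_phys_addr_t addr)
{
    return 0;   /* a real device decodes 'addr' and returns register state */
}

static void example_dev_writel(void *opaque, target_phys_addr_t addr,
                               uint32_t val)
{
    /* register write side effects go here */
}

/* NULL slots fall back to the unassigned_mem_* handlers. */
static CPUReadMemoryFunc * const example_dev_read[3] = {
    NULL, NULL, example_dev_readl,
};
static CPUWriteMemoryFunc * const example_dev_write[3] = {
    NULL, NULL, example_dev_writel,
};

static int example_dev_init(void *opaque)
{
    return cpu_register_io_memory(example_dev_read, example_dev_write,
                                  opaque, DEVICE_LITTLE_ENDIAN);
}
#endif
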
aliguori88715652009-02-11 15:20:58 +00003793void cpu_unregister_io_memory(int io_table_address)
3794{
3795 int i;
3796 int io_index = io_table_address >> IO_MEM_SHIFT;
3797
Alexander Grafdd310532010-12-08 12:05:36 +01003798 swapendian_del(io_index);
3799
aliguori88715652009-02-11 15:20:58 +00003800 for (i = 0; i < 3; i++) {
3801 io_mem_read[io_index][i] = unassigned_mem_read[i];
3802 io_mem_write[io_index][i] = unassigned_mem_write[i];
3803 }
3804 io_mem_opaque[io_index] = NULL;
3805 io_mem_used[io_index] = 0;
3806}
3807
Avi Kivitye9179ce2009-06-14 11:38:52 +03003808static void io_mem_init(void)
3809{
3810 int i;
3811
Alexander Graf2507c122010-12-08 12:05:37 +01003812 cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
3813 unassigned_mem_write, NULL,
3814 DEVICE_NATIVE_ENDIAN);
3815 cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
3816 unassigned_mem_write, NULL,
3817 DEVICE_NATIVE_ENDIAN);
3818 cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
3819 notdirty_mem_write, NULL,
3820 DEVICE_NATIVE_ENDIAN);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003821 for (i=0; i<5; i++)
3822 io_mem_used[i] = 1;
3823
3824 io_mem_watch = cpu_register_io_memory(watch_mem_read,
Alexander Graf2507c122010-12-08 12:05:37 +01003825 watch_mem_write, NULL,
3826 DEVICE_NATIVE_ENDIAN);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003827}
3828
Avi Kivity62152b82011-07-26 14:26:14 +03003829static void memory_map_init(void)
3830{
Anthony Liguori7267c092011-08-20 22:09:37 -05003831 system_memory = g_malloc(sizeof(*system_memory));
Avi Kivity8417ceb2011-08-03 11:56:14 +03003832 memory_region_init(system_memory, "system", INT64_MAX);
Avi Kivity62152b82011-07-26 14:26:14 +03003833 set_system_memory_map(system_memory);
Avi Kivity309cb472011-08-08 16:09:03 +03003834
Anthony Liguori7267c092011-08-20 22:09:37 -05003835 system_io = g_malloc(sizeof(*system_io));
Avi Kivity309cb472011-08-08 16:09:03 +03003836 memory_region_init(system_io, "io", 65536);
3837 set_system_io_map(system_io);
Avi Kivity62152b82011-07-26 14:26:14 +03003838}
3839
3840MemoryRegion *get_system_memory(void)
3841{
3842 return system_memory;
3843}
3844
Avi Kivity309cb472011-08-08 16:09:03 +03003845MemoryRegion *get_system_io(void)
3846{
3847 return system_io;
3848}
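
#if 0   /* Usage sketch (editor's addition): a board model attaching RAM to
           the root region returned by get_system_memory().  This assumes
           the memory_region_init_ram()/memory_region_add_subregion()
           helpers from the new memory API (memory.h); the region name and
           size are hypothetical. */
static void demo_board_ram_init(void)
{
    MemoryRegion *ram = g_malloc(sizeof(*ram));

    memory_region_init_ram(ram, NULL, "demo.ram", 128 * 1024 * 1024);
    /* Map 128 MiB of RAM at guest physical address 0. */
    memory_region_add_subregion(get_system_memory(), 0, ram);
}
#endif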

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    ram_addr_t pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                ram_addr_t addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                qemu_put_ram_ptr(ptr);
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
                memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
                qemu_put_ram_ptr(ptr);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
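
#if 0   /* Usage sketch (editor's addition): DMA-style guest memory access
           through the slow path above.  The address and payload are
           hypothetical; cpu_physical_memory_read()/write() are the
           convenience wrappers around cpu_physical_memory_rw(). */
static void demo_dma_copy(void)
{
    uint8_t payload[16] = "hello, guest";
    uint8_t readback[16];

    /* The function handles the RAM vs I/O distinction itself and splits
       the transfer at page boundaries. */
    cpu_physical_memory_write(0x20000000, payload, sizeof(payload));
    cpu_physical_memory_read(0x20000000, readback, sizeof(readback));
}
#endif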

/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            qemu_put_ram_ptr(ptr);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
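
#if 0   /* Usage sketch (editor's addition): loading a firmware image.
           Unlike cpu_physical_memory_write(), the ROM variant above also
           stores into pages registered as ROM/ROMD.  blob, blob_size and
           the load address are hypothetical. */
static void demo_load_firmware(const uint8_t *blob, int blob_size)
{
    cpu_physical_memory_write_rom(0xfffc0000, blob, blob_size);
}
#endif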

typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t todo = 0;
    int l;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    ram_addr_t raddr = RAM_ADDR_MAX;
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_read(addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        }

        len -= l;
        addr += l;
        todo += l;
    }
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
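
#if 0   /* Usage sketch (editor's addition): the map/use/unmap pattern the
           comments above describe.  A real caller (e.g. a DMA controller)
           would register a callback via cpu_register_map_client() and retry
           when NULL is returned; the address, length and fill pattern here
           are hypothetical. */
static void demo_map_and_fill(target_phys_addr_t addr, target_phys_addr_t len)
{
    target_phys_addr_t plen = len;
    void *host = cpu_physical_memory_map(addr, &plen, 1 /* is_write */);

    if (!host) {
        /* Resources exhausted (e.g. bounce buffer busy): retry later. */
        return;
    }
    /* Only plen bytes were mapped; it may be less than requested. */
    memset(host, 0, plen);
    cpu_physical_memory_unmap(host, plen, 1, plen);
}
#endif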

/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

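#if 0   /* Usage sketch (editor's addition): pick the load variant by the
           endianness of the *data*, not of the host or target.  The address
           is hypothetical. */
static void demo_endian_loads(void)
{
    uint32_t native = ldl_phys(0x1000);     /* target byte order */
    uint32_t le     = ldl_le_phys(0x1000);  /* e.g. little-endian descriptor */
    uint32_t be     = ldl_be_phys(0x1000);  /* e.g. network byte order */

    (void)native; (void)le; (void)be;
}
#endif
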
/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;

        /* XXX This is broken when device endian != cpu endian.
           Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
                                          enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned. The RAM page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

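#if 0   /* Usage sketch (editor's addition): why the notdirty stores exist.
           A softmmu page-table walker that sets accessed/dirty bits in a
           guest PTE uses stl_phys_notdirty() so the store itself does not
           flag the page as modified.  pte_addr and the PTE bit layout are
           hypothetical. */
static void demo_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    pte |= (1u << 5);                   /* hypothetical "accessed" bit */
    stl_phys_notdirty(pte_addr, pte);   /* no dirty marking, no TB flush */
}
#endif
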
/* warning: addr must be aligned */
static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
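
#if 0   /* Usage sketch (editor's addition): how a debugger stub pokes guest
           virtual memory.  Because the debug path goes through
           cpu_physical_memory_write_rom(), a breakpoint byte can be planted
           even in ROM-backed pages.  The 0xcc opcode is an x86-ism used
           purely as illustration. */
static int demo_plant_breakpoint(CPUState *env, target_ulong vaddr)
{
    uint8_t int3 = 0xcc;

    return cpu_memory_rw_debug(env, vaddr, &int3, 1, 1 /* is_write */);
}
#endif
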
#endif

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred. */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn. */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB. If this is not
       the first instruction in a TB then re-execute the preceding
       branch. */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen. */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception. In practice
       we have already translated the block once so it's probably ok. */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB. */
    cpu_resume_from_signal(env, NULL);
}

#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif