/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump.  ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list) };

static MemoryRegion *system_memory;

#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
int64_t qemu_icount;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define P_L1_BITS_REM \
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

/* Size of the L1 page table.  Avoid silly small sizes.  */
#if P_L1_BITS_REM < 4
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
#else
#define P_L1_BITS  P_L1_BITS_REM
#endif

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
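/* Worked example (illustrative only): with TARGET_PAGE_BITS == 12,
   L1_MAP_ADDR_SPACE_BITS == 32 and L2_BITS == 10, the 20-bit virtual
   page number splits into V_L1_BITS == 10 plus one bottom level of 10
   bits: address bits [31:22] index l1_map and bits [21:12] index the
   PageDesc array it points to.  Wider address spaces simply insert
   more 10-bit intermediate levels between the two. */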

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to PhysPageDesc.  */
static void *l1_phys_map[P_L1_SIZE];

static void io_mem_init(void);
static void memory_map_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

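/* page_find_alloc() walks the multi-level map described above.  With
   alloc != 0, missing intermediate tables are allocated on demand, so
   the tree only grows for address ranges that are actually touched;
   with alloc == 0 it is a pure lookup that returns NULL as soon as a
   level is missing. */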
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use qemu_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = qemu_mallocz(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        int i;

        if (!alloc) {
            return NULL;
        }

        *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);

        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode; this will change when a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
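    /* Keep a reserve below the allocation threshold used by tb_alloc():
       TCG_MAX_OP_SIZE * OPC_BUF_SIZE should bound the host code emitted
       for a single TB, so a translation started just under the limit
       still fits in the buffer. */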
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now. */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

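/* The TB lists chained through page_next[] and jmp_next[] store a tag
   in the low two bits of each pointer: for page lists the tag is the
   page slot (0 or 1) the link lives in, and for jump lists the value 2
   marks the head of the circular list (jmp_first).  Hence the walkers
   below recover the slot with '& 3' and strip the tag with '& ~3'
   before dereferencing. */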
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

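/* Un-chaining works by re-pointing the jump at the instruction that
   immediately follows it in the generated code (tc_ptr +
   tb_next_offset[n]), so execution falls through to the TB epilogue
   instead of branching into another TB. */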
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the per-CPU jump cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

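/* set_bits() marks bits [start, start + len) in the byte array 'tab'.
   A worked example: set_bits(tab, 3, 7) sets bits 3..9, i.e.
   tab[0] |= 0xf8 (bits 3-7) and tab[1] |= 0x03 (bits 8-9). */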
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

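/* Translate one block starting at 'pc'.  A TB whose guest code crosses
   a page boundary records both physical pages (phys_pc and phys_page2)
   so that a write to either page invalidates it. */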
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size +
                             CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

/* invalidate all TBs which intersect with the target physical page
   starting in range [start, end[.  NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

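/* Fast path for small writes: once a page has a code bitmap, a write
   falls through to the full invalidation above only when it actually
   overlaps translated code; a page without a bitmap is always
   invalidated conservatively. */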
1133/* len must be <= 8 and start must be a multiple of len */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001134static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
bellard9fa3e852004-01-04 18:06:42 +00001135{
1136 PageDesc *p;
1137 int offset, b;
bellard59817cc2004-02-16 22:01:13 +00001138#if 0
bellarda4193c82004-06-03 14:01:43 +00001139 if (1) {
aliguori93fcfe32009-01-15 22:34:14 +00001140 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1141 cpu_single_env->mem_io_vaddr, len,
1142 cpu_single_env->eip,
1143 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
bellard59817cc2004-02-16 22:01:13 +00001144 }
1145#endif
bellard9fa3e852004-01-04 18:06:42 +00001146 p = page_find(start >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001147 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00001148 return;
1149 if (p->code_bitmap) {
1150 offset = start & ~TARGET_PAGE_MASK;
1151 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1152 if (b & ((1 << len) - 1))
1153 goto do_invalidate;
1154 } else {
1155 do_invalidate:
bellardd720b932004-04-25 17:57:43 +00001156 tb_invalidate_phys_page_range(start, start + len, 1);
bellard9fa3e852004-01-04 18:06:42 +00001157 }
1158}
1159
bellard9fa3e852004-01-04 18:06:42 +00001160#if !defined(CONFIG_SOFTMMU)
Paul Brook41c1b1c2010-03-12 16:54:58 +00001161static void tb_invalidate_phys_page(tb_page_addr_t addr,
bellardd720b932004-04-25 17:57:43 +00001162 unsigned long pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00001163{
aliguori6b917542008-11-18 19:46:41 +00001164 TranslationBlock *tb;
bellard9fa3e852004-01-04 18:06:42 +00001165 PageDesc *p;
aliguori6b917542008-11-18 19:46:41 +00001166 int n;
bellardd720b932004-04-25 17:57:43 +00001167#ifdef TARGET_HAS_PRECISE_SMC
aliguori6b917542008-11-18 19:46:41 +00001168 TranslationBlock *current_tb = NULL;
bellardd720b932004-04-25 17:57:43 +00001169 CPUState *env = cpu_single_env;
aliguori6b917542008-11-18 19:46:41 +00001170 int current_tb_modified = 0;
1171 target_ulong current_pc = 0;
1172 target_ulong current_cs_base = 0;
1173 int current_flags = 0;
bellardd720b932004-04-25 17:57:43 +00001174#endif
bellard9fa3e852004-01-04 18:06:42 +00001175
1176 addr &= TARGET_PAGE_MASK;
1177 p = page_find(addr >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001178 if (!p)
bellardfd6ce8f2003-05-14 19:00:11 +00001179 return;
1180 tb = p->first_tb;
bellardd720b932004-04-25 17:57:43 +00001181#ifdef TARGET_HAS_PRECISE_SMC
1182 if (tb && pc != 0) {
1183 current_tb = tb_find_pc(pc);
1184 }
1185#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001186 while (tb != NULL) {
bellard9fa3e852004-01-04 18:06:42 +00001187 n = (long)tb & 3;
1188 tb = (TranslationBlock *)((long)tb & ~3);
bellardd720b932004-04-25 17:57:43 +00001189#ifdef TARGET_HAS_PRECISE_SMC
1190 if (current_tb == tb &&
pbrook2e70f6e2008-06-29 01:03:05 +00001191 (current_tb->cflags & CF_COUNT_MASK) != 1) {
bellardd720b932004-04-25 17:57:43 +00001192 /* If we are modifying the current TB, we must stop
1193 its execution. We could be more precise by checking
1194 that the modification is after the current PC, but it
1195 would require a specialized function to partially
1196 restore the CPU state */
ths3b46e622007-09-17 08:09:54 +00001197
bellardd720b932004-04-25 17:57:43 +00001198 current_tb_modified = 1;
Stefan Weil618ba8e2011-04-18 06:39:53 +00001199 cpu_restore_state(current_tb, env, pc);
aliguori6b917542008-11-18 19:46:41 +00001200 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1201 &current_flags);
bellardd720b932004-04-25 17:57:43 +00001202 }
1203#endif /* TARGET_HAS_PRECISE_SMC */
bellard9fa3e852004-01-04 18:06:42 +00001204 tb_phys_invalidate(tb, addr);
1205 tb = tb->page_next[n];
bellardfd6ce8f2003-05-14 19:00:11 +00001206 }
1207 p->first_tb = NULL;
bellardd720b932004-04-25 17:57:43 +00001208#ifdef TARGET_HAS_PRECISE_SMC
1209 if (current_tb_modified) {
1210 /* we generate a block containing just the instruction
1211 modifying the memory. It will ensure that it cannot modify
1212 itself */
bellardea1c1802004-06-14 18:56:36 +00001213 env->current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001214 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
bellardd720b932004-04-25 17:57:43 +00001215 cpu_resume_from_signal(env, puc);
1216 }
1217#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001218}
bellard9fa3e852004-01-04 18:06:42 +00001219#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001220
1221/* add the tb in the target page and protect it if necessary */
ths5fafdf22007-09-16 21:08:06 +00001222static inline void tb_alloc_page(TranslationBlock *tb,
Paul Brook41c1b1c2010-03-12 16:54:58 +00001223 unsigned int n, tb_page_addr_t page_addr)
bellardfd6ce8f2003-05-14 19:00:11 +00001224{
1225 PageDesc *p;
Juan Quintela4429ab42011-06-02 01:53:44 +00001226#ifndef CONFIG_USER_ONLY
1227 bool page_already_protected;
1228#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001229
bellard9fa3e852004-01-04 18:06:42 +00001230 tb->page_addr[n] = page_addr;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001231 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
bellard9fa3e852004-01-04 18:06:42 +00001232 tb->page_next[n] = p->first_tb;
Juan Quintela4429ab42011-06-02 01:53:44 +00001233#ifndef CONFIG_USER_ONLY
1234 page_already_protected = p->first_tb != NULL;
1235#endif
bellard9fa3e852004-01-04 18:06:42 +00001236 p->first_tb = (TranslationBlock *)((long)tb | n);
1237 invalidate_page_bitmap(p);
1238
bellard107db442004-06-22 18:48:46 +00001239#if defined(TARGET_HAS_SMC) || 1
bellardd720b932004-04-25 17:57:43 +00001240
bellard9fa3e852004-01-04 18:06:42 +00001241#if defined(CONFIG_USER_ONLY)
bellardfd6ce8f2003-05-14 19:00:11 +00001242 if (p->flags & PAGE_WRITE) {
pbrook53a59602006-03-25 19:31:22 +00001243 target_ulong addr;
1244 PageDesc *p2;
bellard9fa3e852004-01-04 18:06:42 +00001245 int prot;
1246
bellardfd6ce8f2003-05-14 19:00:11 +00001247 /* force the host page as non writable (writes will have a
1248 page fault + mprotect overhead) */
pbrook53a59602006-03-25 19:31:22 +00001249 page_addr &= qemu_host_page_mask;
bellardfd6ce8f2003-05-14 19:00:11 +00001250 prot = 0;
pbrook53a59602006-03-25 19:31:22 +00001251 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1252 addr += TARGET_PAGE_SIZE) {
1253
1254 p2 = page_find (addr >> TARGET_PAGE_BITS);
1255 if (!p2)
1256 continue;
1257 prot |= p2->flags;
1258 p2->flags &= ~PAGE_WRITE;
pbrook53a59602006-03-25 19:31:22 +00001259 }
ths5fafdf22007-09-16 21:08:06 +00001260 mprotect(g2h(page_addr), qemu_host_page_size,
bellardfd6ce8f2003-05-14 19:00:11 +00001261 (prot & PAGE_BITS) & ~PAGE_WRITE);
1262#ifdef DEBUG_TB_INVALIDATE
blueswir1ab3d1722007-11-04 07:31:40 +00001263 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
pbrook53a59602006-03-25 19:31:22 +00001264 page_addr);
bellardfd6ce8f2003-05-14 19:00:11 +00001265#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001266 }
bellard9fa3e852004-01-04 18:06:42 +00001267#else
1268 /* if some code is already present, then the pages are already
1269 protected. So we handle the case where only the first TB is
1270 allocated in a physical page */
Juan Quintela4429ab42011-06-02 01:53:44 +00001271 if (!page_already_protected) {
bellard6a00d602005-11-21 23:25:50 +00001272 tlb_protect_code(page_addr);
bellard9fa3e852004-01-04 18:06:42 +00001273 }
1274#endif
bellardd720b932004-04-25 17:57:43 +00001275
1276#endif /* TARGET_HAS_SMC */
bellardfd6ce8f2003-05-14 19:00:11 +00001277}
1278
bellard9fa3e852004-01-04 18:06:42 +00001279/* add a new TB and link it to the physical page tables. phys_page2 is
1280 (-1) to indicate that only one page contains the TB. */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001281void tb_link_page(TranslationBlock *tb,
1282 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
bellardd4e81642003-05-25 16:46:15 +00001283{
bellard9fa3e852004-01-04 18:06:42 +00001284 unsigned int h;
1285 TranslationBlock **ptb;
1286
pbrookc8a706f2008-06-02 16:16:42 +00001287 /* Grab the mmap lock to stop another thread invalidating this TB
1288 before we are done. */
1289 mmap_lock();
bellard9fa3e852004-01-04 18:06:42 +00001290 /* add in the physical hash table */
1291 h = tb_phys_hash_func(phys_pc);
1292 ptb = &tb_phys_hash[h];
1293 tb->phys_hash_next = *ptb;
1294 *ptb = tb;
bellardfd6ce8f2003-05-14 19:00:11 +00001295
1296 /* add in the page list */
bellard9fa3e852004-01-04 18:06:42 +00001297 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1298 if (phys_page2 != -1)
1299 tb_alloc_page(tb, 1, phys_page2);
1300 else
1301 tb->page_addr[1] = -1;
bellard9fa3e852004-01-04 18:06:42 +00001302
bellardd4e81642003-05-25 16:46:15 +00001303 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1304 tb->jmp_next[0] = NULL;
1305 tb->jmp_next[1] = NULL;
1306
1307 /* init original jump addresses */
1308 if (tb->tb_next_offset[0] != 0xffff)
1309 tb_reset_jump(tb, 0);
1310 if (tb->tb_next_offset[1] != 0xffff)
1311 tb_reset_jump(tb, 1);
bellard8a40a182005-11-20 10:35:40 +00001312
1313#ifdef DEBUG_TB_CHECK
1314 tb_page_check();
1315#endif
pbrookc8a706f2008-06-02 16:16:42 +00001316 mmap_unlock();
bellardfd6ce8f2003-05-14 19:00:11 +00001317}
1318
bellarda513fe12003-05-27 23:29:48 +00001319/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1320 tb[1].tc_ptr. Return NULL if not found */
1321TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1322{
1323 int m_min, m_max, m;
1324 unsigned long v;
1325 TranslationBlock *tb;
1326
1327 if (nb_tbs <= 0)
1328 return NULL;
1329 if (tc_ptr < (unsigned long)code_gen_buffer ||
1330 tc_ptr >= (unsigned long)code_gen_ptr)
1331 return NULL;
1332 /* binary search (cf Knuth) */
1333 m_min = 0;
1334 m_max = nb_tbs - 1;
1335 while (m_min <= m_max) {
1336 m = (m_min + m_max) >> 1;
1337 tb = &tbs[m];
1338 v = (unsigned long)tb->tc_ptr;
1339 if (v == tc_ptr)
1340 return tb;
1341 else if (tc_ptr < v) {
1342 m_max = m - 1;
1343 } else {
1344 m_min = m + 1;
1345 }
ths5fafdf22007-09-16 21:08:06 +00001346 }
bellarda513fe12003-05-27 23:29:48 +00001347 return &tbs[m_max];
1348}
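/* Editor's note (illustrative): the binary search above relies on tbs[]
   being ordered by tc_ptr, which holds because translated code is
   allocated sequentially from code_gen_buffer.  Worked example with
   hypothetical host addresses: for TBs whose code starts at 0x1000,
   0x1400 and 0x1900, a lookup of tc_ptr 0x1550 converges with m_max
   pointing at the TB at 0x1400, i.e. the block containing that host PC. */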
bellard75012672003-06-21 13:11:07 +00001349
bellardea041c02003-06-25 16:16:50 +00001350static void tb_reset_jump_recursive(TranslationBlock *tb);
1351
1352static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1353{
1354 TranslationBlock *tb1, *tb_next, **ptb;
1355 unsigned int n1;
1356
1357 tb1 = tb->jmp_next[n];
1358 if (tb1 != NULL) {
1359 /* find head of list */
1360 for(;;) {
1361 n1 = (long)tb1 & 3;
1362 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1363 if (n1 == 2)
1364 break;
1365 tb1 = tb1->jmp_next[n1];
1366 }
1367         /* we are now sure that tb jumps to tb1 */
1368 tb_next = tb1;
1369
1370 /* remove tb from the jmp_first list */
1371 ptb = &tb_next->jmp_first;
1372 for(;;) {
1373 tb1 = *ptb;
1374 n1 = (long)tb1 & 3;
1375 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1376 if (n1 == n && tb1 == tb)
1377 break;
1378 ptb = &tb1->jmp_next[n1];
1379 }
1380 *ptb = tb->jmp_next[n];
1381 tb->jmp_next[n] = NULL;
ths3b46e622007-09-17 08:09:54 +00001382
bellardea041c02003-06-25 16:16:50 +00001383 /* suppress the jump to next tb in generated code */
1384 tb_reset_jump(tb, n);
1385
bellard01243112004-01-04 15:48:17 +00001386         /* suppress jumps in the tb to which we could have jumped */
bellardea041c02003-06-25 16:16:50 +00001387 tb_reset_jump_recursive(tb_next);
1388 }
1389}
1390
1391static void tb_reset_jump_recursive(TranslationBlock *tb)
1392{
1393 tb_reset_jump_recursive2(tb, 0);
1394 tb_reset_jump_recursive2(tb, 1);
1395}
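/* Editor's note: an illustrative sketch, not part of the original file.
   jmp_first/jmp_next form a circular, tagged singly linked list of all
   TBs that chain-jump to a given TB.  The low two bits of each link
   select the jump slot (0 or 1) of the pointing TB; the value 2 marks
   the list head, as set in tb_link_page above.  Walking the list: */
#if 0
static void for_each_jumping_tb(TranslationBlock *tb)
{
    TranslationBlock *tb1 = tb->jmp_first;
    for (;;) {
        unsigned int n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;                /* back at the head: list exhausted */
        /* here tb1 jumps to tb through its jump slot n1 */
        tb1 = tb1->jmp_next[n1];
    }
}
#endif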
1396
bellard1fddef42005-04-17 19:16:13 +00001397#if defined(TARGET_HAS_ICE)
Paul Brook94df27f2010-02-28 23:47:45 +00001398#if defined(CONFIG_USER_ONLY)
1399static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1400{
1401 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1402}
1403#else
bellardd720b932004-04-25 17:57:43 +00001404static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1405{
Anthony Liguoric227f092009-10-01 16:12:16 -05001406 target_phys_addr_t addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00001407 target_ulong pd;
Anthony Liguoric227f092009-10-01 16:12:16 -05001408 ram_addr_t ram_addr;
pbrookc2f07f82006-04-08 17:14:56 +00001409 PhysPageDesc *p;
bellardd720b932004-04-25 17:57:43 +00001410
pbrookc2f07f82006-04-08 17:14:56 +00001411 addr = cpu_get_phys_page_debug(env, pc);
1412 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1413 if (!p) {
1414 pd = IO_MEM_UNASSIGNED;
1415 } else {
1416 pd = p->phys_offset;
1417 }
1418 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
pbrook706cd4b2006-04-08 17:36:21 +00001419 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
bellardd720b932004-04-25 17:57:43 +00001420}
bellardc27004e2005-01-03 23:35:10 +00001421#endif
Paul Brook94df27f2010-02-28 23:47:45 +00001422#endif /* TARGET_HAS_ICE */
bellardd720b932004-04-25 17:57:43 +00001423
Paul Brookc527ee82010-03-01 03:31:14 +00001424#if defined(CONFIG_USER_ONLY)
1425void cpu_watchpoint_remove_all(CPUState *env, int mask)
1427{
1428}
1429
1430int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1431 int flags, CPUWatchpoint **watchpoint)
1432{
1433 return -ENOSYS;
1434}
1435#else
pbrook6658ffb2007-03-16 23:58:11 +00001436/* Add a watchpoint. */
aliguoria1d1bb32008-11-18 20:07:32 +00001437int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1438 int flags, CPUWatchpoint **watchpoint)
pbrook6658ffb2007-03-16 23:58:11 +00001439{
aliguorib4051332008-11-18 20:14:20 +00001440 target_ulong len_mask = ~(len - 1);
aliguoric0ce9982008-11-25 22:13:57 +00001441 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001442
aliguorib4051332008-11-18 20:14:20 +00001443 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1444 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1445 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1446 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1447 return -EINVAL;
1448 }
aliguoria1d1bb32008-11-18 20:07:32 +00001449 wp = qemu_malloc(sizeof(*wp));
pbrook6658ffb2007-03-16 23:58:11 +00001450
aliguoria1d1bb32008-11-18 20:07:32 +00001451 wp->vaddr = addr;
aliguorib4051332008-11-18 20:14:20 +00001452 wp->len_mask = len_mask;
aliguoria1d1bb32008-11-18 20:07:32 +00001453 wp->flags = flags;
1454
aliguori2dc9f412008-11-18 20:56:59 +00001455 /* keep all GDB-injected watchpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001456 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001457 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001458 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001459 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001460
pbrook6658ffb2007-03-16 23:58:11 +00001461 tlb_flush_page(env, addr);
aliguoria1d1bb32008-11-18 20:07:32 +00001462
1463 if (watchpoint)
1464 *watchpoint = wp;
1465 return 0;
pbrook6658ffb2007-03-16 23:58:11 +00001466}
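/* Editor's note: a minimal usage sketch with hypothetical values, not
   part of the original file.  len must be a power of two (1/2/4/8) and
   addr aligned to it, or the sanity check above returns -EINVAL: */
#if 0
static void watchpoint_example(CPUState *env)
{
    CPUWatchpoint *wp;
    if (cpu_watchpoint_insert(env, 0x1000, 4,
                              BP_GDB | BP_MEM_WRITE, &wp) == 0) {
        /* ... run until the watchpoint fires ... */
        cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif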
1467
aliguoria1d1bb32008-11-18 20:07:32 +00001468/* Remove a specific watchpoint. */
1469int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1470 int flags)
pbrook6658ffb2007-03-16 23:58:11 +00001471{
aliguorib4051332008-11-18 20:14:20 +00001472 target_ulong len_mask = ~(len - 1);
aliguoria1d1bb32008-11-18 20:07:32 +00001473 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001474
Blue Swirl72cf2d42009-09-12 07:36:22 +00001475 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001476 if (addr == wp->vaddr && len_mask == wp->len_mask
aliguori6e140f22008-11-18 20:37:55 +00001477 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
aliguoria1d1bb32008-11-18 20:07:32 +00001478 cpu_watchpoint_remove_by_ref(env, wp);
pbrook6658ffb2007-03-16 23:58:11 +00001479 return 0;
1480 }
1481 }
aliguoria1d1bb32008-11-18 20:07:32 +00001482 return -ENOENT;
pbrook6658ffb2007-03-16 23:58:11 +00001483}
1484
aliguoria1d1bb32008-11-18 20:07:32 +00001485/* Remove a specific watchpoint by reference. */
1486void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1487{
Blue Swirl72cf2d42009-09-12 07:36:22 +00001488 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
edgar_igl7d03f822008-05-17 18:58:29 +00001489
aliguoria1d1bb32008-11-18 20:07:32 +00001490 tlb_flush_page(env, watchpoint->vaddr);
1491
1492 qemu_free(watchpoint);
edgar_igl7d03f822008-05-17 18:58:29 +00001493}
1494
aliguoria1d1bb32008-11-18 20:07:32 +00001495/* Remove all matching watchpoints. */
1496void cpu_watchpoint_remove_all(CPUState *env, int mask)
1497{
aliguoric0ce9982008-11-25 22:13:57 +00001498 CPUWatchpoint *wp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001499
Blue Swirl72cf2d42009-09-12 07:36:22 +00001500 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001501 if (wp->flags & mask)
1502 cpu_watchpoint_remove_by_ref(env, wp);
aliguoric0ce9982008-11-25 22:13:57 +00001503 }
aliguoria1d1bb32008-11-18 20:07:32 +00001504}
Paul Brookc527ee82010-03-01 03:31:14 +00001505#endif
aliguoria1d1bb32008-11-18 20:07:32 +00001506
1507/* Add a breakpoint. */
1508int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1509 CPUBreakpoint **breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001510{
bellard1fddef42005-04-17 19:16:13 +00001511#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001512 CPUBreakpoint *bp;
ths3b46e622007-09-17 08:09:54 +00001513
aliguoria1d1bb32008-11-18 20:07:32 +00001514 bp = qemu_malloc(sizeof(*bp));
aliguoria1d1bb32008-11-18 20:07:32 +00001515
1516 bp->pc = pc;
1517 bp->flags = flags;
1518
aliguori2dc9f412008-11-18 20:56:59 +00001519 /* keep all GDB-injected breakpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001520 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001521 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001522 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001523 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001524
1525 breakpoint_invalidate(env, pc);
1526
1527 if (breakpoint)
1528 *breakpoint = bp;
1529 return 0;
1530#else
1531 return -ENOSYS;
1532#endif
1533}
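/* Editor's note: a minimal usage sketch with a hypothetical pc, not
   part of the original file.  Keeping the returned reference makes the
   later removal O(1) instead of a list search: */
#if 0
static void breakpoint_example(CPUState *env)
{
    CPUBreakpoint *bp;
    if (cpu_breakpoint_insert(env, 0x8000, BP_GDB, &bp) == 0) {
        /* ... debug ... */
        cpu_breakpoint_remove_by_ref(env, bp);
    }
}
#endif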
1534
1535/* Remove a specific breakpoint. */
1536int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1537{
1538#if defined(TARGET_HAS_ICE)
1539 CPUBreakpoint *bp;
1540
Blue Swirl72cf2d42009-09-12 07:36:22 +00001541 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00001542 if (bp->pc == pc && bp->flags == flags) {
1543 cpu_breakpoint_remove_by_ref(env, bp);
bellard4c3a88a2003-07-26 12:06:08 +00001544 return 0;
aliguoria1d1bb32008-11-18 20:07:32 +00001545 }
bellard4c3a88a2003-07-26 12:06:08 +00001546 }
aliguoria1d1bb32008-11-18 20:07:32 +00001547 return -ENOENT;
bellard4c3a88a2003-07-26 12:06:08 +00001548#else
aliguoria1d1bb32008-11-18 20:07:32 +00001549 return -ENOSYS;
bellard4c3a88a2003-07-26 12:06:08 +00001550#endif
1551}
1552
aliguoria1d1bb32008-11-18 20:07:32 +00001553/* Remove a specific breakpoint by reference. */
1554void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001555{
bellard1fddef42005-04-17 19:16:13 +00001556#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001557 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
bellardd720b932004-04-25 17:57:43 +00001558
aliguoria1d1bb32008-11-18 20:07:32 +00001559 breakpoint_invalidate(env, breakpoint->pc);
1560
1561 qemu_free(breakpoint);
1562#endif
1563}
1564
1565/* Remove all matching breakpoints. */
1566void cpu_breakpoint_remove_all(CPUState *env, int mask)
1567{
1568#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001569 CPUBreakpoint *bp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001570
Blue Swirl72cf2d42009-09-12 07:36:22 +00001571 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001572 if (bp->flags & mask)
1573 cpu_breakpoint_remove_by_ref(env, bp);
aliguoric0ce9982008-11-25 22:13:57 +00001574 }
bellard4c3a88a2003-07-26 12:06:08 +00001575#endif
1576}
1577
bellardc33a3462003-07-29 20:50:33 +00001578/* enable or disable single step mode. EXCP_DEBUG is returned by the
1579 CPU loop after each instruction */
1580void cpu_single_step(CPUState *env, int enabled)
1581{
bellard1fddef42005-04-17 19:16:13 +00001582#if defined(TARGET_HAS_ICE)
bellardc33a3462003-07-29 20:50:33 +00001583 if (env->singlestep_enabled != enabled) {
1584 env->singlestep_enabled = enabled;
aliguorie22a25c2009-03-12 20:12:48 +00001585 if (kvm_enabled())
1586 kvm_update_guest_debug(env, 0);
1587 else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01001588 /* must flush all the translated code to avoid inconsistencies */
aliguorie22a25c2009-03-12 20:12:48 +00001589 /* XXX: only flush what is necessary */
1590 tb_flush(env);
1591 }
bellardc33a3462003-07-29 20:50:33 +00001592 }
1593#endif
1594}
1595
bellard34865132003-10-05 14:28:56 +00001596/* enable or disable low levels log */
1597void cpu_set_log(int log_flags)
1598{
1599 loglevel = log_flags;
1600 if (loglevel && !logfile) {
pbrook11fcfab2007-07-01 18:21:11 +00001601 logfile = fopen(logfilename, log_append ? "a" : "w");
bellard34865132003-10-05 14:28:56 +00001602 if (!logfile) {
1603 perror(logfilename);
1604 _exit(1);
1605 }
bellard9fa3e852004-01-04 18:06:42 +00001606#if !defined(CONFIG_SOFTMMU)
1607 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1608 {
blueswir1b55266b2008-09-20 08:07:15 +00001609 static char logfile_buf[4096];
bellard9fa3e852004-01-04 18:06:42 +00001610 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1611 }
Filip Navarabf65f532009-07-27 10:02:04 -05001612#elif !defined(_WIN32)
1613 /* Win32 doesn't support line-buffering and requires size >= 2 */
bellard34865132003-10-05 14:28:56 +00001614 setvbuf(logfile, NULL, _IOLBF, 0);
bellard9fa3e852004-01-04 18:06:42 +00001615#endif
pbrooke735b912007-06-30 13:53:24 +00001616 log_append = 1;
1617 }
1618 if (!loglevel && logfile) {
1619 fclose(logfile);
1620 logfile = NULL;
bellard34865132003-10-05 14:28:56 +00001621 }
1622}
1623
1624void cpu_set_log_filename(const char *filename)
1625{
1626 logfilename = strdup(filename);
pbrooke735b912007-06-30 13:53:24 +00001627 if (logfile) {
1628 fclose(logfile);
1629 logfile = NULL;
1630 }
1631 cpu_set_log(loglevel);
bellard34865132003-10-05 14:28:56 +00001632}
bellardc33a3462003-07-29 20:50:33 +00001633
aurel323098dba2009-03-07 21:28:24 +00001634static void cpu_unlink_tb(CPUState *env)
bellardea041c02003-06-25 16:16:50 +00001635{
pbrookd5975362008-06-07 20:50:51 +00001636 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1637 problem and hope the cpu will stop of its own accord. For userspace
1638 emulation this often isn't actually as bad as it sounds. Often
1639 signals are used primarily to interrupt blocking syscalls. */
aurel323098dba2009-03-07 21:28:24 +00001640 TranslationBlock *tb;
Anthony Liguoric227f092009-10-01 16:12:16 -05001641 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
aurel323098dba2009-03-07 21:28:24 +00001642
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001643 spin_lock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001644 tb = env->current_tb;
1645 /* if the cpu is currently executing code, we must unlink it and
1646 all the potentially executing TB */
Riku Voipiof76cfe52009-12-04 15:16:30 +02001647 if (tb) {
aurel323098dba2009-03-07 21:28:24 +00001648 env->current_tb = NULL;
1649 tb_reset_jump_recursive(tb);
aurel323098dba2009-03-07 21:28:24 +00001650 }
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001651 spin_unlock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001652}
1653
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001654#ifndef CONFIG_USER_ONLY
aurel323098dba2009-03-07 21:28:24 +00001655/* mask must never be zero, except for A20 change call */
Jan Kiszkaec6959d2011-04-13 01:32:56 +02001656static void tcg_handle_interrupt(CPUState *env, int mask)
aurel323098dba2009-03-07 21:28:24 +00001657{
1658 int old_mask;
1659
1660 old_mask = env->interrupt_request;
1661 env->interrupt_request |= mask;
1662
aliguori8edac962009-04-24 18:03:45 +00001663 /*
1664 * If called from iothread context, wake the target cpu in
1665     * case it's halted.
1666 */
Jan Kiszkab7680cb2011-03-12 17:43:51 +01001667 if (!qemu_cpu_is_self(env)) {
aliguori8edac962009-04-24 18:03:45 +00001668 qemu_cpu_kick(env);
1669 return;
1670 }
aliguori8edac962009-04-24 18:03:45 +00001671
pbrook2e70f6e2008-06-29 01:03:05 +00001672 if (use_icount) {
pbrook266910c2008-07-09 15:31:50 +00001673 env->icount_decr.u16.high = 0xffff;
pbrook2e70f6e2008-06-29 01:03:05 +00001674 if (!can_do_io(env)
aurel32be214e62009-03-06 21:48:00 +00001675 && (mask & ~old_mask) != 0) {
pbrook2e70f6e2008-06-29 01:03:05 +00001676 cpu_abort(env, "Raised interrupt while not in I/O function");
1677 }
pbrook2e70f6e2008-06-29 01:03:05 +00001678 } else {
aurel323098dba2009-03-07 21:28:24 +00001679 cpu_unlink_tb(env);
bellardea041c02003-06-25 16:16:50 +00001680 }
1681}
1682
Jan Kiszkaec6959d2011-04-13 01:32:56 +02001683CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1684
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001685#else /* CONFIG_USER_ONLY */
1686
1687void cpu_interrupt(CPUState *env, int mask)
1688{
1689 env->interrupt_request |= mask;
1690 cpu_unlink_tb(env);
1691}
1692#endif /* CONFIG_USER_ONLY */
1693
bellardb54ad042004-05-20 13:42:52 +00001694void cpu_reset_interrupt(CPUState *env, int mask)
1695{
1696 env->interrupt_request &= ~mask;
1697}
1698
aurel323098dba2009-03-07 21:28:24 +00001699void cpu_exit(CPUState *env)
1700{
1701 env->exit_request = 1;
1702 cpu_unlink_tb(env);
1703}
1704
blueswir1c7cd6a32008-10-02 18:27:46 +00001705const CPULogItem cpu_log_items[] = {
ths5fafdf22007-09-16 21:08:06 +00001706 { CPU_LOG_TB_OUT_ASM, "out_asm",
bellardf193c792004-03-21 17:06:25 +00001707 "show generated host assembly code for each compiled TB" },
1708 { CPU_LOG_TB_IN_ASM, "in_asm",
1709 "show target assembly code for each compiled TB" },
ths5fafdf22007-09-16 21:08:06 +00001710 { CPU_LOG_TB_OP, "op",
bellard57fec1f2008-02-01 10:50:11 +00001711 "show micro ops for each compiled TB" },
bellardf193c792004-03-21 17:06:25 +00001712 { CPU_LOG_TB_OP_OPT, "op_opt",
blueswir1e01a1152008-03-14 17:37:11 +00001713 "show micro ops "
1714#ifdef TARGET_I386
1715 "before eflags optimization and "
bellardf193c792004-03-21 17:06:25 +00001716#endif
blueswir1e01a1152008-03-14 17:37:11 +00001717 "after liveness analysis" },
bellardf193c792004-03-21 17:06:25 +00001718 { CPU_LOG_INT, "int",
1719 "show interrupts/exceptions in short format" },
1720 { CPU_LOG_EXEC, "exec",
1721 "show trace before each executed TB (lots of logs)" },
bellard9fddaa02004-05-21 12:59:32 +00001722 { CPU_LOG_TB_CPU, "cpu",
thse91c8a72007-06-03 13:35:16 +00001723 "show CPU state before block translation" },
bellardf193c792004-03-21 17:06:25 +00001724#ifdef TARGET_I386
1725 { CPU_LOG_PCALL, "pcall",
1726 "show protected mode far calls/returns/exceptions" },
aliguorieca1bdf2009-01-26 19:54:31 +00001727 { CPU_LOG_RESET, "cpu_reset",
1728 "show CPU state before CPU resets" },
bellardf193c792004-03-21 17:06:25 +00001729#endif
bellard8e3a9fd2004-10-09 17:32:58 +00001730#ifdef DEBUG_IOPORT
bellardfd872592004-05-12 19:11:15 +00001731 { CPU_LOG_IOPORT, "ioport",
1732 "show all i/o ports accesses" },
bellard8e3a9fd2004-10-09 17:32:58 +00001733#endif
bellardf193c792004-03-21 17:06:25 +00001734 { 0, NULL, NULL },
1735};
1736
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001737#ifndef CONFIG_USER_ONLY
1738static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
1739 = QLIST_HEAD_INITIALIZER(memory_client_list);
1740
1741static void cpu_notify_set_memory(target_phys_addr_t start_addr,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001742 ram_addr_t size,
Michael S. Tsirkin0fd542f2011-04-06 22:25:38 +03001743 ram_addr_t phys_offset,
1744 bool log_dirty)
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001745{
1746 CPUPhysMemoryClient *client;
1747 QLIST_FOREACH(client, &memory_client_list, list) {
Michael S. Tsirkin0fd542f2011-04-06 22:25:38 +03001748 client->set_memory(client, start_addr, size, phys_offset, log_dirty);
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001749 }
1750}
1751
1752static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001753 target_phys_addr_t end)
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001754{
1755 CPUPhysMemoryClient *client;
1756 QLIST_FOREACH(client, &memory_client_list, list) {
1757 int r = client->sync_dirty_bitmap(client, start, end);
1758 if (r < 0)
1759 return r;
1760 }
1761 return 0;
1762}
1763
1764static int cpu_notify_migration_log(int enable)
1765{
1766 CPUPhysMemoryClient *client;
1767 QLIST_FOREACH(client, &memory_client_list, list) {
1768 int r = client->migration_log(client, enable);
1769 if (r < 0)
1770 return r;
1771 }
1772 return 0;
1773}
1774
Alex Williamson2173a752011-05-03 12:36:58 -06001775struct last_map {
1776 target_phys_addr_t start_addr;
1777 ram_addr_t size;
1778 ram_addr_t phys_offset;
1779};
1780
Alex Williamson8d4c78e2011-05-03 12:36:46 -06001781/* The l1_phys_map provides the upper P_L1_BITs of the guest physical
1782 * address. Each intermediate table provides the next L2_BITs of guest
1783 * physical address space. The number of levels varies based on host and
1784 * guest configuration, making it efficient to build the final guest
1785 * physical address by seeding the L1 offset and shifting and adding in
1786 * each L2 offset as we recurse through them. */
Alex Williamson2173a752011-05-03 12:36:58 -06001787static void phys_page_for_each_1(CPUPhysMemoryClient *client, int level,
1788 void **lp, target_phys_addr_t addr,
1789 struct last_map *map)
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001790{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001791 int i;
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001792
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001793 if (*lp == NULL) {
1794 return;
1795 }
1796 if (level == 0) {
1797 PhysPageDesc *pd = *lp;
Alex Williamson8d4c78e2011-05-03 12:36:46 -06001798 addr <<= L2_BITS + TARGET_PAGE_BITS;
Paul Brook7296aba2010-03-14 14:58:46 +00001799 for (i = 0; i < L2_SIZE; ++i) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001800 if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
Alex Williamson2173a752011-05-03 12:36:58 -06001801 target_phys_addr_t start_addr = addr | i << TARGET_PAGE_BITS;
1802
1803 if (map->size &&
1804 start_addr == map->start_addr + map->size &&
1805 pd[i].phys_offset == map->phys_offset + map->size) {
1806
1807 map->size += TARGET_PAGE_SIZE;
1808 continue;
1809 } else if (map->size) {
1810 client->set_memory(client, map->start_addr,
1811 map->size, map->phys_offset, false);
1812 }
1813
1814 map->start_addr = start_addr;
1815 map->size = TARGET_PAGE_SIZE;
1816 map->phys_offset = pd[i].phys_offset;
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001817 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001818 }
1819 } else {
1820 void **pp = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00001821 for (i = 0; i < L2_SIZE; ++i) {
Alex Williamson8d4c78e2011-05-03 12:36:46 -06001822 phys_page_for_each_1(client, level - 1, pp + i,
Alex Williamson2173a752011-05-03 12:36:58 -06001823 (addr << L2_BITS) | i, map);
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001824 }
1825 }
1826}
1827
1828static void phys_page_for_each(CPUPhysMemoryClient *client)
1829{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001830 int i;
Alex Williamson2173a752011-05-03 12:36:58 -06001831 struct last_map map = { };
1832
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001833 for (i = 0; i < P_L1_SIZE; ++i) {
1834 phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
Alex Williamson2173a752011-05-03 12:36:58 -06001835 l1_phys_map + i, i, &map);
1836 }
1837 if (map.size) {
1838 client->set_memory(client, map.start_addr, map.size, map.phys_offset,
1839 false);
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001840 }
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001841}
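/* Editor's note (illustrative): struct last_map lets the recursion above
   coalesce physically contiguous pages, so a client that registers while
   e.g. 128MB of contiguous RAM is mapped receives one set_memory()
   callback for the whole run rather than one per TARGET_PAGE_SIZE page.
   Two adjacent pages merge when both the guest address and the
   phys_offset continue exactly where the previous run ended. */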
1842
1843void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
1844{
1845 QLIST_INSERT_HEAD(&memory_client_list, client, list);
1846 phys_page_for_each(client);
1847}
1848
1849void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
1850{
1851 QLIST_REMOVE(client, list);
1852}
1853#endif
1854
bellardf193c792004-03-21 17:06:25 +00001855static int cmp1(const char *s1, int n, const char *s2)
1856{
1857 if (strlen(s2) != n)
1858 return 0;
1859 return memcmp(s1, s2, n) == 0;
1860}
ths3b46e622007-09-17 08:09:54 +00001861
bellardf193c792004-03-21 17:06:25 +00001862/* takes a comma separated list of log masks. Return 0 if error. */
1863int cpu_str_to_log_mask(const char *str)
1864{
blueswir1c7cd6a32008-10-02 18:27:46 +00001865 const CPULogItem *item;
bellardf193c792004-03-21 17:06:25 +00001866 int mask;
1867 const char *p, *p1;
1868
1869 p = str;
1870 mask = 0;
1871 for(;;) {
1872 p1 = strchr(p, ',');
1873 if (!p1)
1874 p1 = p + strlen(p);
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001875 if(cmp1(p,p1-p,"all")) {
1876 for(item = cpu_log_items; item->mask != 0; item++) {
1877 mask |= item->mask;
1878 }
1879 } else {
1880 for(item = cpu_log_items; item->mask != 0; item++) {
1881 if (cmp1(p, p1 - p, item->name))
1882 goto found;
1883 }
1884 return 0;
bellardf193c792004-03-21 17:06:25 +00001885 }
bellardf193c792004-03-21 17:06:25 +00001886 found:
1887 mask |= item->mask;
1888 if (*p1 != ',')
1889 break;
1890 p = p1 + 1;
1891 }
1892 return mask;
1893}
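/* Editor's note: a minimal usage sketch, not part of the original file;
   the log string is just an example.  This mirrors how the -d command
   line option is typically wired up: */
#if 0
static void log_option_example(void)
{
    int mask = cpu_str_to_log_mask("in_asm,exec");
    if (mask == 0)
        fprintf(stderr, "unknown -d item\n");
    else
        cpu_set_log(mask);      /* opens the log file on first use */
}
#endif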
bellardea041c02003-06-25 16:16:50 +00001894
bellard75012672003-06-21 13:11:07 +00001895void cpu_abort(CPUState *env, const char *fmt, ...)
1896{
1897 va_list ap;
pbrook493ae1f2007-11-23 16:53:59 +00001898 va_list ap2;
bellard75012672003-06-21 13:11:07 +00001899
1900 va_start(ap, fmt);
pbrook493ae1f2007-11-23 16:53:59 +00001901 va_copy(ap2, ap);
bellard75012672003-06-21 13:11:07 +00001902 fprintf(stderr, "qemu: fatal: ");
1903 vfprintf(stderr, fmt, ap);
1904 fprintf(stderr, "\n");
1905#ifdef TARGET_I386
bellard7fe48482004-10-09 18:08:01 +00001906 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1907#else
1908 cpu_dump_state(env, stderr, fprintf, 0);
bellard75012672003-06-21 13:11:07 +00001909#endif
aliguori93fcfe32009-01-15 22:34:14 +00001910 if (qemu_log_enabled()) {
1911 qemu_log("qemu: fatal: ");
1912 qemu_log_vprintf(fmt, ap2);
1913 qemu_log("\n");
j_mayerf9373292007-09-29 12:18:20 +00001914#ifdef TARGET_I386
aliguori93fcfe32009-01-15 22:34:14 +00001915 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
j_mayerf9373292007-09-29 12:18:20 +00001916#else
aliguori93fcfe32009-01-15 22:34:14 +00001917 log_cpu_state(env, 0);
j_mayerf9373292007-09-29 12:18:20 +00001918#endif
aliguori31b1a7b2009-01-15 22:35:09 +00001919 qemu_log_flush();
aliguori93fcfe32009-01-15 22:34:14 +00001920 qemu_log_close();
balrog924edca2007-06-10 14:07:13 +00001921 }
pbrook493ae1f2007-11-23 16:53:59 +00001922 va_end(ap2);
j_mayerf9373292007-09-29 12:18:20 +00001923 va_end(ap);
Riku Voipiofd052bf2010-01-25 14:30:49 +02001924#if defined(CONFIG_USER_ONLY)
1925 {
1926 struct sigaction act;
1927 sigfillset(&act.sa_mask);
1928 act.sa_handler = SIG_DFL;
1929 sigaction(SIGABRT, &act, NULL);
1930 }
1931#endif
bellard75012672003-06-21 13:11:07 +00001932 abort();
1933}
1934
thsc5be9f02007-02-28 20:20:53 +00001935CPUState *cpu_copy(CPUState *env)
1936{
ths01ba9812007-12-09 02:22:57 +00001937 CPUState *new_env = cpu_init(env->cpu_model_str);
thsc5be9f02007-02-28 20:20:53 +00001938 CPUState *next_cpu = new_env->next_cpu;
1939 int cpu_index = new_env->cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001940#if defined(TARGET_HAS_ICE)
1941 CPUBreakpoint *bp;
1942 CPUWatchpoint *wp;
1943#endif
1944
thsc5be9f02007-02-28 20:20:53 +00001945 memcpy(new_env, env, sizeof(CPUState));
aliguori5a38f082009-01-15 20:16:51 +00001946
1947 /* Preserve chaining and index. */
thsc5be9f02007-02-28 20:20:53 +00001948 new_env->next_cpu = next_cpu;
1949 new_env->cpu_index = cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001950
1951 /* Clone all break/watchpoints.
1952 Note: Once we support ptrace with hw-debug register access, make sure
1953 BP_CPU break/watchpoints are handled correctly on clone. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00001954 QTAILQ_INIT(&env->breakpoints);
1955 QTAILQ_INIT(&env->watchpoints);
aliguori5a38f082009-01-15 20:16:51 +00001956#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001957 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001958 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1959 }
Blue Swirl72cf2d42009-09-12 07:36:22 +00001960 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001961 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1962 wp->flags, NULL);
1963 }
1964#endif
1965
thsc5be9f02007-02-28 20:20:53 +00001966 return new_env;
1967}
1968
bellard01243112004-01-04 15:48:17 +00001969#if !defined(CONFIG_USER_ONLY)
1970
edgar_igl5c751e92008-05-06 08:44:21 +00001971static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1972{
1973 unsigned int i;
1974
1975 /* Discard jump cache entries for any tb which might potentially
1976 overlap the flushed page. */
1977 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1978 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001979 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001980
1981 i = tb_jmp_cache_hash_page(addr);
1982 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001983 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001984}
1985
Igor Kovalenko08738982009-07-12 02:15:40 +04001986static CPUTLBEntry s_cputlb_empty_entry = {
1987 .addr_read = -1,
1988 .addr_write = -1,
1989 .addr_code = -1,
1990 .addend = -1,
1991};
1992
bellardee8b7022004-02-03 23:35:10 +00001993/* NOTE: if flush_global is true, also flush global entries (not
1994 implemented yet) */
1995void tlb_flush(CPUState *env, int flush_global)
bellard33417e72003-08-10 21:47:01 +00001996{
bellard33417e72003-08-10 21:47:01 +00001997 int i;
bellard01243112004-01-04 15:48:17 +00001998
bellard9fa3e852004-01-04 18:06:42 +00001999#if defined(DEBUG_TLB)
2000 printf("tlb_flush:\n");
2001#endif
bellard01243112004-01-04 15:48:17 +00002002 /* must reset current TB so that interrupts cannot modify the
2003 links while we are modifying them */
2004 env->current_tb = NULL;
2005
bellard33417e72003-08-10 21:47:01 +00002006 for(i = 0; i < CPU_TLB_SIZE; i++) {
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002007 int mmu_idx;
2008 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
Igor Kovalenko08738982009-07-12 02:15:40 +04002009 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002010 }
bellard33417e72003-08-10 21:47:01 +00002011 }
bellard9fa3e852004-01-04 18:06:42 +00002012
bellard8a40a182005-11-20 10:35:40 +00002013 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
bellard9fa3e852004-01-04 18:06:42 +00002014
Paul Brookd4c430a2010-03-17 02:14:28 +00002015 env->tlb_flush_addr = -1;
2016 env->tlb_flush_mask = 0;
bellarde3db7222005-01-26 22:00:47 +00002017 tlb_flush_count++;
bellard33417e72003-08-10 21:47:01 +00002018}
2019
bellard274da6b2004-05-20 21:56:27 +00002020static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
bellard61382a52003-10-27 21:22:23 +00002021{
ths5fafdf22007-09-16 21:08:06 +00002022 if (addr == (tlb_entry->addr_read &
bellard84b7b8e2005-11-28 21:19:04 +00002023 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00002024 addr == (tlb_entry->addr_write &
bellard84b7b8e2005-11-28 21:19:04 +00002025 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00002026 addr == (tlb_entry->addr_code &
bellard84b7b8e2005-11-28 21:19:04 +00002027 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
Igor Kovalenko08738982009-07-12 02:15:40 +04002028 *tlb_entry = s_cputlb_empty_entry;
bellard84b7b8e2005-11-28 21:19:04 +00002029 }
bellard61382a52003-10-27 21:22:23 +00002030}
2031
bellard2e126692004-04-25 21:28:44 +00002032void tlb_flush_page(CPUState *env, target_ulong addr)
bellard33417e72003-08-10 21:47:01 +00002033{
bellard8a40a182005-11-20 10:35:40 +00002034 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002035 int mmu_idx;
bellard01243112004-01-04 15:48:17 +00002036
bellard9fa3e852004-01-04 18:06:42 +00002037#if defined(DEBUG_TLB)
bellard108c49b2005-07-24 12:55:09 +00002038 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
bellard9fa3e852004-01-04 18:06:42 +00002039#endif
Paul Brookd4c430a2010-03-17 02:14:28 +00002040 /* Check if we need to flush due to large pages. */
2041 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
2042#if defined(DEBUG_TLB)
2043 printf("tlb_flush_page: forced full flush ("
2044 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
2045 env->tlb_flush_addr, env->tlb_flush_mask);
2046#endif
2047 tlb_flush(env, 1);
2048 return;
2049 }
bellard01243112004-01-04 15:48:17 +00002050 /* must reset current TB so that interrupts cannot modify the
2051 links while we are modifying them */
2052 env->current_tb = NULL;
bellard33417e72003-08-10 21:47:01 +00002053
bellard61382a52003-10-27 21:22:23 +00002054 addr &= TARGET_PAGE_MASK;
bellard33417e72003-08-10 21:47:01 +00002055 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002056 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2057 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
bellard01243112004-01-04 15:48:17 +00002058
edgar_igl5c751e92008-05-06 08:44:21 +00002059 tlb_flush_jmp_cache(env, addr);
bellard9fa3e852004-01-04 18:06:42 +00002060}
2061
bellard9fa3e852004-01-04 18:06:42 +00002062/* update the TLBs so that writes to code in the virtual page 'addr'
2063 can be detected */
Anthony Liguoric227f092009-10-01 16:12:16 -05002064static void tlb_protect_code(ram_addr_t ram_addr)
bellard61382a52003-10-27 21:22:23 +00002065{
ths5fafdf22007-09-16 21:08:06 +00002066 cpu_physical_memory_reset_dirty(ram_addr,
bellard6a00d602005-11-21 23:25:50 +00002067 ram_addr + TARGET_PAGE_SIZE,
2068 CODE_DIRTY_FLAG);
bellard9fa3e852004-01-04 18:06:42 +00002069}
2070
bellard9fa3e852004-01-04 18:06:42 +00002071/* update the TLB so that writes in physical page 'phys_addr' are no longer
bellard3a7d9292005-08-21 09:26:42 +00002072 tested for self modifying code */
Anthony Liguoric227f092009-10-01 16:12:16 -05002073static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
bellard3a7d9292005-08-21 09:26:42 +00002074 target_ulong vaddr)
bellard9fa3e852004-01-04 18:06:42 +00002075{
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002076 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
bellard1ccde1c2004-02-06 19:46:14 +00002077}
2078
ths5fafdf22007-09-16 21:08:06 +00002079static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
bellard1ccde1c2004-02-06 19:46:14 +00002080 unsigned long start, unsigned long length)
2081{
2082 unsigned long addr;
bellard84b7b8e2005-11-28 21:19:04 +00002083 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2084 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
bellard1ccde1c2004-02-06 19:46:14 +00002085 if ((addr - start) < length) {
pbrook0f459d12008-06-09 00:20:13 +00002086 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
bellard1ccde1c2004-02-06 19:46:14 +00002087 }
2088 }
2089}
2090
pbrook5579c7f2009-04-11 14:47:08 +00002091/* Note: start and end must be within the same ram block. */
Anthony Liguoric227f092009-10-01 16:12:16 -05002092void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
bellard0a962c02005-02-10 22:00:27 +00002093 int dirty_flags)
bellard1ccde1c2004-02-06 19:46:14 +00002094{
2095 CPUState *env;
bellard4f2ac232004-04-26 19:44:02 +00002096 unsigned long length, start1;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002097 int i;
bellard1ccde1c2004-02-06 19:46:14 +00002098
2099 start &= TARGET_PAGE_MASK;
2100 end = TARGET_PAGE_ALIGN(end);
2101
2102 length = end - start;
2103 if (length == 0)
2104 return;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002105 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00002106
bellard1ccde1c2004-02-06 19:46:14 +00002107 /* we modify the TLB cache so that the dirty bit will be set again
2108 when accessing the range */
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02002109 start1 = (unsigned long)qemu_safe_ram_ptr(start);
Stefan Weila57d23e2011-04-30 22:49:26 +02002110 /* Check that we don't span multiple blocks - this breaks the
pbrook5579c7f2009-04-11 14:47:08 +00002111 address comparisons below. */
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02002112 if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
pbrook5579c7f2009-04-11 14:47:08 +00002113 != (end - 1) - start) {
2114 abort();
2115 }
2116
bellard6a00d602005-11-21 23:25:50 +00002117 for(env = first_cpu; env != NULL; env = env->next_cpu) {
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002118 int mmu_idx;
2119 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2120 for(i = 0; i < CPU_TLB_SIZE; i++)
2121 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2122 start1, length);
2123 }
bellard6a00d602005-11-21 23:25:50 +00002124 }
bellard1ccde1c2004-02-06 19:46:14 +00002125}
2126
aliguori74576192008-10-06 14:02:03 +00002127int cpu_physical_memory_set_dirty_tracking(int enable)
2128{
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002129 int ret = 0;
aliguori74576192008-10-06 14:02:03 +00002130 in_migration = enable;
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002131 ret = cpu_notify_migration_log(!!enable);
2132 return ret;
aliguori74576192008-10-06 14:02:03 +00002133}
2134
2135int cpu_physical_memory_get_dirty_tracking(void)
2136{
2137 return in_migration;
2138}
2139
Anthony Liguoric227f092009-10-01 16:12:16 -05002140int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
2141 target_phys_addr_t end_addr)
aliguori2bec46d2008-11-24 20:21:41 +00002142{
Michael S. Tsirkin7b8f3b72010-01-27 22:07:21 +02002143 int ret;
Jan Kiszka151f7742009-05-01 20:52:47 +02002144
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002145 ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
Jan Kiszka151f7742009-05-01 20:52:47 +02002146 return ret;
aliguori2bec46d2008-11-24 20:21:41 +00002147}
2148
Anthony PERARDe5896b12011-02-07 12:19:23 +01002149int cpu_physical_log_start(target_phys_addr_t start_addr,
2150 ram_addr_t size)
2151{
2152 CPUPhysMemoryClient *client;
2153 QLIST_FOREACH(client, &memory_client_list, list) {
2154 if (client->log_start) {
2155 int r = client->log_start(client, start_addr, size);
2156 if (r < 0) {
2157 return r;
2158 }
2159 }
2160 }
2161 return 0;
2162}
2163
2164int cpu_physical_log_stop(target_phys_addr_t start_addr,
2165 ram_addr_t size)
2166{
2167 CPUPhysMemoryClient *client;
2168 QLIST_FOREACH(client, &memory_client_list, list) {
2169 if (client->log_stop) {
2170 int r = client->log_stop(client, start_addr, size);
2171 if (r < 0) {
2172 return r;
2173 }
2174 }
2175 }
2176 return 0;
2177}
2178
bellard3a7d9292005-08-21 09:26:42 +00002179static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2180{
Anthony Liguoric227f092009-10-01 16:12:16 -05002181 ram_addr_t ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00002182 void *p;
bellard3a7d9292005-08-21 09:26:42 +00002183
bellard84b7b8e2005-11-28 21:19:04 +00002184 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
pbrook5579c7f2009-04-11 14:47:08 +00002185 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2186 + tlb_entry->addend);
Marcelo Tosattie8902612010-10-11 15:31:19 -03002187 ram_addr = qemu_ram_addr_from_host_nofail(p);
bellard3a7d9292005-08-21 09:26:42 +00002188 if (!cpu_physical_memory_is_dirty(ram_addr)) {
pbrook0f459d12008-06-09 00:20:13 +00002189 tlb_entry->addr_write |= TLB_NOTDIRTY;
bellard3a7d9292005-08-21 09:26:42 +00002190 }
2191 }
2192}
2193
2194/* update the TLB according to the current state of the dirty bits */
2195void cpu_tlb_update_dirty(CPUState *env)
2196{
2197 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002198 int mmu_idx;
2199 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2200 for(i = 0; i < CPU_TLB_SIZE; i++)
2201 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2202 }
bellard3a7d9292005-08-21 09:26:42 +00002203}
2204
pbrook0f459d12008-06-09 00:20:13 +00002205static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002206{
pbrook0f459d12008-06-09 00:20:13 +00002207 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2208 tlb_entry->addr_write = vaddr;
bellard1ccde1c2004-02-06 19:46:14 +00002209}
2210
pbrook0f459d12008-06-09 00:20:13 +00002211/* update the TLB corresponding to virtual page vaddr
2212 so that it is no longer dirty */
2213static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002214{
bellard1ccde1c2004-02-06 19:46:14 +00002215 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002216 int mmu_idx;
bellard1ccde1c2004-02-06 19:46:14 +00002217
pbrook0f459d12008-06-09 00:20:13 +00002218 vaddr &= TARGET_PAGE_MASK;
bellard1ccde1c2004-02-06 19:46:14 +00002219 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002220 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2221 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
bellard9fa3e852004-01-04 18:06:42 +00002222}
2223
Paul Brookd4c430a2010-03-17 02:14:28 +00002224/* Our TLB does not support large pages, so remember the area covered by
2225 large pages and trigger a full TLB flush if these are invalidated. */
2226static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2227 target_ulong size)
2228{
2229 target_ulong mask = ~(size - 1);
2230
2231 if (env->tlb_flush_addr == (target_ulong)-1) {
2232 env->tlb_flush_addr = vaddr & mask;
2233 env->tlb_flush_mask = mask;
2234 return;
2235 }
2236 /* Extend the existing region to include the new page.
2237 This is a compromise between unnecessary flushes and the cost
2238 of maintaining a full variable size TLB. */
2239 mask &= env->tlb_flush_mask;
2240 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2241 mask <<= 1;
2242 }
2243 env->tlb_flush_addr &= mask;
2244 env->tlb_flush_mask = mask;
2245}
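/* Editor's note (illustrative): a worked example of the mask arithmetic
   above, with hypothetical addresses.  Inserting a 2MB page at vaddr
   0x00200000 records tlb_flush_addr = 0x00200000 and tlb_flush_mask =
   0xffe00000.  Inserting a second 2MB page at 0x00600000 widens the
   region: the loop shifts the mask until
   ((0x00200000 ^ 0x00600000) & mask) == 0, leaving mask = 0xff800000,
   i.e. the tracked range 0x00000000..0x007fffff.  A tlb_flush_page()
   anywhere in that range now falls back to a full tlb_flush(). */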
2246
2247/* Add a new TLB entry. At most one entry for a given virtual address
2248    is permitted. Only a single TARGET_PAGE_SIZE region is mapped; the
2249 supplied size is only used by tlb_flush_page. */
2250void tlb_set_page(CPUState *env, target_ulong vaddr,
2251 target_phys_addr_t paddr, int prot,
2252 int mmu_idx, target_ulong size)
bellard9fa3e852004-01-04 18:06:42 +00002253{
bellard92e873b2004-05-21 14:52:29 +00002254 PhysPageDesc *p;
bellard4f2ac232004-04-26 19:44:02 +00002255 unsigned long pd;
bellard9fa3e852004-01-04 18:06:42 +00002256 unsigned int index;
bellard4f2ac232004-04-26 19:44:02 +00002257 target_ulong address;
pbrook0f459d12008-06-09 00:20:13 +00002258 target_ulong code_address;
Paul Brook355b1942010-04-05 00:28:53 +01002259 unsigned long addend;
bellard84b7b8e2005-11-28 21:19:04 +00002260 CPUTLBEntry *te;
aliguoria1d1bb32008-11-18 20:07:32 +00002261 CPUWatchpoint *wp;
Anthony Liguoric227f092009-10-01 16:12:16 -05002262 target_phys_addr_t iotlb;
bellard9fa3e852004-01-04 18:06:42 +00002263
Paul Brookd4c430a2010-03-17 02:14:28 +00002264 assert(size >= TARGET_PAGE_SIZE);
2265 if (size != TARGET_PAGE_SIZE) {
2266 tlb_add_large_page(env, vaddr, size);
2267 }
bellard92e873b2004-05-21 14:52:29 +00002268 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
bellard9fa3e852004-01-04 18:06:42 +00002269 if (!p) {
2270 pd = IO_MEM_UNASSIGNED;
bellard9fa3e852004-01-04 18:06:42 +00002271 } else {
2272 pd = p->phys_offset;
bellard9fa3e852004-01-04 18:06:42 +00002273 }
2274#if defined(DEBUG_TLB)
Stefan Weil7fd3f492010-09-30 22:39:51 +02002275 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
2276 " prot=%x idx=%d pd=0x%08lx\n",
2277 vaddr, paddr, prot, mmu_idx, pd);
bellard9fa3e852004-01-04 18:06:42 +00002278#endif
2279
pbrook0f459d12008-06-09 00:20:13 +00002280 address = vaddr;
2281 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2282 /* IO memory case (romd handled later) */
2283 address |= TLB_MMIO;
2284 }
pbrook5579c7f2009-04-11 14:47:08 +00002285 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
pbrook0f459d12008-06-09 00:20:13 +00002286 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2287 /* Normal RAM. */
2288 iotlb = pd & TARGET_PAGE_MASK;
2289 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2290 iotlb |= IO_MEM_NOTDIRTY;
2291 else
2292 iotlb |= IO_MEM_ROM;
2293 } else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002294 /* IO handlers are currently passed a physical address.
pbrook0f459d12008-06-09 00:20:13 +00002295 It would be nice to pass an offset from the base address
2296 of that region. This would avoid having to special case RAM,
2297 and avoid full address decoding in every device.
2298 We can't use the high bits of pd for this because
2299 IO_MEM_ROMD uses these as a ram address. */
pbrook8da3ff12008-12-01 18:59:50 +00002300 iotlb = (pd & ~TARGET_PAGE_MASK);
2301 if (p) {
pbrook8da3ff12008-12-01 18:59:50 +00002302 iotlb += p->region_offset;
2303 } else {
2304 iotlb += paddr;
2305 }
pbrook0f459d12008-06-09 00:20:13 +00002306 }
pbrook6658ffb2007-03-16 23:58:11 +00002307
pbrook0f459d12008-06-09 00:20:13 +00002308 code_address = address;
2309 /* Make accesses to pages with watchpoints go via the
2310 watchpoint trap routines. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00002311 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00002312 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
Jun Koibf298f82010-05-06 14:36:59 +09002313 /* Avoid trapping reads of pages with a write breakpoint. */
2314 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
2315 iotlb = io_mem_watch + paddr;
2316 address |= TLB_MMIO;
2317 break;
2318 }
pbrook6658ffb2007-03-16 23:58:11 +00002319 }
pbrook0f459d12008-06-09 00:20:13 +00002320 }
balrogd79acba2007-06-26 20:01:13 +00002321
pbrook0f459d12008-06-09 00:20:13 +00002322 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2323 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2324 te = &env->tlb_table[mmu_idx][index];
2325 te->addend = addend - vaddr;
2326 if (prot & PAGE_READ) {
2327 te->addr_read = address;
2328 } else {
2329 te->addr_read = -1;
2330 }
edgar_igl5c751e92008-05-06 08:44:21 +00002331
pbrook0f459d12008-06-09 00:20:13 +00002332 if (prot & PAGE_EXEC) {
2333 te->addr_code = code_address;
2334 } else {
2335 te->addr_code = -1;
2336 }
2337 if (prot & PAGE_WRITE) {
2338 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2339 (pd & IO_MEM_ROMD)) {
2340 /* Write access calls the I/O callback. */
2341 te->addr_write = address | TLB_MMIO;
2342 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2343 !cpu_physical_memory_is_dirty(pd)) {
2344 te->addr_write = address | TLB_NOTDIRTY;
bellard84b7b8e2005-11-28 21:19:04 +00002345 } else {
pbrook0f459d12008-06-09 00:20:13 +00002346 te->addr_write = address;
bellard9fa3e852004-01-04 18:06:42 +00002347 }
pbrook0f459d12008-06-09 00:20:13 +00002348 } else {
2349 te->addr_write = -1;
bellard9fa3e852004-01-04 18:06:42 +00002350 }
bellard9fa3e852004-01-04 18:06:42 +00002351}
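/* Editor's note: an illustrative sketch, not part of the original file,
   of how the softmmu fast path consumes the entry filled in above
   (roughly; the real check in softmmu_template.h also folds the access
   size into the comparison).  A hit means the host address is simply
   addr + addend: */
#if 0
static int tlb_read_hit_example(CPUState *env, target_ulong addr,
                                int mmu_idx)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    CPUTLBEntry *te = &env->tlb_table[mmu_idx][index];
    if ((addr & TARGET_PAGE_MASK) ==
        (te->addr_read & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        return 1;           /* hit: access memory at addr + te->addend */
    }
    return 0;               /* miss: tlb_fill() -> tlb_set_page() */
}
#endif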
2352
bellard01243112004-01-04 15:48:17 +00002353#else
2354
bellardee8b7022004-02-03 23:35:10 +00002355void tlb_flush(CPUState *env, int flush_global)
bellard01243112004-01-04 15:48:17 +00002356{
2357}
2358
bellard2e126692004-04-25 21:28:44 +00002359void tlb_flush_page(CPUState *env, target_ulong addr)
bellard01243112004-01-04 15:48:17 +00002360{
2361}
2362
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002363/*
2364 * Walks guest process memory "regions" one by one
2365 * and calls callback function 'fn' for each region.
2366 */
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002367
2368struct walk_memory_regions_data
bellard9fa3e852004-01-04 18:06:42 +00002369{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002370 walk_memory_regions_fn fn;
2371 void *priv;
2372 unsigned long start;
2373 int prot;
2374};
bellard9fa3e852004-01-04 18:06:42 +00002375
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002376static int walk_memory_regions_end(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00002377 abi_ulong end, int new_prot)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002378{
2379 if (data->start != -1ul) {
2380 int rc = data->fn(data->priv, data->start, end, data->prot);
2381 if (rc != 0) {
2382 return rc;
bellard9fa3e852004-01-04 18:06:42 +00002383 }
bellard33417e72003-08-10 21:47:01 +00002384 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002385
2386 data->start = (new_prot ? end : -1ul);
2387 data->prot = new_prot;
2388
2389 return 0;
2390}
2391
2392static int walk_memory_regions_1(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00002393 abi_ulong base, int level, void **lp)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002394{
Paul Brookb480d9b2010-03-12 23:23:29 +00002395 abi_ulong pa;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002396 int i, rc;
2397
2398 if (*lp == NULL) {
2399 return walk_memory_regions_end(data, base, 0);
2400 }
2401
2402 if (level == 0) {
2403 PageDesc *pd = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00002404 for (i = 0; i < L2_SIZE; ++i) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002405 int prot = pd[i].flags;
2406
2407 pa = base | (i << TARGET_PAGE_BITS);
2408 if (prot != data->prot) {
2409 rc = walk_memory_regions_end(data, pa, prot);
2410 if (rc != 0) {
2411 return rc;
2412 }
2413 }
2414 }
2415 } else {
2416 void **pp = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00002417 for (i = 0; i < L2_SIZE; ++i) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002418 pa = base | ((abi_ulong)i <<
2419 (TARGET_PAGE_BITS + L2_BITS * level));
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002420 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2421 if (rc != 0) {
2422 return rc;
2423 }
2424 }
2425 }
2426
2427 return 0;
2428}
2429
2430int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2431{
2432 struct walk_memory_regions_data data;
2433 unsigned long i;
2434
2435 data.fn = fn;
2436 data.priv = priv;
2437 data.start = -1ul;
2438 data.prot = 0;
2439
2440 for (i = 0; i < V_L1_SIZE; i++) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002441 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002442 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2443 if (rc != 0) {
2444 return rc;
2445 }
2446 }
2447
2448 return walk_memory_regions_end(&data, 0, 0);
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002449}
2450
Paul Brookb480d9b2010-03-12 23:23:29 +00002451static int dump_region(void *priv, abi_ulong start,
2452 abi_ulong end, unsigned long prot)
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002453{
2454 FILE *f = (FILE *)priv;
2455
Paul Brookb480d9b2010-03-12 23:23:29 +00002456 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2457 " "TARGET_ABI_FMT_lx" %c%c%c\n",
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002458 start, end, end - start,
2459 ((prot & PAGE_READ) ? 'r' : '-'),
2460 ((prot & PAGE_WRITE) ? 'w' : '-'),
2461 ((prot & PAGE_EXEC) ? 'x' : '-'));
2462
2463 return (0);
2464}
2465
2466/* dump memory mappings */
2467void page_dump(FILE *f)
2468{
2469 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2470 "start", "end", "size", "prot");
2471 walk_memory_regions(f, dump_region);
bellard33417e72003-08-10 21:47:01 +00002472}
2473
pbrook53a59602006-03-25 19:31:22 +00002474int page_get_flags(target_ulong address)
bellard33417e72003-08-10 21:47:01 +00002475{
bellard9fa3e852004-01-04 18:06:42 +00002476 PageDesc *p;
2477
2478 p = page_find(address >> TARGET_PAGE_BITS);
bellard33417e72003-08-10 21:47:01 +00002479 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00002480 return 0;
2481 return p->flags;
bellard33417e72003-08-10 21:47:01 +00002482}
2483
Richard Henderson376a7902010-03-10 15:57:04 -08002484/* Modify the flags of a page and invalidate the code if necessary.
2485    The flag PAGE_WRITE_ORG is set automatically depending
2486 on PAGE_WRITE. The mmap_lock should already be held. */
pbrook53a59602006-03-25 19:31:22 +00002487void page_set_flags(target_ulong start, target_ulong end, int flags)
bellard9fa3e852004-01-04 18:06:42 +00002488{
Richard Henderson376a7902010-03-10 15:57:04 -08002489 target_ulong addr, len;
bellard9fa3e852004-01-04 18:06:42 +00002490
Richard Henderson376a7902010-03-10 15:57:04 -08002491 /* This function should never be called with addresses outside the
2492 guest address space. If this assert fires, it probably indicates
2493 a missing call to h2g_valid. */
Paul Brookb480d9b2010-03-12 23:23:29 +00002494#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2495 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002496#endif
2497 assert(start < end);
2498
bellard9fa3e852004-01-04 18:06:42 +00002499 start = start & TARGET_PAGE_MASK;
2500 end = TARGET_PAGE_ALIGN(end);
Richard Henderson376a7902010-03-10 15:57:04 -08002501
2502 if (flags & PAGE_WRITE) {
bellard9fa3e852004-01-04 18:06:42 +00002503 flags |= PAGE_WRITE_ORG;
Richard Henderson376a7902010-03-10 15:57:04 -08002504 }
2505
2506 for (addr = start, len = end - start;
2507 len != 0;
2508 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2509 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2510
2511 /* If the write protection bit is set, then we invalidate
2512 the code inside. */
ths5fafdf22007-09-16 21:08:06 +00002513 if (!(p->flags & PAGE_WRITE) &&
bellard9fa3e852004-01-04 18:06:42 +00002514 (flags & PAGE_WRITE) &&
2515 p->first_tb) {
bellardd720b932004-04-25 17:57:43 +00002516 tb_invalidate_phys_page(addr, 0, NULL);
bellard9fa3e852004-01-04 18:06:42 +00002517 }
2518 p->flags = flags;
2519 }
bellard9fa3e852004-01-04 18:06:42 +00002520}
2521
ths3d97b402007-11-02 19:02:07 +00002522int page_check_range(target_ulong start, target_ulong len, int flags)
2523{
2524 PageDesc *p;
2525 target_ulong end;
2526 target_ulong addr;
2527
Richard Henderson376a7902010-03-10 15:57:04 -08002528 /* This function should never be called with addresses outside the
2529 guest address space. If this assert fires, it probably indicates
2530 a missing call to h2g_valid. */
Blue Swirl338e9e62010-03-13 09:48:08 +00002531#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2532 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002533#endif
2534
Richard Henderson3e0650a2010-03-29 10:54:42 -07002535 if (len == 0) {
2536 return 0;
2537 }
Richard Henderson376a7902010-03-10 15:57:04 -08002538 if (start + len - 1 < start) {
2539 /* We've wrapped around. */
balrog55f280c2008-10-28 10:24:11 +00002540 return -1;
Richard Henderson376a7902010-03-10 15:57:04 -08002541 }
balrog55f280c2008-10-28 10:24:11 +00002542
ths3d97b402007-11-02 19:02:07 +00002543    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2544 start = start & TARGET_PAGE_MASK;
2545
Richard Henderson376a7902010-03-10 15:57:04 -08002546 for (addr = start, len = end - start;
2547 len != 0;
2548 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
ths3d97b402007-11-02 19:02:07 +00002549 p = page_find(addr >> TARGET_PAGE_BITS);
2550 if( !p )
2551 return -1;
2552 if( !(p->flags & PAGE_VALID) )
2553 return -1;
2554
bellarddae32702007-11-14 10:51:00 +00002555 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
ths3d97b402007-11-02 19:02:07 +00002556 return -1;
bellarddae32702007-11-14 10:51:00 +00002557 if (flags & PAGE_WRITE) {
2558 if (!(p->flags & PAGE_WRITE_ORG))
2559 return -1;
2560 /* unprotect the page if it was put read-only because it
2561 contains translated code */
2562 if (!(p->flags & PAGE_WRITE)) {
2563 if (!page_unprotect(addr, 0, NULL))
2564 return -1;
2565 }
2566 return 0;
2567 }
ths3d97b402007-11-02 19:02:07 +00002568 }
2569 return 0;
2570}
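/* Editor's note: a minimal usage sketch (hypothetical wrapper), not part
   of the original file.  linux-user builds its access_ok() on this kind
   of check; note that asking for PAGE_WRITE may call page_unprotect() as
   a side effect, to make a translated-code page writable again: */
#if 0
static int guest_range_writable(target_ulong addr, target_ulong len)
{
    return page_check_range(addr, len, PAGE_WRITE) == 0;
}
#endif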
2571
bellard9fa3e852004-01-04 18:06:42 +00002572/* called from signal handler: invalidate the code and unprotect the
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002573 page. Return TRUE if the fault was successfully handled. */
pbrook53a59602006-03-25 19:31:22 +00002574int page_unprotect(target_ulong address, unsigned long pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00002575{
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002576 unsigned int prot;
2577 PageDesc *p;
pbrook53a59602006-03-25 19:31:22 +00002578 target_ulong host_start, host_end, addr;
bellard9fa3e852004-01-04 18:06:42 +00002579
pbrookc8a706f2008-06-02 16:16:42 +00002580 /* Technically this isn't safe inside a signal handler. However we
2581 know this only ever happens in a synchronous SEGV handler, so in
2582 practice it seems to be ok. */
2583 mmap_lock();
2584
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002585 p = page_find(address >> TARGET_PAGE_BITS);
2586 if (!p) {
pbrookc8a706f2008-06-02 16:16:42 +00002587 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002588 return 0;
pbrookc8a706f2008-06-02 16:16:42 +00002589 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002590
bellard9fa3e852004-01-04 18:06:42 +00002591 /* if the page was really writable, then we change its
2592 protection back to writable */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002593 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2594 host_start = address & qemu_host_page_mask;
2595 host_end = host_start + qemu_host_page_size;
2596
2597 prot = 0;
2598 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2599 p = page_find(addr >> TARGET_PAGE_BITS);
2600 p->flags |= PAGE_WRITE;
2601 prot |= p->flags;
2602
bellard9fa3e852004-01-04 18:06:42 +00002603 /* and since the content will be modified, we must invalidate
2604 the corresponding translated code. */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002605 tb_invalidate_phys_page(addr, pc, puc);
bellard9fa3e852004-01-04 18:06:42 +00002606#ifdef DEBUG_TB_CHECK
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002607 tb_invalidate_check(addr);
bellard9fa3e852004-01-04 18:06:42 +00002608#endif
bellard9fa3e852004-01-04 18:06:42 +00002609 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002610 mprotect((void *)g2h(host_start), qemu_host_page_size,
2611 prot & PAGE_BITS);
2612
2613 mmap_unlock();
2614 return 1;
bellard9fa3e852004-01-04 18:06:42 +00002615 }
pbrookc8a706f2008-06-02 16:16:42 +00002616 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002617 return 0;
2618}
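/*
 * Illustrative sketch, assuming the usual host signal path: the per-host
 * SEGV handler converts the faulting host address back to a guest address
 * with h2g() and lets page_unprotect() decide whether the fault was one
 * of ours.  example_handle_write_fault is a hypothetical stand-in for
 * that handler.
 */
static int example_handle_write_fault(unsigned long host_addr,
                                      unsigned long pc, void *puc)
{
    /* Returns 1 when the write hit a page we protected for translated
       code and has now been fixed up; the faulting insn can be retried. */
    return page_unprotect(h2g(host_addr), pc, puc);
}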
2619
bellard6a00d602005-11-21 23:25:50 +00002620static inline void tlb_set_dirty(CPUState *env,
2621 unsigned long addr, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002622{
2623}
bellard9fa3e852004-01-04 18:06:42 +00002624#endif /* defined(CONFIG_USER_ONLY) */
2625
pbrooke2eef172008-06-08 01:09:01 +00002626#if !defined(CONFIG_USER_ONLY)
pbrook8da3ff12008-12-01 18:59:50 +00002627
Paul Brookc04b2b72010-03-01 03:31:14 +00002628#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2629typedef struct subpage_t {
2630 target_phys_addr_t base;
Richard Hendersonf6405242010-04-22 16:47:31 -07002631 ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2632 ram_addr_t region_offset[TARGET_PAGE_SIZE];
Paul Brookc04b2b72010-03-01 03:31:14 +00002633} subpage_t;
2634
Anthony Liguoric227f092009-10-01 16:12:16 -05002635static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2636 ram_addr_t memory, ram_addr_t region_offset);
Richard Hendersonf6405242010-04-22 16:47:31 -07002637static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2638 ram_addr_t orig_memory,
2639 ram_addr_t region_offset);
blueswir1db7b5422007-05-26 17:36:03 +00002640#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2641 need_subpage) \
2642 do { \
2643 if (addr > start_addr) \
2644 start_addr2 = 0; \
2645 else { \
2646 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2647 if (start_addr2 > 0) \
2648 need_subpage = 1; \
2649 } \
2650 \
blueswir149e9fba2007-05-30 17:25:06 +00002651 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
blueswir1db7b5422007-05-26 17:36:03 +00002652 end_addr2 = TARGET_PAGE_SIZE - 1; \
2653 else { \
2654 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2655 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2656 need_subpage = 1; \
2657 } \
2658 } while (0)
2659
Michael S. Tsirkin8f2498f2009-09-29 18:53:16 +02002660/* register physical memory.
2661 For RAM, 'size' must be a multiple of the target page size.
2662 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
pbrook8da3ff12008-12-01 18:59:50 +00002663 io memory page. The address used when calling the IO function is
2664 the offset from the start of the region, plus region_offset. Both
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002665 start_addr and region_offset are rounded down to a page boundary
pbrook8da3ff12008-12-01 18:59:50 +00002666 before calculating this offset. This should not be a problem unless
2667 the low bits of start_addr and region_offset differ. */
Michael S. Tsirkin0fd542f2011-04-06 22:25:38 +03002668void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
Anthony Liguoric227f092009-10-01 16:12:16 -05002669 ram_addr_t size,
2670 ram_addr_t phys_offset,
Michael S. Tsirkin0fd542f2011-04-06 22:25:38 +03002671 ram_addr_t region_offset,
2672 bool log_dirty)
bellard33417e72003-08-10 21:47:01 +00002673{
Anthony Liguoric227f092009-10-01 16:12:16 -05002674 target_phys_addr_t addr, end_addr;
bellard92e873b2004-05-21 14:52:29 +00002675 PhysPageDesc *p;
bellard9d420372006-06-25 22:25:22 +00002676 CPUState *env;
Anthony Liguoric227f092009-10-01 16:12:16 -05002677 ram_addr_t orig_size = size;
Richard Hendersonf6405242010-04-22 16:47:31 -07002678 subpage_t *subpage;
bellard33417e72003-08-10 21:47:01 +00002679
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002680 assert(size);
Michael S. Tsirkin0fd542f2011-04-06 22:25:38 +03002681 cpu_notify_set_memory(start_addr, size, phys_offset, log_dirty);
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002682
pbrook67c4d232009-02-23 13:16:07 +00002683 if (phys_offset == IO_MEM_UNASSIGNED) {
2684 region_offset = start_addr;
2685 }
pbrook8da3ff12008-12-01 18:59:50 +00002686 region_offset &= TARGET_PAGE_MASK;
bellard5fd386f2004-05-23 21:11:22 +00002687 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
Anthony Liguoric227f092009-10-01 16:12:16 -05002688 end_addr = start_addr + (target_phys_addr_t)size;
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002689
2690 addr = start_addr;
2691 do {
blueswir1db7b5422007-05-26 17:36:03 +00002692 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2693 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
Anthony Liguoric227f092009-10-01 16:12:16 -05002694 ram_addr_t orig_memory = p->phys_offset;
2695 target_phys_addr_t start_addr2, end_addr2;
blueswir1db7b5422007-05-26 17:36:03 +00002696 int need_subpage = 0;
2697
2698 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2699 need_subpage);
Richard Hendersonf6405242010-04-22 16:47:31 -07002700 if (need_subpage) {
blueswir1db7b5422007-05-26 17:36:03 +00002701 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2702 subpage = subpage_init((addr & TARGET_PAGE_MASK),
pbrook8da3ff12008-12-01 18:59:50 +00002703 &p->phys_offset, orig_memory,
2704 p->region_offset);
blueswir1db7b5422007-05-26 17:36:03 +00002705 } else {
2706 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2707 >> IO_MEM_SHIFT];
2708 }
pbrook8da3ff12008-12-01 18:59:50 +00002709 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2710 region_offset);
2711 p->region_offset = 0;
blueswir1db7b5422007-05-26 17:36:03 +00002712 } else {
2713 p->phys_offset = phys_offset;
2714 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2715 (phys_offset & IO_MEM_ROMD))
2716 phys_offset += TARGET_PAGE_SIZE;
2717 }
2718 } else {
2719 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2720 p->phys_offset = phys_offset;
pbrook8da3ff12008-12-01 18:59:50 +00002721 p->region_offset = region_offset;
blueswir1db7b5422007-05-26 17:36:03 +00002722 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
pbrook8da3ff12008-12-01 18:59:50 +00002723 (phys_offset & IO_MEM_ROMD)) {
blueswir1db7b5422007-05-26 17:36:03 +00002724 phys_offset += TARGET_PAGE_SIZE;
pbrook0e8f0962008-12-02 09:02:15 +00002725 } else {
Anthony Liguoric227f092009-10-01 16:12:16 -05002726 target_phys_addr_t start_addr2, end_addr2;
blueswir1db7b5422007-05-26 17:36:03 +00002727 int need_subpage = 0;
2728
2729 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2730 end_addr2, need_subpage);
2731
Richard Hendersonf6405242010-04-22 16:47:31 -07002732 if (need_subpage) {
blueswir1db7b5422007-05-26 17:36:03 +00002733 subpage = subpage_init((addr & TARGET_PAGE_MASK),
pbrook8da3ff12008-12-01 18:59:50 +00002734 &p->phys_offset, IO_MEM_UNASSIGNED,
pbrook67c4d232009-02-23 13:16:07 +00002735 addr & TARGET_PAGE_MASK);
blueswir1db7b5422007-05-26 17:36:03 +00002736 subpage_register(subpage, start_addr2, end_addr2,
pbrook8da3ff12008-12-01 18:59:50 +00002737 phys_offset, region_offset);
2738 p->region_offset = 0;
blueswir1db7b5422007-05-26 17:36:03 +00002739 }
2740 }
2741 }
pbrook8da3ff12008-12-01 18:59:50 +00002742 region_offset += TARGET_PAGE_SIZE;
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002743 addr += TARGET_PAGE_SIZE;
2744 } while (addr != end_addr);
ths3b46e622007-09-17 08:09:54 +00002745
bellard9d420372006-06-25 22:25:22 +00002746 /* since each CPU stores ram addresses in its TLB cache, we must
2747 reset the modified entries */
2748 /* XXX: slow ! */
2749 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2750 tlb_flush(env, 1);
2751 }
bellard33417e72003-08-10 21:47:01 +00002752}
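/*
 * Usage sketch (hedged): board code normally goes through the
 * cpu_register_physical_memory() inline wrapper, assumed here to come
 * from cpu-common.h and to forward region_offset == start_addr and
 * log_dirty == false.  Addresses and sizes are made up.
 */
static void example_map_ram(ram_addr_t ram_offset)
{
    /* Make 64KB of already-allocated RAM guest-visible at address 0. */
    cpu_register_physical_memory(0x00000000, 0x10000,
                                 ram_offset | IO_MEM_RAM);
}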
2753
bellardba863452006-09-24 18:41:10 +00002754/* XXX: temporary until new memory mapping API */
Anthony Liguoric227f092009-10-01 16:12:16 -05002755ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
bellardba863452006-09-24 18:41:10 +00002756{
2757 PhysPageDesc *p;
2758
2759 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2760 if (!p)
2761 return IO_MEM_UNASSIGNED;
2762 return p->phys_offset;
2763}
2764
Anthony Liguoric227f092009-10-01 16:12:16 -05002765void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002766{
2767 if (kvm_enabled())
2768 kvm_coalesce_mmio_region(addr, size);
2769}
2770
Anthony Liguoric227f092009-10-01 16:12:16 -05002771void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002772{
2773 if (kvm_enabled())
2774 kvm_uncoalesce_mmio_region(addr, size);
2775}
2776
Sheng Yang62a27442010-01-26 19:21:16 +08002777void qemu_flush_coalesced_mmio_buffer(void)
2778{
2779 if (kvm_enabled())
2780 kvm_flush_coalesced_mmio_buffer();
2781}
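/*
 * Sketch only: a device whose register window tolerates delayed writes
 * can ask KVM to batch them.  The base address is hypothetical; all three
 * helpers above collapse to no-ops when KVM is not enabled.
 */
static void example_enable_coalescing(target_phys_addr_t base)
{
    qemu_register_coalesced_mmio(base, TARGET_PAGE_SIZE);
}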
2782
Marcelo Tosattic9027602010-03-01 20:25:08 -03002783#if defined(__linux__) && !defined(TARGET_S390X)
2784
2785#include <sys/vfs.h>
2786
2787#define HUGETLBFS_MAGIC 0x958458f6
2788
2789static long gethugepagesize(const char *path)
2790{
2791 struct statfs fs;
2792 int ret;
2793
2794 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002795 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002796 } while (ret != 0 && errno == EINTR);
2797
2798 if (ret != 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002799 perror(path);
2800 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002801 }
2802
2803 if (fs.f_type != HUGETLBFS_MAGIC)
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002804 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002805
2806 return fs.f_bsize;
2807}
2808
Alex Williamson04b16652010-07-02 11:13:17 -06002809static void *file_ram_alloc(RAMBlock *block,
2810 ram_addr_t memory,
2811 const char *path)
Marcelo Tosattic9027602010-03-01 20:25:08 -03002812{
2813 char *filename;
2814 void *area;
2815 int fd;
2816#ifdef MAP_POPULATE
2817 int flags;
2818#endif
2819 unsigned long hpagesize;
2820
2821 hpagesize = gethugepagesize(path);
2822 if (!hpagesize) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002823 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002824 }
2825
2826 if (memory < hpagesize) {
2827 return NULL;
2828 }
2829
2830 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2831 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2832 return NULL;
2833 }
2834
2835 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002836 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002837 }
2838
2839 fd = mkstemp(filename);
2840 if (fd < 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002841 perror("unable to create backing store for hugepages");
2842 free(filename);
2843 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002844 }
2845 unlink(filename);
2846 free(filename);
2847
2848 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2849
2850 /*
2851 * ftruncate is not supported by hugetlbfs in older
2852 * hosts, so don't bother bailing out on errors.
2853 * If anything goes wrong with it under other filesystems,
2854 * mmap will fail.
2855 */
2856 if (ftruncate(fd, memory))
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002857 perror("ftruncate");
Marcelo Tosattic9027602010-03-01 20:25:08 -03002858
2859#ifdef MAP_POPULATE
2860 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2861 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2862 * to sidestep this quirk.
2863 */
2864 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2865 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2866#else
2867 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2868#endif
2869 if (area == MAP_FAILED) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002870 perror("file_ram_alloc: can't mmap RAM pages");
2871 close(fd);
2872 return (NULL);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002873 }
Alex Williamson04b16652010-07-02 11:13:17 -06002874 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002875 return area;
2876}
2877#endif
2878
Alex Williamsond17b5282010-06-25 11:08:38 -06002879static ram_addr_t find_ram_offset(ram_addr_t size)
2880{
Alex Williamson04b16652010-07-02 11:13:17 -06002881 RAMBlock *block, *next_block;
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002882 ram_addr_t offset = 0, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002883
2884 if (QLIST_EMPTY(&ram_list.blocks))
2885 return 0;
2886
2887 QLIST_FOREACH(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002888 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002889
2890 end = block->offset + block->length;
2891
2892 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2893 if (next_block->offset >= end) {
2894 next = MIN(next, next_block->offset);
2895 }
2896 }
2897 if (next - end >= size && next - end < mingap) {
2898 offset = end;
2899 mingap = next - end;
2900 }
2901 }
2902 return offset;
2903}
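/*
 * Worked example (editorial): with existing blocks [0, 0x1000) and
 * [0x3000, 0x4000), a request for 0x1000 bytes inspects the gap after
 * each block end.  The gap [0x1000, 0x3000) is 0x2000 bytes, fits, and
 * is the smallest candidate, so find_ram_offset() returns 0x1000 -- the
 * tightest-fit policy keeps the ram_addr_t space compact.
 */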
2904
2905static ram_addr_t last_ram_offset(void)
2906{
Alex Williamsond17b5282010-06-25 11:08:38 -06002907 RAMBlock *block;
2908 ram_addr_t last = 0;
2909
2910 QLIST_FOREACH(block, &ram_list.blocks, next)
2911 last = MAX(last, block->offset + block->length);
2912
2913 return last;
2914}
2915
Cam Macdonell84b89d72010-07-26 18:10:57 -06002916ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002917 ram_addr_t size, void *host)
Cam Macdonell84b89d72010-07-26 18:10:57 -06002918{
2919 RAMBlock *new_block, *block;
2920
2921 size = TARGET_PAGE_ALIGN(size);
2922 new_block = qemu_mallocz(sizeof(*new_block));
2923
2924 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2925 char *id = dev->parent_bus->info->get_dev_path(dev);
2926 if (id) {
2927 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
2928 qemu_free(id);
2929 }
2930 }
2931 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2932
2933 QLIST_FOREACH(block, &ram_list.blocks, next) {
2934 if (!strcmp(block->idstr, new_block->idstr)) {
2935 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2936 new_block->idstr);
2937 abort();
2938 }
2939 }
2940
Jun Nakajima432d2682010-08-31 16:41:25 +01002941 new_block->offset = find_ram_offset(size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002942 if (host) {
2943 new_block->host = host;
Huang Yingcd19cfa2011-03-02 08:56:19 +01002944 new_block->flags |= RAM_PREALLOC_MASK;
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002945 } else {
2946 if (mem_path) {
2947#if defined (__linux__) && !defined(TARGET_S390X)
2948 new_block->host = file_ram_alloc(new_block, size, mem_path);
2949 if (!new_block->host) {
2950 new_block->host = qemu_vmalloc(size);
Andreas Färbere78815a2010-09-25 11:26:05 +00002951 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002952 }
2953#else
2954 fprintf(stderr, "-mem-path option unsupported\n");
2955 exit(1);
2956#endif
2957 } else {
2958#if defined(TARGET_S390X) && defined(CONFIG_KVM)
Christian Borntraegerff836782011-05-10 14:49:10 +02002959 /* S390 KVM requires the topmost vma of the RAM to be smaller than
2960       a system-defined value, which is at least 256GB. Larger systems
2961 have larger values. We put the guest between the end of data
2962 segment (system break) and this value. We use 32GB as a base to
2963 have enough room for the system break to grow. */
2964 new_block->host = mmap((void*)0x800000000, size,
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002965 PROT_EXEC|PROT_READ|PROT_WRITE,
Christian Borntraegerff836782011-05-10 14:49:10 +02002966 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
Alexander Graffb8b2732011-05-20 17:33:28 +02002967 if (new_block->host == MAP_FAILED) {
2968 fprintf(stderr, "Allocating RAM failed\n");
2969 abort();
2970 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002971#else
Jan Kiszka868bb332011-06-21 22:59:09 +02002972 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01002973 xen_ram_alloc(new_block->offset, size);
2974 } else {
2975 new_block->host = qemu_vmalloc(size);
2976 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002977#endif
Andreas Färbere78815a2010-09-25 11:26:05 +00002978 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002979 }
2980 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06002981 new_block->length = size;
2982
2983 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2984
2985 ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
2986 last_ram_offset() >> TARGET_PAGE_BITS);
2987 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2988 0xff, size >> TARGET_PAGE_BITS);
2989
2990 if (kvm_enabled())
2991 kvm_setup_guest_memory(new_block->host, size);
2992
2993 return new_block->offset;
2994}
2995
Alex Williamson1724f042010-06-25 11:09:35 -06002996ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
pbrook94a6b542009-04-11 17:15:54 +00002997{
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002998 return qemu_ram_alloc_from_ptr(dev, name, size, NULL);
pbrook94a6b542009-04-11 17:15:54 +00002999}
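/*
 * The usual allocate-then-map pattern, as a sketch.  The block name,
 * size and mapping address are hypothetical;
 * cpu_register_physical_memory() is the assumed inline wrapper from
 * cpu-common.h.
 */
static void example_init_vram(target_phys_addr_t map_addr)
{
    ram_addr_t vram;

    /* Track 8MB under the migration/debug id "example.vram"... */
    vram = qemu_ram_alloc(NULL, "example.vram", 8 * 1024 * 1024);
    /* ...then expose it to the guest at map_addr. */
    cpu_register_physical_memory(map_addr, 8 * 1024 * 1024,
                                 vram | IO_MEM_RAM);
}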
bellarde9a1ab12007-02-08 23:08:38 +00003000
Alex Williamson1f2e98b2011-05-03 12:48:09 -06003001void qemu_ram_free_from_ptr(ram_addr_t addr)
3002{
3003 RAMBlock *block;
3004
3005 QLIST_FOREACH(block, &ram_list.blocks, next) {
3006 if (addr == block->offset) {
3007 QLIST_REMOVE(block, next);
3008 qemu_free(block);
3009 return;
3010 }
3011 }
3012}
3013
Anthony Liguoric227f092009-10-01 16:12:16 -05003014void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00003015{
Alex Williamson04b16652010-07-02 11:13:17 -06003016 RAMBlock *block;
3017
3018 QLIST_FOREACH(block, &ram_list.blocks, next) {
3019 if (addr == block->offset) {
3020 QLIST_REMOVE(block, next);
Huang Yingcd19cfa2011-03-02 08:56:19 +01003021 if (block->flags & RAM_PREALLOC_MASK) {
3022 ;
3023 } else if (mem_path) {
Alex Williamson04b16652010-07-02 11:13:17 -06003024#if defined (__linux__) && !defined(TARGET_S390X)
3025 if (block->fd) {
3026 munmap(block->host, block->length);
3027 close(block->fd);
3028 } else {
3029 qemu_vfree(block->host);
3030 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01003031#else
3032 abort();
Alex Williamson04b16652010-07-02 11:13:17 -06003033#endif
3034 } else {
3035#if defined(TARGET_S390X) && defined(CONFIG_KVM)
3036 munmap(block->host, block->length);
3037#else
Jan Kiszka868bb332011-06-21 22:59:09 +02003038 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003039 xen_invalidate_map_cache_entry(block->host);
Jun Nakajima432d2682010-08-31 16:41:25 +01003040 } else {
3041 qemu_vfree(block->host);
3042 }
Alex Williamson04b16652010-07-02 11:13:17 -06003043#endif
3044 }
3045 qemu_free(block);
3046 return;
3047 }
3048 }
3049
bellarde9a1ab12007-02-08 23:08:38 +00003050}
3051
Huang Yingcd19cfa2011-03-02 08:56:19 +01003052#ifndef _WIN32
3053void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
3054{
3055 RAMBlock *block;
3056 ram_addr_t offset;
3057 int flags;
3058 void *area, *vaddr;
3059
3060 QLIST_FOREACH(block, &ram_list.blocks, next) {
3061 offset = addr - block->offset;
3062 if (offset < block->length) {
3063 vaddr = block->host + offset;
3064 if (block->flags & RAM_PREALLOC_MASK) {
3065 ;
3066 } else {
3067 flags = MAP_FIXED;
3068 munmap(vaddr, length);
3069 if (mem_path) {
3070#if defined(__linux__) && !defined(TARGET_S390X)
3071 if (block->fd) {
3072#ifdef MAP_POPULATE
3073 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
3074 MAP_PRIVATE;
3075#else
3076 flags |= MAP_PRIVATE;
3077#endif
3078 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3079 flags, block->fd, offset);
3080 } else {
3081 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3082 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3083 flags, -1, 0);
3084 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01003085#else
3086 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01003087#endif
3088 } else {
3089#if defined(TARGET_S390X) && defined(CONFIG_KVM)
3090 flags |= MAP_SHARED | MAP_ANONYMOUS;
3091 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
3092 flags, -1, 0);
3093#else
3094 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3095 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3096 flags, -1, 0);
3097#endif
3098 }
3099 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00003100 fprintf(stderr, "Could not remap addr: "
3101 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01003102 length, addr);
3103 exit(1);
3104 }
3105 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
3106 }
3107 return;
3108 }
3109 }
3110}
3111#endif /* !_WIN32 */
3112
pbrookdc828ca2009-04-09 22:21:07 +00003113/* Return a host pointer to ram allocated with qemu_ram_alloc.
pbrook5579c7f2009-04-11 14:47:08 +00003114 With the exception of the softmmu code in this file, this should
3115 only be used for local memory (e.g. video ram) that the device owns,
3116 and knows it isn't going to access beyond the end of the block.
3117
3118 It should not be used for general purpose DMA.
3119 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
3120 */
Anthony Liguoric227f092009-10-01 16:12:16 -05003121void *qemu_get_ram_ptr(ram_addr_t addr)
pbrookdc828ca2009-04-09 22:21:07 +00003122{
pbrook94a6b542009-04-11 17:15:54 +00003123 RAMBlock *block;
3124
Alex Williamsonf471a172010-06-11 11:11:42 -06003125 QLIST_FOREACH(block, &ram_list.blocks, next) {
3126 if (addr - block->offset < block->length) {
Vincent Palatin7d82af32011-03-10 15:47:46 -05003127            /* Move this entry to the start of the list. */
3128 if (block != QLIST_FIRST(&ram_list.blocks)) {
3129 QLIST_REMOVE(block, next);
3130 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
3131 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003132 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003133 /* We need to check if the requested address is in the RAM
3134 * because we don't want to map the entire memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003135 * In that case just map until the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01003136 */
3137 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003138 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01003139 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003140 block->host =
3141 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01003142 }
3143 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003144 return block->host + (addr - block->offset);
3145 }
pbrook94a6b542009-04-11 17:15:54 +00003146 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003147
3148 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3149 abort();
3150
3151 return NULL;
pbrookdc828ca2009-04-09 22:21:07 +00003152}
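/*
 * Sketch of the ownership rule stated above: a device may hold a host
 * pointer into a block it owns and stay within its length.  Names are
 * illustrative.
 */
static void example_clear_vram(ram_addr_t vram_offset, ram_addr_t vram_size)
{
    /* Fine for device-local memory; not a substitute for
       cpu_physical_memory_rw() on arbitrary guest addresses. */
    memset(qemu_get_ram_ptr(vram_offset), 0, vram_size);
}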
3153
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02003154/* Return a host pointer to ram allocated with qemu_ram_alloc.
3155 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3156 */
3157void *qemu_safe_ram_ptr(ram_addr_t addr)
3158{
3159 RAMBlock *block;
3160
3161 QLIST_FOREACH(block, &ram_list.blocks, next) {
3162 if (addr - block->offset < block->length) {
Jan Kiszka868bb332011-06-21 22:59:09 +02003163 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003164 /* We need to check if the requested address is in the RAM
3165 * because we don't want to map the entire memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003166 * In that case just map until the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01003167 */
3168 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003169 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01003170 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003171 block->host =
3172 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01003173 }
3174 }
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02003175 return block->host + (addr - block->offset);
3176 }
3177 }
3178
3179 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3180 abort();
3181
3182 return NULL;
3183}
3184
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003185/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
3186 * but takes a size argument */
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003187void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003188{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003189 if (*size == 0) {
3190 return NULL;
3191 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003192 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003193 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02003194 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003195 RAMBlock *block;
3196
3197 QLIST_FOREACH(block, &ram_list.blocks, next) {
3198 if (addr - block->offset < block->length) {
3199 if (addr - block->offset + *size > block->length)
3200 *size = block->length - addr + block->offset;
3201 return block->host + (addr - block->offset);
3202 }
3203 }
3204
3205 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3206 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003207 }
3208}
3209
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003210void qemu_put_ram_ptr(void *addr)
3211{
3212 trace_qemu_put_ram_ptr(addr);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003213}
3214
Marcelo Tosattie8902612010-10-11 15:31:19 -03003215int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00003216{
pbrook94a6b542009-04-11 17:15:54 +00003217 RAMBlock *block;
3218 uint8_t *host = ptr;
3219
Jan Kiszka868bb332011-06-21 22:59:09 +02003220 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003221 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003222 return 0;
3223 }
3224
Alex Williamsonf471a172010-06-11 11:11:42 -06003225 QLIST_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003226        /* This case occurs when the block is not mapped. */
3227 if (block->host == NULL) {
3228 continue;
3229 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003230 if (host - block->host < block->length) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03003231 *ram_addr = block->offset + (host - block->host);
3232 return 0;
Alex Williamsonf471a172010-06-11 11:11:42 -06003233 }
pbrook94a6b542009-04-11 17:15:54 +00003234 }
Jun Nakajima432d2682010-08-31 16:41:25 +01003235
Marcelo Tosattie8902612010-10-11 15:31:19 -03003236 return -1;
3237}
Alex Williamsonf471a172010-06-11 11:11:42 -06003238
Marcelo Tosattie8902612010-10-11 15:31:19 -03003239/* Some of the softmmu routines need to translate from a host pointer
3240 (typically a TLB entry) back to a ram offset. */
3241ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3242{
3243 ram_addr_t ram_addr;
Alex Williamsonf471a172010-06-11 11:11:42 -06003244
Marcelo Tosattie8902612010-10-11 15:31:19 -03003245 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3246 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3247 abort();
3248 }
3249 return ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00003250}
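/*
 * Editorial round-trip sketch: a host pointer obtained from a RAM block
 * maps back to its ram_addr_t.  The assert() mirrors what the softmmu
 * callers below rely on.
 */
static void example_round_trip(ram_addr_t offset)
{
    ram_addr_t back;
    void *host = qemu_get_ram_ptr(offset);

    /* 0 on success; -1 if the pointer lies outside every RAM block. */
    if (qemu_ram_addr_from_host(host, &back) == 0) {
        assert(back == offset);
    }
}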
3251
Anthony Liguoric227f092009-10-01 16:12:16 -05003252static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
bellard33417e72003-08-10 21:47:01 +00003253{
pbrook67d3b952006-12-18 05:03:52 +00003254#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00003255 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
pbrook67d3b952006-12-18 05:03:52 +00003256#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003257#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Blue Swirlb14ef7c2011-07-03 08:53:46 +00003258 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 1);
blueswir1e18231a2008-10-06 18:46:28 +00003259#endif
3260 return 0;
3261}
3262
Anthony Liguoric227f092009-10-01 16:12:16 -05003263static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
blueswir1e18231a2008-10-06 18:46:28 +00003264{
3265#ifdef DEBUG_UNASSIGNED
3266 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3267#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003268#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Blue Swirlb14ef7c2011-07-03 08:53:46 +00003269 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 2);
blueswir1e18231a2008-10-06 18:46:28 +00003270#endif
3271 return 0;
3272}
3273
Anthony Liguoric227f092009-10-01 16:12:16 -05003274static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
blueswir1e18231a2008-10-06 18:46:28 +00003275{
3276#ifdef DEBUG_UNASSIGNED
3277 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3278#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003279#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Blue Swirlb14ef7c2011-07-03 08:53:46 +00003280 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 4);
blueswir1b4f0a312007-05-06 17:59:24 +00003281#endif
bellard33417e72003-08-10 21:47:01 +00003282 return 0;
3283}
3284
Anthony Liguoric227f092009-10-01 16:12:16 -05003285static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
bellard33417e72003-08-10 21:47:01 +00003286{
pbrook67d3b952006-12-18 05:03:52 +00003287#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00003288 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
pbrook67d3b952006-12-18 05:03:52 +00003289#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003290#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Blue Swirlb14ef7c2011-07-03 08:53:46 +00003291 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 1);
blueswir1e18231a2008-10-06 18:46:28 +00003292#endif
3293}
3294
Anthony Liguoric227f092009-10-01 16:12:16 -05003295static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
blueswir1e18231a2008-10-06 18:46:28 +00003296{
3297#ifdef DEBUG_UNASSIGNED
3298 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3299#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003300#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Blue Swirlb14ef7c2011-07-03 08:53:46 +00003301 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 2);
blueswir1e18231a2008-10-06 18:46:28 +00003302#endif
3303}
3304
Anthony Liguoric227f092009-10-01 16:12:16 -05003305static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
blueswir1e18231a2008-10-06 18:46:28 +00003306{
3307#ifdef DEBUG_UNASSIGNED
3308 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3309#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003310#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Blue Swirlb14ef7c2011-07-03 08:53:46 +00003311 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 4);
blueswir1b4f0a312007-05-06 17:59:24 +00003312#endif
bellard33417e72003-08-10 21:47:01 +00003313}
3314
Blue Swirld60efc62009-08-25 18:29:31 +00003315static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
bellard33417e72003-08-10 21:47:01 +00003316 unassigned_mem_readb,
blueswir1e18231a2008-10-06 18:46:28 +00003317 unassigned_mem_readw,
3318 unassigned_mem_readl,
bellard33417e72003-08-10 21:47:01 +00003319};
3320
Blue Swirld60efc62009-08-25 18:29:31 +00003321static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
bellard33417e72003-08-10 21:47:01 +00003322 unassigned_mem_writeb,
blueswir1e18231a2008-10-06 18:46:28 +00003323 unassigned_mem_writew,
3324 unassigned_mem_writel,
bellard33417e72003-08-10 21:47:01 +00003325};
3326
Anthony Liguoric227f092009-10-01 16:12:16 -05003327static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
pbrook0f459d12008-06-09 00:20:13 +00003328 uint32_t val)
bellard1ccde1c2004-02-06 19:46:14 +00003329{
bellard3a7d9292005-08-21 09:26:42 +00003330 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003331 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003332 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3333#if !defined(CONFIG_USER_ONLY)
3334 tb_invalidate_phys_page_fast(ram_addr, 1);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003335 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003336#endif
3337 }
pbrook5579c7f2009-04-11 14:47:08 +00003338 stb_p(qemu_get_ram_ptr(ram_addr), val);
bellardf23db162005-08-21 19:12:28 +00003339 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003340 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00003341 /* we remove the notdirty callback only if the code has been
3342 flushed */
3343 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00003344 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00003345}
3346
Anthony Liguoric227f092009-10-01 16:12:16 -05003347static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
pbrook0f459d12008-06-09 00:20:13 +00003348 uint32_t val)
bellard1ccde1c2004-02-06 19:46:14 +00003349{
bellard3a7d9292005-08-21 09:26:42 +00003350 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003351 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003352 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3353#if !defined(CONFIG_USER_ONLY)
3354 tb_invalidate_phys_page_fast(ram_addr, 2);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003355 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003356#endif
3357 }
pbrook5579c7f2009-04-11 14:47:08 +00003358 stw_p(qemu_get_ram_ptr(ram_addr), val);
bellardf23db162005-08-21 19:12:28 +00003359 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003360 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00003361 /* we remove the notdirty callback only if the code has been
3362 flushed */
3363 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00003364 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00003365}
3366
Anthony Liguoric227f092009-10-01 16:12:16 -05003367static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
pbrook0f459d12008-06-09 00:20:13 +00003368 uint32_t val)
bellard1ccde1c2004-02-06 19:46:14 +00003369{
bellard3a7d9292005-08-21 09:26:42 +00003370 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003371 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003372 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3373#if !defined(CONFIG_USER_ONLY)
3374 tb_invalidate_phys_page_fast(ram_addr, 4);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003375 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003376#endif
3377 }
pbrook5579c7f2009-04-11 14:47:08 +00003378 stl_p(qemu_get_ram_ptr(ram_addr), val);
bellardf23db162005-08-21 19:12:28 +00003379 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003380 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00003381 /* we remove the notdirty callback only if the code has been
3382 flushed */
3383 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00003384 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00003385}
3386
Blue Swirld60efc62009-08-25 18:29:31 +00003387static CPUReadMemoryFunc * const error_mem_read[3] = {
bellard3a7d9292005-08-21 09:26:42 +00003388 NULL, /* never used */
3389 NULL, /* never used */
3390 NULL, /* never used */
3391};
3392
Blue Swirld60efc62009-08-25 18:29:31 +00003393static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
bellard1ccde1c2004-02-06 19:46:14 +00003394 notdirty_mem_writeb,
3395 notdirty_mem_writew,
3396 notdirty_mem_writel,
3397};
3398
pbrook0f459d12008-06-09 00:20:13 +00003399/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00003400static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00003401{
3402 CPUState *env = cpu_single_env;
aliguori06d55cc2008-11-18 20:24:06 +00003403 target_ulong pc, cs_base;
3404 TranslationBlock *tb;
pbrook0f459d12008-06-09 00:20:13 +00003405 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00003406 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00003407 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00003408
aliguori06d55cc2008-11-18 20:24:06 +00003409 if (env->watchpoint_hit) {
3410 /* We re-entered the check after replacing the TB. Now raise
3411 * the debug interrupt so that is will trigger after the
3412 * current instruction. */
3413 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3414 return;
3415 }
pbrook2e70f6e2008-06-29 01:03:05 +00003416 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003417 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00003418 if ((vaddr == (wp->vaddr & len_mask) ||
3419 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00003420 wp->flags |= BP_WATCHPOINT_HIT;
3421 if (!env->watchpoint_hit) {
3422 env->watchpoint_hit = wp;
3423 tb = tb_find_pc(env->mem_io_pc);
3424 if (!tb) {
3425 cpu_abort(env, "check_watchpoint: could not find TB for "
3426 "pc=%p", (void *)env->mem_io_pc);
3427 }
Stefan Weil618ba8e2011-04-18 06:39:53 +00003428 cpu_restore_state(tb, env, env->mem_io_pc);
aliguori6e140f22008-11-18 20:37:55 +00003429 tb_phys_invalidate(tb, -1);
3430 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3431 env->exception_index = EXCP_DEBUG;
3432 } else {
3433 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3434 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3435 }
3436 cpu_resume_from_signal(env, NULL);
aliguori06d55cc2008-11-18 20:24:06 +00003437 }
aliguori6e140f22008-11-18 20:37:55 +00003438 } else {
3439 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00003440 }
3441 }
3442}
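/*
 * How the check above gets armed, sketched with an illustrative caller:
 * cpu_watchpoint_insert() (assumed from earlier in this file) tags the
 * TLB so that accesses to the watched range are funnelled through the
 * watch_mem_* handlers below.
 */
static void example_watch_word(CPUState *env, target_ulong vaddr)
{
    CPUWatchpoint *wp;

    /* Trap any guest write touching the 4 bytes at vaddr. */
    cpu_watchpoint_insert(env, vaddr, 4, BP_MEM_WRITE, &wp);
}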
3443
pbrook6658ffb2007-03-16 23:58:11 +00003444/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3445 so these check for a hit then pass through to the normal out-of-line
3446 phys routines. */
Anthony Liguoric227f092009-10-01 16:12:16 -05003447static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
pbrook6658ffb2007-03-16 23:58:11 +00003448{
aliguorib4051332008-11-18 20:14:20 +00003449 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
pbrook6658ffb2007-03-16 23:58:11 +00003450 return ldub_phys(addr);
3451}
3452
Anthony Liguoric227f092009-10-01 16:12:16 -05003453static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
pbrook6658ffb2007-03-16 23:58:11 +00003454{
aliguorib4051332008-11-18 20:14:20 +00003455 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
pbrook6658ffb2007-03-16 23:58:11 +00003456 return lduw_phys(addr);
3457}
3458
Anthony Liguoric227f092009-10-01 16:12:16 -05003459static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
pbrook6658ffb2007-03-16 23:58:11 +00003460{
aliguorib4051332008-11-18 20:14:20 +00003461 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
pbrook6658ffb2007-03-16 23:58:11 +00003462 return ldl_phys(addr);
3463}
3464
Anthony Liguoric227f092009-10-01 16:12:16 -05003465static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
pbrook6658ffb2007-03-16 23:58:11 +00003466 uint32_t val)
3467{
aliguorib4051332008-11-18 20:14:20 +00003468 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
pbrook6658ffb2007-03-16 23:58:11 +00003469 stb_phys(addr, val);
3470}
3471
Anthony Liguoric227f092009-10-01 16:12:16 -05003472static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
pbrook6658ffb2007-03-16 23:58:11 +00003473 uint32_t val)
3474{
aliguorib4051332008-11-18 20:14:20 +00003475 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
pbrook6658ffb2007-03-16 23:58:11 +00003476 stw_phys(addr, val);
3477}
3478
Anthony Liguoric227f092009-10-01 16:12:16 -05003479static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
pbrook6658ffb2007-03-16 23:58:11 +00003480 uint32_t val)
3481{
aliguorib4051332008-11-18 20:14:20 +00003482 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
pbrook6658ffb2007-03-16 23:58:11 +00003483 stl_phys(addr, val);
3484}
3485
Blue Swirld60efc62009-08-25 18:29:31 +00003486static CPUReadMemoryFunc * const watch_mem_read[3] = {
pbrook6658ffb2007-03-16 23:58:11 +00003487 watch_mem_readb,
3488 watch_mem_readw,
3489 watch_mem_readl,
3490};
3491
Blue Swirld60efc62009-08-25 18:29:31 +00003492static CPUWriteMemoryFunc * const watch_mem_write[3] = {
pbrook6658ffb2007-03-16 23:58:11 +00003493 watch_mem_writeb,
3494 watch_mem_writew,
3495 watch_mem_writel,
3496};
pbrook6658ffb2007-03-16 23:58:11 +00003497
Richard Hendersonf6405242010-04-22 16:47:31 -07003498static inline uint32_t subpage_readlen (subpage_t *mmio,
3499 target_phys_addr_t addr,
3500 unsigned int len)
blueswir1db7b5422007-05-26 17:36:03 +00003501{
Richard Hendersonf6405242010-04-22 16:47:31 -07003502 unsigned int idx = SUBPAGE_IDX(addr);
blueswir1db7b5422007-05-26 17:36:03 +00003503#if defined(DEBUG_SUBPAGE)
3504 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3505 mmio, len, addr, idx);
3506#endif
blueswir1db7b5422007-05-26 17:36:03 +00003507
Richard Hendersonf6405242010-04-22 16:47:31 -07003508 addr += mmio->region_offset[idx];
3509 idx = mmio->sub_io_index[idx];
3510 return io_mem_read[idx][len](io_mem_opaque[idx], addr);
blueswir1db7b5422007-05-26 17:36:03 +00003511}
3512
Anthony Liguoric227f092009-10-01 16:12:16 -05003513static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
Richard Hendersonf6405242010-04-22 16:47:31 -07003514 uint32_t value, unsigned int len)
blueswir1db7b5422007-05-26 17:36:03 +00003515{
Richard Hendersonf6405242010-04-22 16:47:31 -07003516 unsigned int idx = SUBPAGE_IDX(addr);
blueswir1db7b5422007-05-26 17:36:03 +00003517#if defined(DEBUG_SUBPAGE)
Richard Hendersonf6405242010-04-22 16:47:31 -07003518 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
3519 __func__, mmio, len, addr, idx, value);
blueswir1db7b5422007-05-26 17:36:03 +00003520#endif
Richard Hendersonf6405242010-04-22 16:47:31 -07003521
3522 addr += mmio->region_offset[idx];
3523 idx = mmio->sub_io_index[idx];
3524 io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00003525}
3526
Anthony Liguoric227f092009-10-01 16:12:16 -05003527static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
blueswir1db7b5422007-05-26 17:36:03 +00003528{
blueswir1db7b5422007-05-26 17:36:03 +00003529 return subpage_readlen(opaque, addr, 0);
3530}
3531
Anthony Liguoric227f092009-10-01 16:12:16 -05003532static void subpage_writeb (void *opaque, target_phys_addr_t addr,
blueswir1db7b5422007-05-26 17:36:03 +00003533 uint32_t value)
3534{
blueswir1db7b5422007-05-26 17:36:03 +00003535 subpage_writelen(opaque, addr, value, 0);
3536}
3537
Anthony Liguoric227f092009-10-01 16:12:16 -05003538static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
blueswir1db7b5422007-05-26 17:36:03 +00003539{
blueswir1db7b5422007-05-26 17:36:03 +00003540 return subpage_readlen(opaque, addr, 1);
3541}
3542
Anthony Liguoric227f092009-10-01 16:12:16 -05003543static void subpage_writew (void *opaque, target_phys_addr_t addr,
blueswir1db7b5422007-05-26 17:36:03 +00003544 uint32_t value)
3545{
blueswir1db7b5422007-05-26 17:36:03 +00003546 subpage_writelen(opaque, addr, value, 1);
3547}
3548
Anthony Liguoric227f092009-10-01 16:12:16 -05003549static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
blueswir1db7b5422007-05-26 17:36:03 +00003550{
blueswir1db7b5422007-05-26 17:36:03 +00003551 return subpage_readlen(opaque, addr, 2);
3552}
3553
Richard Hendersonf6405242010-04-22 16:47:31 -07003554static void subpage_writel (void *opaque, target_phys_addr_t addr,
3555 uint32_t value)
blueswir1db7b5422007-05-26 17:36:03 +00003556{
blueswir1db7b5422007-05-26 17:36:03 +00003557 subpage_writelen(opaque, addr, value, 2);
3558}
3559
Blue Swirld60efc62009-08-25 18:29:31 +00003560static CPUReadMemoryFunc * const subpage_read[] = {
blueswir1db7b5422007-05-26 17:36:03 +00003561 &subpage_readb,
3562 &subpage_readw,
3563 &subpage_readl,
3564};
3565
Blue Swirld60efc62009-08-25 18:29:31 +00003566static CPUWriteMemoryFunc * const subpage_write[] = {
blueswir1db7b5422007-05-26 17:36:03 +00003567 &subpage_writeb,
3568 &subpage_writew,
3569 &subpage_writel,
3570};
3571
Anthony Liguoric227f092009-10-01 16:12:16 -05003572static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3573 ram_addr_t memory, ram_addr_t region_offset)
blueswir1db7b5422007-05-26 17:36:03 +00003574{
3575 int idx, eidx;
3576
3577 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3578 return -1;
3579 idx = SUBPAGE_IDX(start);
3580 eidx = SUBPAGE_IDX(end);
3581#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00003582 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
blueswir1db7b5422007-05-26 17:36:03 +00003583 mmio, start, end, idx, eidx, memory);
3584#endif
Gleb Natapov95c318f2010-07-29 10:41:45 +03003585 if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
3586 memory = IO_MEM_UNASSIGNED;
Richard Hendersonf6405242010-04-22 16:47:31 -07003587 memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
blueswir1db7b5422007-05-26 17:36:03 +00003588 for (; idx <= eidx; idx++) {
Richard Hendersonf6405242010-04-22 16:47:31 -07003589 mmio->sub_io_index[idx] = memory;
3590 mmio->region_offset[idx] = region_offset;
blueswir1db7b5422007-05-26 17:36:03 +00003591 }
3592
3593 return 0;
3594}
3595
Richard Hendersonf6405242010-04-22 16:47:31 -07003596static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3597 ram_addr_t orig_memory,
3598 ram_addr_t region_offset)
blueswir1db7b5422007-05-26 17:36:03 +00003599{
Anthony Liguoric227f092009-10-01 16:12:16 -05003600 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00003601 int subpage_memory;
3602
Anthony Liguoric227f092009-10-01 16:12:16 -05003603 mmio = qemu_mallocz(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00003604
3605 mmio->base = base;
Alexander Graf2507c122010-12-08 12:05:37 +01003606 subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio,
3607 DEVICE_NATIVE_ENDIAN);
blueswir1db7b5422007-05-26 17:36:03 +00003608#if defined(DEBUG_SUBPAGE)
aliguori1eec6142009-02-05 22:06:18 +00003609 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3610 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
blueswir1db7b5422007-05-26 17:36:03 +00003611#endif
aliguori1eec6142009-02-05 22:06:18 +00003612 *phys = subpage_memory | IO_MEM_SUBPAGE;
Richard Hendersonf6405242010-04-22 16:47:31 -07003613 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
blueswir1db7b5422007-05-26 17:36:03 +00003614
3615 return mmio;
3616}
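/*
 * Worked example (editorial, 4KB target pages): mapping a 0x100-byte
 * device at physical 0x1000100 leaves page 0x1000000..0x1000fff only
 * partially covered, so CHECK_SUBPAGE requests a subpage.  subpage_init()
 * takes over the whole page and subpage_register() points offsets
 * 0x100..0x1ff at the device, leaving the rest IO_MEM_UNASSIGNED.  On
 * access, the handler is recovered via SUBPAGE_IDX(addr), i.e.
 * addr & ~TARGET_PAGE_MASK.
 */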
3617
aliguori88715652009-02-11 15:20:58 +00003618static int get_free_io_mem_idx(void)
3619{
3620 int i;
3621
3622    for (i = 0; i < IO_MEM_NB_ENTRIES; i++)
3623 if (!io_mem_used[i]) {
3624 io_mem_used[i] = 1;
3625 return i;
3626 }
Riku Voipioc6703b42009-12-03 15:56:05 +02003627    fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
aliguori88715652009-02-11 15:20:58 +00003628 return -1;
3629}
3630
Alexander Grafdd310532010-12-08 12:05:36 +01003631/*
3632 * Usually, devices operate in little endian mode. There are devices out
3633 * there that operate in big endian too. Each device gets byte swapped
3634 * mmio if plugged onto a CPU that does the other endianness.
3635 *
3636 * CPU Device swap?
3637 *
3638 * little little no
3639 * little big yes
3640 * big little yes
3641 * big big no
3642 */
3643
3644typedef struct SwapEndianContainer {
3645 CPUReadMemoryFunc *read[3];
3646 CPUWriteMemoryFunc *write[3];
3647 void *opaque;
3648} SwapEndianContainer;
3649
3650static uint32_t swapendian_mem_readb (void *opaque, target_phys_addr_t addr)
3651{
3652 uint32_t val;
3653 SwapEndianContainer *c = opaque;
3654 val = c->read[0](c->opaque, addr);
3655 return val;
3656}
3657
3658static uint32_t swapendian_mem_readw(void *opaque, target_phys_addr_t addr)
3659{
3660 uint32_t val;
3661 SwapEndianContainer *c = opaque;
3662 val = bswap16(c->read[1](c->opaque, addr));
3663 return val;
3664}
3665
3666static uint32_t swapendian_mem_readl(void *opaque, target_phys_addr_t addr)
3667{
3668 uint32_t val;
3669 SwapEndianContainer *c = opaque;
3670 val = bswap32(c->read[2](c->opaque, addr));
3671 return val;
3672}
3673
3674static CPUReadMemoryFunc * const swapendian_readfn[3]={
3675 swapendian_mem_readb,
3676 swapendian_mem_readw,
3677 swapendian_mem_readl
3678};
3679
3680static void swapendian_mem_writeb(void *opaque, target_phys_addr_t addr,
3681 uint32_t val)
3682{
3683 SwapEndianContainer *c = opaque;
3684 c->write[0](c->opaque, addr, val);
3685}
3686
3687static void swapendian_mem_writew(void *opaque, target_phys_addr_t addr,
3688 uint32_t val)
3689{
3690 SwapEndianContainer *c = opaque;
3691 c->write[1](c->opaque, addr, bswap16(val));
3692}
3693
3694static void swapendian_mem_writel(void *opaque, target_phys_addr_t addr,
3695 uint32_t val)
3696{
3697 SwapEndianContainer *c = opaque;
3698 c->write[2](c->opaque, addr, bswap32(val));
3699}
3700
3701static CPUWriteMemoryFunc * const swapendian_writefn[3]={
3702 swapendian_mem_writeb,
3703 swapendian_mem_writew,
3704 swapendian_mem_writel
3705};
3706
3707static void swapendian_init(int io_index)
3708{
3709 SwapEndianContainer *c = qemu_malloc(sizeof(SwapEndianContainer));
3710 int i;
3711
3712 /* Swap mmio for big endian targets */
3713 c->opaque = io_mem_opaque[io_index];
3714 for (i = 0; i < 3; i++) {
3715 c->read[i] = io_mem_read[io_index][i];
3716 c->write[i] = io_mem_write[io_index][i];
3717
3718 io_mem_read[io_index][i] = swapendian_readfn[i];
3719 io_mem_write[io_index][i] = swapendian_writefn[i];
3720 }
3721 io_mem_opaque[io_index] = c;
3722}
3723
3724static void swapendian_del(int io_index)
3725{
3726 if (io_mem_read[io_index][0] == swapendian_readfn[0]) {
3727 qemu_free(io_mem_opaque[io_index]);
3728 }
3729}
3730
bellard33417e72003-08-10 21:47:01 +00003731/* mem_read and mem_write are arrays of functions containing the
3732 function to access byte (index 0), word (index 1) and dword (index
Paul Brook0b4e6e32009-04-30 18:37:55 +01003733 2). Functions can be omitted with a NULL function pointer.
blueswir13ee89922008-01-02 19:45:26 +00003734   If io_index is non-zero, the corresponding io zone is
blueswir14254fab2008-01-01 16:57:19 +00003735   modified. If it is zero, a new io zone is allocated. The return
3736   value can be used with cpu_register_physical_memory(). (-1) is
3737   returned on error. */
Avi Kivity1eed09c2009-06-14 11:38:51 +03003738static int cpu_register_io_memory_fixed(int io_index,
Blue Swirld60efc62009-08-25 18:29:31 +00003739 CPUReadMemoryFunc * const *mem_read,
3740 CPUWriteMemoryFunc * const *mem_write,
Alexander Grafdd310532010-12-08 12:05:36 +01003741 void *opaque, enum device_endian endian)
bellard33417e72003-08-10 21:47:01 +00003742{
Richard Henderson3cab7212010-05-07 09:52:51 -07003743 int i;
3744
bellard33417e72003-08-10 21:47:01 +00003745 if (io_index <= 0) {
aliguori88715652009-02-11 15:20:58 +00003746 io_index = get_free_io_mem_idx();
3747 if (io_index == -1)
3748 return io_index;
bellard33417e72003-08-10 21:47:01 +00003749 } else {
Avi Kivity1eed09c2009-06-14 11:38:51 +03003750 io_index >>= IO_MEM_SHIFT;
bellard33417e72003-08-10 21:47:01 +00003751 if (io_index >= IO_MEM_NB_ENTRIES)
3752 return -1;
3753 }
bellardb5ff1b32005-11-26 10:38:39 +00003754
Richard Henderson3cab7212010-05-07 09:52:51 -07003755 for (i = 0; i < 3; ++i) {
3756 io_mem_read[io_index][i]
3757 = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
3758 }
3759 for (i = 0; i < 3; ++i) {
3760 io_mem_write[io_index][i]
3761 = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
3762 }
bellarda4193c82004-06-03 14:01:43 +00003763 io_mem_opaque[io_index] = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003764
Alexander Grafdd310532010-12-08 12:05:36 +01003765 switch (endian) {
3766 case DEVICE_BIG_ENDIAN:
3767#ifndef TARGET_WORDS_BIGENDIAN
3768 swapendian_init(io_index);
3769#endif
3770 break;
3771 case DEVICE_LITTLE_ENDIAN:
3772#ifdef TARGET_WORDS_BIGENDIAN
3773 swapendian_init(io_index);
3774#endif
3775 break;
3776 case DEVICE_NATIVE_ENDIAN:
3777 default:
3778 break;
3779 }
3780
Richard Hendersonf6405242010-04-22 16:47:31 -07003781 return (io_index << IO_MEM_SHIFT);
bellard33417e72003-08-10 21:47:01 +00003782}
bellard61382a52003-10-27 21:22:23 +00003783
Blue Swirld60efc62009-08-25 18:29:31 +00003784int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3785 CPUWriteMemoryFunc * const *mem_write,
Alexander Grafdd310532010-12-08 12:05:36 +01003786 void *opaque, enum device_endian endian)
Avi Kivity1eed09c2009-06-14 11:38:51 +03003787{
Alexander Graf2507c122010-12-08 12:05:37 +01003788 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque, endian);
Avi Kivity1eed09c2009-06-14 11:38:51 +03003789}
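/*
 * The classic registration pattern, as a hedged sketch.  The my_dev_*
 * callbacks and the mapping address are hypothetical; NULL slots fall
 * back to the unassigned_mem_* handlers, as cpu_register_io_memory_fixed()
 * above shows.
 */
static uint32_t my_dev_readl(void *opaque, target_phys_addr_t addr)
{
    return 0; /* illustrative stub */
}

static void my_dev_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* illustrative stub */
}

static CPUReadMemoryFunc * const my_dev_read[3] = {
    NULL, NULL, my_dev_readl,
};

static CPUWriteMemoryFunc * const my_dev_write[3] = {
    NULL, NULL, my_dev_writel,
};

static void example_map_my_dev(void *state, target_phys_addr_t base)
{
    int io_index = cpu_register_io_memory(my_dev_read, my_dev_write,
                                          state, DEVICE_NATIVE_ENDIAN);
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io_index);
}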
3790
aliguori88715652009-02-11 15:20:58 +00003791void cpu_unregister_io_memory(int io_table_address)
3792{
3793 int i;
3794 int io_index = io_table_address >> IO_MEM_SHIFT;
3795
Alexander Grafdd310532010-12-08 12:05:36 +01003796 swapendian_del(io_index);
3797
aliguori88715652009-02-11 15:20:58 +00003798    for (i = 0; i < 3; i++) {
3799 io_mem_read[io_index][i] = unassigned_mem_read[i];
3800 io_mem_write[io_index][i] = unassigned_mem_write[i];
3801 }
3802 io_mem_opaque[io_index] = NULL;
3803 io_mem_used[io_index] = 0;
3804}
3805
Avi Kivitye9179ce2009-06-14 11:38:52 +03003806static void io_mem_init(void)
3807{
3808 int i;
3809
Alexander Graf2507c122010-12-08 12:05:37 +01003810 cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
3811 unassigned_mem_write, NULL,
3812 DEVICE_NATIVE_ENDIAN);
3813 cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
3814 unassigned_mem_write, NULL,
3815 DEVICE_NATIVE_ENDIAN);
3816 cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
3817 notdirty_mem_write, NULL,
3818 DEVICE_NATIVE_ENDIAN);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003819    for (i = 0; i < 5; i++)
3820 io_mem_used[i] = 1;
3821
3822 io_mem_watch = cpu_register_io_memory(watch_mem_read,
Alexander Graf2507c122010-12-08 12:05:37 +01003823 watch_mem_write, NULL,
3824 DEVICE_NATIVE_ENDIAN);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003825}
3826
Avi Kivity62152b82011-07-26 14:26:14 +03003827static void memory_map_init(void)
3828{
3829 system_memory = qemu_malloc(sizeof(*system_memory));
Avi Kivity8417ceb2011-08-03 11:56:14 +03003830 memory_region_init(system_memory, "system", INT64_MAX);
Avi Kivity62152b82011-07-26 14:26:14 +03003831 set_system_memory_map(system_memory);
3832}
3833
3834MemoryRegion *get_system_memory(void)
3835{
3836 return system_memory;
3837}
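/*
 * Sketch of how the nascent memory API above is consumed (hedged -- the
 * API is only being introduced at this point in the tree): callers build
 * a MemoryRegion and attach it under the system region.
 * memory_region_add_subregion() is assumed from memory.h.
 */
static void example_attach_region(MemoryRegion *mr, target_phys_addr_t base)
{
    memory_region_add_subregion(get_system_memory(), base, mr);
}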
3838
pbrooke2eef172008-06-08 01:09:01 +00003839#endif /* !defined(CONFIG_USER_ONLY) */
3840
bellard13eb76e2004-01-24 15:23:36 +00003841/* physical memory access (slow version, mainly for debug) */
3842#if defined(CONFIG_USER_ONLY)
Paul Brooka68fe892010-03-01 00:08:59 +00003843int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3844 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003845{
3846 int l, flags;
3847 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00003848 void * p;
bellard13eb76e2004-01-24 15:23:36 +00003849
3850 while (len > 0) {
3851 page = addr & TARGET_PAGE_MASK;
3852 l = (page + TARGET_PAGE_SIZE) - addr;
3853 if (l > len)
3854 l = len;
3855 flags = page_get_flags(page);
3856 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00003857 return -1;
bellard13eb76e2004-01-24 15:23:36 +00003858 if (is_write) {
3859 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00003860 return -1;
bellard579a97f2007-11-11 14:26:47 +00003861 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003862 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00003863 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003864 memcpy(p, buf, l);
3865 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00003866 } else {
3867 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00003868 return -1;
bellard579a97f2007-11-11 14:26:47 +00003869 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003870 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00003871 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003872 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00003873 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00003874 }
3875 len -= l;
3876 buf += l;
3877 addr += l;
3878 }
Paul Brooka68fe892010-03-01 00:08:59 +00003879 return 0;
bellard13eb76e2004-01-24 15:23:36 +00003880}
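/* In user mode there is no physical address space: the loop above
   validates the guest page flags and copies through the host mapping
   via lock_user()/unlock_user(), returning -1 on any permission or
   mapping failure. */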
bellard8df1cd02005-01-28 22:37:22 +00003881
bellard13eb76e2004-01-24 15:23:36 +00003882#else
Anthony Liguoric227f092009-10-01 16:12:16 -05003883void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
bellard13eb76e2004-01-24 15:23:36 +00003884 int len, int is_write)
3885{
3886 int l, io_index;
3887 uint8_t *ptr;
3888 uint32_t val;
Anthony Liguoric227f092009-10-01 16:12:16 -05003889 target_phys_addr_t page;
Anthony PERARD8ca56922011-07-15 04:32:53 +00003890 ram_addr_t pd;
bellard92e873b2004-05-21 14:52:29 +00003891 PhysPageDesc *p;
ths3b46e622007-09-17 08:09:54 +00003892
bellard13eb76e2004-01-24 15:23:36 +00003893 while (len > 0) {
3894 page = addr & TARGET_PAGE_MASK;
3895 l = (page + TARGET_PAGE_SIZE) - addr;
3896 if (l > len)
3897 l = len;
bellard92e873b2004-05-21 14:52:29 +00003898 p = phys_page_find(page >> TARGET_PAGE_BITS);
bellard13eb76e2004-01-24 15:23:36 +00003899 if (!p) {
3900 pd = IO_MEM_UNASSIGNED;
3901 } else {
3902 pd = p->phys_offset;
3903 }
ths3b46e622007-09-17 08:09:54 +00003904
bellard13eb76e2004-01-24 15:23:36 +00003905 if (is_write) {
bellard3a7d9292005-08-21 09:26:42 +00003906 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
Anthony Liguoric227f092009-10-01 16:12:16 -05003907 target_phys_addr_t addr1 = addr;
bellard13eb76e2004-01-24 15:23:36 +00003908 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00003909 if (p)
aurel326c2934d2009-02-18 21:37:17 +00003910 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bellard6a00d602005-11-21 23:25:50 +00003911 /* XXX: could force cpu_single_env to NULL to avoid
3912 potential bugs */
aurel326c2934d2009-02-18 21:37:17 +00003913 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003914 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003915 val = ldl_p(buf);
aurel326c2934d2009-02-18 21:37:17 +00003916 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
bellard13eb76e2004-01-24 15:23:36 +00003917 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003918 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003919 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003920 val = lduw_p(buf);
aurel326c2934d2009-02-18 21:37:17 +00003921 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
bellard13eb76e2004-01-24 15:23:36 +00003922 l = 2;
3923 } else {
bellard1c213d12005-09-03 10:49:04 +00003924 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003925 val = ldub_p(buf);
aurel326c2934d2009-02-18 21:37:17 +00003926 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
bellard13eb76e2004-01-24 15:23:36 +00003927 l = 1;
3928 }
3929 } else {
Anthony PERARD8ca56922011-07-15 04:32:53 +00003930 ram_addr_t addr1;
bellardb448f2f2004-02-25 23:24:04 +00003931 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
bellard13eb76e2004-01-24 15:23:36 +00003932 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003933 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00003934 memcpy(ptr, buf, l);
bellard3a7d9292005-08-21 09:26:42 +00003935 if (!cpu_physical_memory_is_dirty(addr1)) {
3936 /* invalidate code */
3937 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3938 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003939 cpu_physical_memory_set_dirty_flags(
3940 addr1, (0xff & ~CODE_DIRTY_FLAG));
bellard3a7d9292005-08-21 09:26:42 +00003941 }
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003942 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00003943 }
3944 } else {
ths5fafdf22007-09-16 21:08:06 +00003945 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
bellard2a4188a2006-06-25 21:54:59 +00003946 !(pd & IO_MEM_ROMD)) {
Anthony Liguoric227f092009-10-01 16:12:16 -05003947 target_phys_addr_t addr1 = addr;
bellard13eb76e2004-01-24 15:23:36 +00003948 /* I/O case */
3949 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00003950 if (p)
aurel326c2934d2009-02-18 21:37:17 +00003951 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3952 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003953 /* 32 bit read access */
aurel326c2934d2009-02-18 21:37:17 +00003954 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
bellardc27004e2005-01-03 23:35:10 +00003955 stl_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003956 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003957 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003958 /* 16 bit read access */
aurel326c2934d2009-02-18 21:37:17 +00003959 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
bellardc27004e2005-01-03 23:35:10 +00003960 stw_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003961 l = 2;
3962 } else {
bellard1c213d12005-09-03 10:49:04 +00003963 /* 8 bit read access */
aurel326c2934d2009-02-18 21:37:17 +00003964 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
bellardc27004e2005-01-03 23:35:10 +00003965 stb_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003966 l = 1;
3967 }
3968 } else {
3969 /* RAM case */
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003970 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
3971 memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
3972 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00003973 }
3974 }
3975 len -= l;
3976 buf += l;
3977 addr += l;
3978 }
3979}
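/* The cpu_physical_memory_read()/cpu_physical_memory_write() helpers
   used elsewhere in this file are thin wrappers around the function
   above with is_write fixed to 0 or 1, e.g.:

       uint8_t buf[4];
       cpu_physical_memory_read(addr, buf, 4);
*/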
bellard8df1cd02005-01-28 22:37:22 +00003980
bellardd0ecd2a2006-04-23 17:14:48 +00003981/* used for ROM loading : can write in RAM and ROM */
Anthony Liguoric227f092009-10-01 16:12:16 -05003982void cpu_physical_memory_write_rom(target_phys_addr_t addr,
bellardd0ecd2a2006-04-23 17:14:48 +00003983 const uint8_t *buf, int len)
3984{
3985 int l;
3986 uint8_t *ptr;
Anthony Liguoric227f092009-10-01 16:12:16 -05003987 target_phys_addr_t page;
bellardd0ecd2a2006-04-23 17:14:48 +00003988 unsigned long pd;
3989 PhysPageDesc *p;
ths3b46e622007-09-17 08:09:54 +00003990
bellardd0ecd2a2006-04-23 17:14:48 +00003991 while (len > 0) {
3992 page = addr & TARGET_PAGE_MASK;
3993 l = (page + TARGET_PAGE_SIZE) - addr;
3994 if (l > len)
3995 l = len;
3996 p = phys_page_find(page >> TARGET_PAGE_BITS);
3997 if (!p) {
3998 pd = IO_MEM_UNASSIGNED;
3999 } else {
4000 pd = p->phys_offset;
4001 }
ths3b46e622007-09-17 08:09:54 +00004002
bellardd0ecd2a2006-04-23 17:14:48 +00004003 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
bellard2a4188a2006-06-25 21:54:59 +00004004 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
4005 !(pd & IO_MEM_ROMD)) {
bellardd0ecd2a2006-04-23 17:14:48 +00004006 /* do nothing */
4007 } else {
4008 unsigned long addr1;
4009 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4010 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00004011 ptr = qemu_get_ram_ptr(addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00004012 memcpy(ptr, buf, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01004013 qemu_put_ram_ptr(ptr);
bellardd0ecd2a2006-04-23 17:14:48 +00004014 }
4015 len -= l;
4016 buf += l;
4017 addr += l;
4018 }
4019}
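/* Used by the firmware/ROM loaders: unlike cpu_physical_memory_rw(),
   this writes straight through ROM write protection, while plain I/O
   regions are silently skipped. */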
4020
aliguori6d16c2f2009-01-22 16:59:11 +00004021typedef struct {
4022 void *buffer;
Anthony Liguoric227f092009-10-01 16:12:16 -05004023 target_phys_addr_t addr;
4024 target_phys_addr_t len;
aliguori6d16c2f2009-01-22 16:59:11 +00004025} BounceBuffer;
4026
4027static BounceBuffer bounce;
4028
aliguoriba223c22009-01-22 16:59:16 +00004029typedef struct MapClient {
4030 void *opaque;
4031 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00004032 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00004033} MapClient;
4034
Blue Swirl72cf2d42009-09-12 07:36:22 +00004035static QLIST_HEAD(map_client_list, MapClient) map_client_list
4036 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00004037
4038void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
4039{
4040 MapClient *client = qemu_malloc(sizeof(*client));
4041
4042 client->opaque = opaque;
4043 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00004044 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00004045 return client;
4046}
4047
4048void cpu_unregister_map_client(void *_client)
4049{
4050 MapClient *client = (MapClient *)_client;
4051
Blue Swirl72cf2d42009-09-12 07:36:22 +00004052 QLIST_REMOVE(client, link);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09004053 qemu_free(client);
aliguoriba223c22009-01-22 16:59:16 +00004054}
4055
4056static void cpu_notify_map_clients(void)
4057{
4058 MapClient *client;
4059
Blue Swirl72cf2d42009-09-12 07:36:22 +00004060 while (!QLIST_EMPTY(&map_client_list)) {
4061 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00004062 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09004063 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00004064 }
4065}
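/* Typical retry pattern for DMA helpers (a sketch; dma_retry() and the
   device state "s" are hypothetical):

       static void dma_retry(void *opaque)
       {
           MyDeviceState *s = opaque;
           ... resubmit the transfer, which retries the mapping ...
       }

       p = cpu_physical_memory_map(addr, &len, is_write);
       if (!p) {
           s->map_client = cpu_register_map_client(s, dma_retry);
           return;
       }
*/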
4066
aliguori6d16c2f2009-01-22 16:59:11 +00004067/* Map a physical memory region into a host virtual address.
4068 * May map a subset of the requested range, given by and returned in *plen.
4069 * May return NULL if resources needed to perform the mapping are exhausted.
4070 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00004071 * Use cpu_register_map_client() to know when retrying the map operation is
4072 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00004073 */
Anthony Liguoric227f092009-10-01 16:12:16 -05004074void *cpu_physical_memory_map(target_phys_addr_t addr,
4075 target_phys_addr_t *plen,
aliguori6d16c2f2009-01-22 16:59:11 +00004076 int is_write)
4077{
Anthony Liguoric227f092009-10-01 16:12:16 -05004078 target_phys_addr_t len = *plen;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01004079 target_phys_addr_t todo = 0;
aliguori6d16c2f2009-01-22 16:59:11 +00004080 int l;
Anthony Liguoric227f092009-10-01 16:12:16 -05004081 target_phys_addr_t page;
aliguori6d16c2f2009-01-22 16:59:11 +00004082 unsigned long pd;
4083 PhysPageDesc *p;
Anthony PERARDf15fbc42011-07-20 08:17:42 +00004084 ram_addr_t raddr = RAM_ADDR_MAX;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01004085 ram_addr_t rlen;
4086 void *ret;
aliguori6d16c2f2009-01-22 16:59:11 +00004087
4088 while (len > 0) {
4089 page = addr & TARGET_PAGE_MASK;
4090 l = (page + TARGET_PAGE_SIZE) - addr;
4091 if (l > len)
4092 l = len;
4093 p = phys_page_find(page >> TARGET_PAGE_BITS);
4094 if (!p) {
4095 pd = IO_MEM_UNASSIGNED;
4096 } else {
4097 pd = p->phys_offset;
4098 }
4099
4100 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01004101 if (todo || bounce.buffer) {
aliguori6d16c2f2009-01-22 16:59:11 +00004102 break;
4103 }
4104 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
4105 bounce.addr = addr;
4106 bounce.len = l;
4107 if (!is_write) {
Stefan Weil54f7b4a2011-04-10 18:23:39 +02004108 cpu_physical_memory_read(addr, bounce.buffer, l);
aliguori6d16c2f2009-01-22 16:59:11 +00004109 }
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01004110
4111 *plen = l;
4112 return bounce.buffer;
aliguori6d16c2f2009-01-22 16:59:11 +00004113 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01004114 if (!todo) {
4115 raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4116 }
aliguori6d16c2f2009-01-22 16:59:11 +00004117
4118 len -= l;
4119 addr += l;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01004120 todo += l;
aliguori6d16c2f2009-01-22 16:59:11 +00004121 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01004122 rlen = todo;
4123 ret = qemu_ram_ptr_length(raddr, &rlen);
4124 *plen = rlen;
4125 return ret;
aliguori6d16c2f2009-01-22 16:59:11 +00004126}
4127
4128/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
4129 * Will also mark the memory as dirty if is_write == 1. access_len gives
4130 * the amount of memory that was actually read or written by the caller.
4131 */
Anthony Liguoric227f092009-10-01 16:12:16 -05004132void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
4133 int is_write, target_phys_addr_t access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00004134{
4135 if (buffer != bounce.buffer) {
4136 if (is_write) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03004137 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00004138 while (access_len) {
4139 unsigned l;
4140 l = TARGET_PAGE_SIZE;
4141 if (l > access_len)
4142 l = access_len;
4143 if (!cpu_physical_memory_is_dirty(addr1)) {
4144 /* invalidate code */
4145 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
4146 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09004147 cpu_physical_memory_set_dirty_flags(
4148 addr1, (0xff & ~CODE_DIRTY_FLAG));
aliguori6d16c2f2009-01-22 16:59:11 +00004149 }
4150 addr1 += l;
4151 access_len -= l;
4152 }
4153 }
Jan Kiszka868bb332011-06-21 22:59:09 +02004154 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02004155 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01004156 }
aliguori6d16c2f2009-01-22 16:59:11 +00004157 return;
4158 }
4159 if (is_write) {
4160 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
4161 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00004162 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00004163 bounce.buffer = NULL;
aliguoriba223c22009-01-22 16:59:16 +00004164 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00004165}
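/* Sketch of a complete map/use/unmap cycle (illustrative only):

       target_phys_addr_t plen = size;
       uint8_t *p = cpu_physical_memory_map(addr, &plen, 1);
       if (p) {
           memset(p, 0, plen);
           cpu_physical_memory_unmap(p, plen, 1, plen);
       }

   Note that plen may come back smaller than requested (at most one page
   when the bounce buffer is in use), so callers must loop or fall back
   to cpu_physical_memory_rw() for the remainder. */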
bellardd0ecd2a2006-04-23 17:14:48 +00004166
bellard8df1cd02005-01-28 22:37:22 +00004167/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004168static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
4169 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00004170{
4171 int io_index;
4172 uint8_t *ptr;
4173 uint32_t val;
4174 unsigned long pd;
4175 PhysPageDesc *p;
4176
4177 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4178 if (!p) {
4179 pd = IO_MEM_UNASSIGNED;
4180 } else {
4181 pd = p->phys_offset;
4182 }
ths3b46e622007-09-17 08:09:54 +00004183
ths5fafdf22007-09-16 21:08:06 +00004184 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
bellard2a4188a2006-06-25 21:54:59 +00004185 !(pd & IO_MEM_ROMD)) {
bellard8df1cd02005-01-28 22:37:22 +00004186 /* I/O case */
4187 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00004188 if (p)
4189 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bellard8df1cd02005-01-28 22:37:22 +00004190 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004191#if defined(TARGET_WORDS_BIGENDIAN)
4192 if (endian == DEVICE_LITTLE_ENDIAN) {
4193 val = bswap32(val);
4194 }
4195#else
4196 if (endian == DEVICE_BIG_ENDIAN) {
4197 val = bswap32(val);
4198 }
4199#endif
bellard8df1cd02005-01-28 22:37:22 +00004200 } else {
4201 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00004202 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bellard8df1cd02005-01-28 22:37:22 +00004203 (addr & ~TARGET_PAGE_MASK);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004204 switch (endian) {
4205 case DEVICE_LITTLE_ENDIAN:
4206 val = ldl_le_p(ptr);
4207 break;
4208 case DEVICE_BIG_ENDIAN:
4209 val = ldl_be_p(ptr);
4210 break;
4211 default:
4212 val = ldl_p(ptr);
4213 break;
4214 }
bellard8df1cd02005-01-28 22:37:22 +00004215 }
4216 return val;
4217}
4218
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004219uint32_t ldl_phys(target_phys_addr_t addr)
4220{
4221 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4222}
4223
4224uint32_t ldl_le_phys(target_phys_addr_t addr)
4225{
4226 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4227}
4228
4229uint32_t ldl_be_phys(target_phys_addr_t addr)
4230{
4231 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
4232}
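/* The _le/_be variants above read with an explicit byte order,
   independent of TARGET_WORDS_BIGENDIAN; use them when a bus or device
   defines its register layout as little- or big-endian regardless of
   the guest CPU. ldl_phys() keeps the target's native order. */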
4233
bellard84b7b8e2005-11-28 21:19:04 +00004234/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004235static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
4236 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00004237{
4238 int io_index;
4239 uint8_t *ptr;
4240 uint64_t val;
4241 unsigned long pd;
4242 PhysPageDesc *p;
4243
4244 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4245 if (!p) {
4246 pd = IO_MEM_UNASSIGNED;
4247 } else {
4248 pd = p->phys_offset;
4249 }
ths3b46e622007-09-17 08:09:54 +00004250
bellard2a4188a2006-06-25 21:54:59 +00004251 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
4252 !(pd & IO_MEM_ROMD)) {
bellard84b7b8e2005-11-28 21:19:04 +00004253 /* I/O case */
4254 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00004255 if (p)
4256 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004257
4258 /* XXX This is broken when device endian != cpu endian.
4259 Fix and add "endian" variable check */
bellard84b7b8e2005-11-28 21:19:04 +00004260#ifdef TARGET_WORDS_BIGENDIAN
4261 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
4262 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
4263#else
4264 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
4265 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
4266#endif
4267 } else {
4268 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00004269 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bellard84b7b8e2005-11-28 21:19:04 +00004270 (addr & ~TARGET_PAGE_MASK);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004271 switch (endian) {
4272 case DEVICE_LITTLE_ENDIAN:
4273 val = ldq_le_p(ptr);
4274 break;
4275 case DEVICE_BIG_ENDIAN:
4276 val = ldq_be_p(ptr);
4277 break;
4278 default:
4279 val = ldq_p(ptr);
4280 break;
4281 }
bellard84b7b8e2005-11-28 21:19:04 +00004282 }
4283 return val;
4284}
4285
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004286uint64_t ldq_phys(target_phys_addr_t addr)
4287{
4288 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4289}
4290
4291uint64_t ldq_le_phys(target_phys_addr_t addr)
4292{
4293 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4294}
4295
4296uint64_t ldq_be_phys(target_phys_addr_t addr)
4297{
4298 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
4299}
4300
bellardaab33092005-10-30 20:48:42 +00004301/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05004302uint32_t ldub_phys(target_phys_addr_t addr)
bellardaab33092005-10-30 20:48:42 +00004303{
4304 uint8_t val;
4305 cpu_physical_memory_read(addr, &val, 1);
4306 return val;
4307}
4308
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004309/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004310static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
4311 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00004312{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004313 int io_index;
4314 uint8_t *ptr;
4315 uint64_t val;
4316 unsigned long pd;
4317 PhysPageDesc *p;
4318
4319 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4320 if (!p) {
4321 pd = IO_MEM_UNASSIGNED;
4322 } else {
4323 pd = p->phys_offset;
4324 }
4325
4326 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
4327 !(pd & IO_MEM_ROMD)) {
4328 /* I/O case */
4329 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4330 if (p)
4331 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4332 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004333#if defined(TARGET_WORDS_BIGENDIAN)
4334 if (endian == DEVICE_LITTLE_ENDIAN) {
4335 val = bswap16(val);
4336 }
4337#else
4338 if (endian == DEVICE_BIG_ENDIAN) {
4339 val = bswap16(val);
4340 }
4341#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004342 } else {
4343 /* RAM case */
4344 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4345 (addr & ~TARGET_PAGE_MASK);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004346 switch (endian) {
4347 case DEVICE_LITTLE_ENDIAN:
4348 val = lduw_le_p(ptr);
4349 break;
4350 case DEVICE_BIG_ENDIAN:
4351 val = lduw_be_p(ptr);
4352 break;
4353 default:
4354 val = lduw_p(ptr);
4355 break;
4356 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004357 }
4358 return val;
bellardaab33092005-10-30 20:48:42 +00004359}
4360
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004361uint32_t lduw_phys(target_phys_addr_t addr)
4362{
4363 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4364}
4365
4366uint32_t lduw_le_phys(target_phys_addr_t addr)
4367{
4368 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4369}
4370
4371uint32_t lduw_be_phys(target_phys_addr_t addr)
4372{
4373 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
4374}
4375
bellard8df1cd02005-01-28 22:37:22 +00004376/* warning: addr must be aligned. The ram page is not marked as dirty
4377 and the code inside is not invalidated. It is useful if the dirty
4378 bits are used to track modified PTEs */
Anthony Liguoric227f092009-10-01 16:12:16 -05004379void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
bellard8df1cd02005-01-28 22:37:22 +00004380{
4381 int io_index;
4382 uint8_t *ptr;
4383 unsigned long pd;
4384 PhysPageDesc *p;
4385
4386 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4387 if (!p) {
4388 pd = IO_MEM_UNASSIGNED;
4389 } else {
4390 pd = p->phys_offset;
4391 }
ths3b46e622007-09-17 08:09:54 +00004392
bellard3a7d9292005-08-21 09:26:42 +00004393 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
bellard8df1cd02005-01-28 22:37:22 +00004394 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00004395 if (p)
4396 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bellard8df1cd02005-01-28 22:37:22 +00004397 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4398 } else {
aliguori74576192008-10-06 14:02:03 +00004399 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
pbrook5579c7f2009-04-11 14:47:08 +00004400 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00004401 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00004402
4403 if (unlikely(in_migration)) {
4404 if (!cpu_physical_memory_is_dirty(addr1)) {
4405 /* invalidate code */
4406 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4407 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09004408 cpu_physical_memory_set_dirty_flags(
4409 addr1, (0xff & ~CODE_DIRTY_FLAG));
aliguori74576192008-10-06 14:02:03 +00004410 }
4411 }
bellard8df1cd02005-01-28 22:37:22 +00004412 }
4413}
4414
Anthony Liguoric227f092009-10-01 16:12:16 -05004415void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
j_mayerbc98a7e2007-04-04 07:55:12 +00004416{
4417 int io_index;
4418 uint8_t *ptr;
4419 unsigned long pd;
4420 PhysPageDesc *p;
4421
4422 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4423 if (!p) {
4424 pd = IO_MEM_UNASSIGNED;
4425 } else {
4426 pd = p->phys_offset;
4427 }
ths3b46e622007-09-17 08:09:54 +00004428
j_mayerbc98a7e2007-04-04 07:55:12 +00004429 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4430 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00004431 if (p)
4432 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
j_mayerbc98a7e2007-04-04 07:55:12 +00004433#ifdef TARGET_WORDS_BIGENDIAN
4434 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
4435 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
4436#else
4437 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4438 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
4439#endif
4440 } else {
pbrook5579c7f2009-04-11 14:47:08 +00004441 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
j_mayerbc98a7e2007-04-04 07:55:12 +00004442 (addr & ~TARGET_PAGE_MASK);
4443 stq_p(ptr, val);
4444 }
4445}
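/* The _notdirty stores above exist mainly for the softmmu page-table
   walkers: updating accessed/dirty bits in guest PTEs through them
   avoids flushing translated code on every PTE touch, while
   stl_phys_notdirty() still honours the migration dirty log through
   its in_migration check. */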
4446
bellard8df1cd02005-01-28 22:37:22 +00004447/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004448static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
4449 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00004450{
4451 int io_index;
4452 uint8_t *ptr;
4453 unsigned long pd;
4454 PhysPageDesc *p;
4455
4456 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4457 if (!p) {
4458 pd = IO_MEM_UNASSIGNED;
4459 } else {
4460 pd = p->phys_offset;
4461 }
ths3b46e622007-09-17 08:09:54 +00004462
bellard3a7d9292005-08-21 09:26:42 +00004463 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
bellard8df1cd02005-01-28 22:37:22 +00004464 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00004465 if (p)
4466 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004467#if defined(TARGET_WORDS_BIGENDIAN)
4468 if (endian == DEVICE_LITTLE_ENDIAN) {
4469 val = bswap32(val);
4470 }
4471#else
4472 if (endian == DEVICE_BIG_ENDIAN) {
4473 val = bswap32(val);
4474 }
4475#endif
bellard8df1cd02005-01-28 22:37:22 +00004476 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4477 } else {
4478 unsigned long addr1;
4479 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4480 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00004481 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004482 switch (endian) {
4483 case DEVICE_LITTLE_ENDIAN:
4484 stl_le_p(ptr, val);
4485 break;
4486 case DEVICE_BIG_ENDIAN:
4487 stl_be_p(ptr, val);
4488 break;
4489 default:
4490 stl_p(ptr, val);
4491 break;
4492 }
bellard3a7d9292005-08-21 09:26:42 +00004493 if (!cpu_physical_memory_is_dirty(addr1)) {
4494 /* invalidate code */
4495 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4496 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09004497 cpu_physical_memory_set_dirty_flags(addr1,
4498 (0xff & ~CODE_DIRTY_FLAG));
bellard3a7d9292005-08-21 09:26:42 +00004499 }
bellard8df1cd02005-01-28 22:37:22 +00004500 }
4501}
4502
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004503void stl_phys(target_phys_addr_t addr, uint32_t val)
4504{
4505 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4506}
4507
4508void stl_le_phys(target_phys_addr_t addr, uint32_t val)
4509{
4510 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4511}
4512
4513void stl_be_phys(target_phys_addr_t addr, uint32_t val)
4514{
4515 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4516}
4517
bellardaab33092005-10-30 20:48:42 +00004518/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05004519void stb_phys(target_phys_addr_t addr, uint32_t val)
bellardaab33092005-10-30 20:48:42 +00004520{
4521 uint8_t v = val;
4522 cpu_physical_memory_write(addr, &v, 1);
4523}
4524
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004525/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004526static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
4527 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00004528{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004529 int io_index;
4530 uint8_t *ptr;
4531 unsigned long pd;
4532 PhysPageDesc *p;
4533
4534 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4535 if (!p) {
4536 pd = IO_MEM_UNASSIGNED;
4537 } else {
4538 pd = p->phys_offset;
4539 }
4540
4541 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4542 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4543 if (p)
4544 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004545#if defined(TARGET_WORDS_BIGENDIAN)
4546 if (endian == DEVICE_LITTLE_ENDIAN) {
4547 val = bswap16(val);
4548 }
4549#else
4550 if (endian == DEVICE_BIG_ENDIAN) {
4551 val = bswap16(val);
4552 }
4553#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004554 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
4555 } else {
4556 unsigned long addr1;
4557 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4558 /* RAM case */
4559 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004560 switch (endian) {
4561 case DEVICE_LITTLE_ENDIAN:
4562 stw_le_p(ptr, val);
4563 break;
4564 case DEVICE_BIG_ENDIAN:
4565 stw_be_p(ptr, val);
4566 break;
4567 default:
4568 stw_p(ptr, val);
4569 break;
4570 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004571 if (!cpu_physical_memory_is_dirty(addr1)) {
4572 /* invalidate code */
4573 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4574 /* set dirty bit */
4575 cpu_physical_memory_set_dirty_flags(addr1,
4576 (0xff & ~CODE_DIRTY_FLAG));
4577 }
4578 }
bellardaab33092005-10-30 20:48:42 +00004579}
4580
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004581void stw_phys(target_phys_addr_t addr, uint32_t val)
4582{
4583 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4584}
4585
4586void stw_le_phys(target_phys_addr_t addr, uint32_t val)
4587{
4588 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4589}
4590
4591void stw_be_phys(target_phys_addr_t addr, uint32_t val)
4592{
4593 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4594}
4595
bellardaab33092005-10-30 20:48:42 +00004596/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05004597void stq_phys(target_phys_addr_t addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00004598{
4599 val = tswap64(val);
Stefan Weil71d2b722011-03-26 21:06:56 +01004600 cpu_physical_memory_write(addr, &val, 8);
bellardaab33092005-10-30 20:48:42 +00004601}
4602
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004603void stq_le_phys(target_phys_addr_t addr, uint64_t val)
4604{
4605 val = cpu_to_le64(val);
4606 cpu_physical_memory_write(addr, &val, 8);
4607}
4608
4609void stq_be_phys(target_phys_addr_t addr, uint64_t val)
4610{
4611 val = cpu_to_be64(val);
4612 cpu_physical_memory_write(addr, &val, 8);
4613}
4614
aliguori5e2972f2009-03-28 17:51:36 +00004615/* virtual memory access for debug (includes writing to ROM) */
ths5fafdf22007-09-16 21:08:06 +00004616int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00004617 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00004618{
4619 int l;
Anthony Liguoric227f092009-10-01 16:12:16 -05004620 target_phys_addr_t phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00004621 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00004622
4623 while (len > 0) {
4624 page = addr & TARGET_PAGE_MASK;
4625 phys_addr = cpu_get_phys_page_debug(env, page);
4626 /* if no physical page mapped, return an error */
4627 if (phys_addr == -1)
4628 return -1;
4629 l = (page + TARGET_PAGE_SIZE) - addr;
4630 if (l > len)
4631 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00004632 phys_addr += (addr & ~TARGET_PAGE_MASK);
aliguori5e2972f2009-03-28 17:51:36 +00004633 if (is_write)
4634 cpu_physical_memory_write_rom(phys_addr, buf, l);
4635 else
aliguori5e2972f2009-03-28 17:51:36 +00004636 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
bellard13eb76e2004-01-24 15:23:36 +00004637 len -= l;
4638 buf += l;
4639 addr += l;
4640 }
4641 return 0;
4642}
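/* This is the entry point used by the gdb stub and the monitor's memory
   commands; routing writes through cpu_physical_memory_write_rom()
   lets a debugger plant breakpoints even in ROM-mapped pages. */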
Paul Brooka68fe892010-03-01 00:08:59 +00004643#endif
bellard13eb76e2004-01-24 15:23:36 +00004644
pbrook2e70f6e2008-06-29 01:03:05 +00004645/* in deterministic execution mode, instructions doing device I/O
4646 must be at the end of the TB */
4647void cpu_io_recompile(CPUState *env, void *retaddr)
4648{
4649 TranslationBlock *tb;
4650 uint32_t n, cflags;
4651 target_ulong pc, cs_base;
4652 uint64_t flags;
4653
4654 tb = tb_find_pc((unsigned long)retaddr);
4655 if (!tb) {
4656 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
4657 retaddr);
4658 }
4659 n = env->icount_decr.u16.low + tb->icount;
Stefan Weil618ba8e2011-04-18 06:39:53 +00004660 cpu_restore_state(tb, env, (unsigned long)retaddr);
pbrook2e70f6e2008-06-29 01:03:05 +00004661 /* Calculate how many instructions had been executed before the fault
thsbf20dc02008-06-30 17:22:19 +00004662 occurred. */
pbrook2e70f6e2008-06-29 01:03:05 +00004663 n = n - env->icount_decr.u16.low;
4664 /* Generate a new TB ending on the I/O insn. */
4665 n++;
4666 /* On MIPS and SH, delay slot instructions can only be restarted if
4667 they were already the first instruction in the TB. If this is not
thsbf20dc02008-06-30 17:22:19 +00004668 the first instruction in a TB then re-execute the preceding
pbrook2e70f6e2008-06-29 01:03:05 +00004669 branch. */
4670#if defined(TARGET_MIPS)
4671 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4672 env->active_tc.PC -= 4;
4673 env->icount_decr.u16.low++;
4674 env->hflags &= ~MIPS_HFLAG_BMASK;
4675 }
4676#elif defined(TARGET_SH4)
4677 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4678 && n > 1) {
4679 env->pc -= 2;
4680 env->icount_decr.u16.low++;
4681 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4682 }
4683#endif
4684 /* This should never happen. */
4685 if (n > CF_COUNT_MASK)
4686 cpu_abort(env, "TB too big during recompile");
4687
4688 cflags = n | CF_LAST_IO;
4689 pc = tb->pc;
4690 cs_base = tb->cs_base;
4691 flags = tb->flags;
4692 tb_phys_invalidate(tb, -1);
4693 /* FIXME: In theory this could raise an exception. In practice
4694 we have already translated the block once so it's probably ok. */
4695 tb_gen_code(env, pc, cs_base, flags, cflags);
thsbf20dc02008-06-30 17:22:19 +00004696 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
pbrook2e70f6e2008-06-29 01:03:05 +00004697 the first in the TB) then we end up generating a whole new TB and
4698 repeating the fault, which is horribly inefficient.
4699 Better would be to execute just this insn uncached, or generate a
4700 second new TB. */
4701 cpu_resume_from_signal(env, NULL);
4702}
4703
Paul Brookb3755a92010-03-12 16:54:58 +00004704#if !defined(CONFIG_USER_ONLY)
4705
Stefan Weil055403b2010-10-22 23:03:32 +02004706void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
bellarde3db7222005-01-26 22:00:47 +00004707{
4708 int i, target_code_size, max_target_code_size;
4709 int direct_jmp_count, direct_jmp2_count, cross_page;
4710 TranslationBlock *tb;
ths3b46e622007-09-17 08:09:54 +00004711
bellarde3db7222005-01-26 22:00:47 +00004712 target_code_size = 0;
4713 max_target_code_size = 0;
4714 cross_page = 0;
4715 direct_jmp_count = 0;
4716 direct_jmp2_count = 0;
4717    for (i = 0; i < nb_tbs; i++) {
4718 tb = &tbs[i];
4719 target_code_size += tb->size;
4720 if (tb->size > max_target_code_size)
4721 max_target_code_size = tb->size;
4722 if (tb->page_addr[1] != -1)
4723 cross_page++;
4724 if (tb->tb_next_offset[0] != 0xffff) {
4725 direct_jmp_count++;
4726 if (tb->tb_next_offset[1] != 0xffff) {
4727 direct_jmp2_count++;
4728 }
4729 }
4730 }
4731    /* XXX: avoid using doubles? */
bellard57fec1f2008-02-01 10:50:11 +00004732 cpu_fprintf(f, "Translation buffer state:\n");
Stefan Weil055403b2010-10-22 23:03:32 +02004733 cpu_fprintf(f, "gen code size %td/%ld\n",
bellard26a5f132008-05-28 12:30:31 +00004734 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4735 cpu_fprintf(f, "TB count %d/%d\n",
4736 nb_tbs, code_gen_max_blocks);
ths5fafdf22007-09-16 21:08:06 +00004737 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
bellarde3db7222005-01-26 22:00:47 +00004738 nb_tbs ? target_code_size / nb_tbs : 0,
4739 max_target_code_size);
Stefan Weil055403b2010-10-22 23:03:32 +02004740 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
bellarde3db7222005-01-26 22:00:47 +00004741 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4742 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
ths5fafdf22007-09-16 21:08:06 +00004743 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4744 cross_page,
bellarde3db7222005-01-26 22:00:47 +00004745 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4746 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
ths5fafdf22007-09-16 21:08:06 +00004747 direct_jmp_count,
bellarde3db7222005-01-26 22:00:47 +00004748 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4749 direct_jmp2_count,
4750 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
bellard57fec1f2008-02-01 10:50:11 +00004751 cpu_fprintf(f, "\nStatistics:\n");
bellarde3db7222005-01-26 22:00:47 +00004752 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4753 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4754 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
bellardb67d9a52008-05-23 09:57:34 +00004755 tcg_dump_info(f, cpu_fprintf);
bellarde3db7222005-01-26 22:00:47 +00004756}
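/* Reachable through the monitor's "info jit" command; tcg_dump_info()
   appends the TCG-level counters gathered during code generation. */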
4757
bellard61382a52003-10-27 21:22:23 +00004758#define MMUSUFFIX _cmmu
4759#define GETPC() NULL
4760#define env cpu_single_env
bellardb769d8f2004-10-03 15:07:13 +00004761#define SOFTMMU_CODE_ACCESS
bellard61382a52003-10-27 21:22:23 +00004762
4763#define SHIFT 0
4764#include "softmmu_template.h"
4765
4766#define SHIFT 1
4767#include "softmmu_template.h"
4768
4769#define SHIFT 2
4770#include "softmmu_template.h"
4771
4772#define SHIFT 3
4773#include "softmmu_template.h"
4774
4775#undef env
4776
4777#endif