blob: 719fff9a915f07d0ff2f7c313bceacb6a16332ab [file] [log] [blame]
bellard54936002003-05-13 00:25:15 +00001/*
bellardfd6ce8f2003-05-14 19:00:11 +00002 * virtual page mapping and translated block handling
ths5fafdf22007-09-16 21:08:06 +00003 *
bellard54936002003-05-13 00:25:15 +00004 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
Blue Swirl8167ee82009-07-16 20:47:01 +000017 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
bellard54936002003-05-13 00:25:15 +000018 */
bellard67b915a2004-03-31 23:37:16 +000019#include "config.h"
bellardd5a8f072004-09-29 21:15:28 +000020#ifdef _WIN32
21#include <windows.h>
22#else
bellarda98d49b2004-11-14 16:22:05 +000023#include <sys/types.h>
bellardd5a8f072004-09-29 21:15:28 +000024#include <sys/mman.h>
25#endif
bellard54936002003-05-13 00:25:15 +000026
Stefan Weil055403b2010-10-22 23:03:32 +020027#include "qemu-common.h"
bellard6180a182003-09-30 21:04:53 +000028#include "cpu.h"
bellardb67d9a52008-05-23 09:57:34 +000029#include "tcg.h"
pbrookb3c77242008-06-30 16:31:04 +000030#include "hw/hw.h"
Alex Williamsoncc9e98c2010-06-25 11:09:43 -060031#include "hw/qdev.h"
aliguori74576192008-10-06 14:02:03 +000032#include "osdep.h"
aliguori7ba1e612008-11-05 16:04:33 +000033#include "kvm.h"
Jun Nakajima432d2682010-08-31 16:41:25 +010034#include "hw/xen.h"
Blue Swirl29e922b2010-03-29 19:24:00 +000035#include "qemu-timer.h"
Avi Kivity62152b82011-07-26 14:26:14 +030036#include "memory.h"
37#include "exec-memory.h"
pbrook53a59602006-03-25 19:31:22 +000038#if defined(CONFIG_USER_ONLY)
39#include <qemu.h>
Juergen Lockf01576f2010-03-25 22:32:16 +010040#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
41#include <sys/param.h>
42#if __FreeBSD_version >= 700104
43#define HAVE_KINFO_GETVMMAP
44#define sigqueue sigqueue_freebsd /* avoid redefinition */
45#include <sys/time.h>
46#include <sys/proc.h>
47#include <machine/profile.h>
48#define _KERNEL
49#include <sys/user.h>
50#undef _KERNEL
51#undef sigqueue
52#include <libutil.h>
53#endif
54#endif
Jun Nakajima432d2682010-08-31 16:41:25 +010055#else /* !CONFIG_USER_ONLY */
56#include "xen-mapcache.h"
Stefano Stabellini6506e4f2011-05-19 18:35:44 +010057#include "trace.h"
pbrook53a59602006-03-25 19:31:22 +000058#endif
bellard54936002003-05-13 00:25:15 +000059
bellardfd6ce8f2003-05-14 19:00:11 +000060//#define DEBUG_TB_INVALIDATE
bellard66e85a22003-06-24 13:28:12 +000061//#define DEBUG_FLUSH
bellard9fa3e852004-01-04 18:06:42 +000062//#define DEBUG_TLB
pbrook67d3b952006-12-18 05:03:52 +000063//#define DEBUG_UNASSIGNED
bellardfd6ce8f2003-05-14 19:00:11 +000064
65/* make various TB consistency checks */
ths5fafdf22007-09-16 21:08:06 +000066//#define DEBUG_TB_CHECK
67//#define DEBUG_TLB_CHECK
bellardfd6ce8f2003-05-14 19:00:11 +000068
ths1196be32007-03-17 15:17:58 +000069//#define DEBUG_IOPORT
blueswir1db7b5422007-05-26 17:36:03 +000070//#define DEBUG_SUBPAGE
ths1196be32007-03-17 15:17:58 +000071
pbrook99773bd2006-04-16 15:14:59 +000072#if !defined(CONFIG_USER_ONLY)
73/* TB consistency checks only implemented for usermode emulation. */
74#undef DEBUG_TB_CHECK
75#endif
76
bellard9fa3e852004-01-04 18:06:42 +000077#define SMC_BITMAP_USE_THRESHOLD 10
78
blueswir1bdaf78e2008-10-04 07:24:27 +000079static TranslationBlock *tbs;
Stefan Weil24ab68a2010-07-19 18:23:17 +020080static int code_gen_max_blocks;
bellard9fa3e852004-01-04 18:06:42 +000081TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
blueswir1bdaf78e2008-10-04 07:24:27 +000082static int nb_tbs;
bellardeb51d102003-05-14 21:51:13 +000083/* any access to the tbs or the page table must use this lock */
Anthony Liguoric227f092009-10-01 16:12:16 -050084spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
bellardfd6ce8f2003-05-14 19:00:11 +000085
blueswir1141ac462008-07-26 15:05:57 +000086#if defined(__arm__) || defined(__sparc_v9__)
87/* The prologue must be reachable with a direct jump. ARM and Sparc64
88 have limited branch ranges (possibly also PPC) so place it in a
blueswir1d03d8602008-07-10 17:21:31 +000089 section close to code segment. */
90#define code_gen_section \
91 __attribute__((__section__(".gen_code"))) \
92 __attribute__((aligned (32)))
Stefan Weilf8e2af12009-06-18 23:04:48 +020093#elif defined(_WIN32)
94/* Maximum alignment for Win32 is 16. */
95#define code_gen_section \
96 __attribute__((aligned (16)))
blueswir1d03d8602008-07-10 17:21:31 +000097#else
98#define code_gen_section \
99 __attribute__((aligned (32)))
100#endif
101
102uint8_t code_gen_prologue[1024] code_gen_section;
blueswir1bdaf78e2008-10-04 07:24:27 +0000103static uint8_t *code_gen_buffer;
104static unsigned long code_gen_buffer_size;
bellard26a5f132008-05-28 12:30:31 +0000105/* threshold to flush the translated code buffer */
blueswir1bdaf78e2008-10-04 07:24:27 +0000106static unsigned long code_gen_buffer_max_size;
Stefan Weil24ab68a2010-07-19 18:23:17 +0200107static uint8_t *code_gen_ptr;
bellardfd6ce8f2003-05-14 19:00:11 +0000108
pbrooke2eef172008-06-08 01:09:01 +0000109#if !defined(CONFIG_USER_ONLY)
bellard9fa3e852004-01-04 18:06:42 +0000110int phys_ram_fd;
aliguori74576192008-10-06 14:02:03 +0000111static int in_migration;
pbrook94a6b542009-04-11 17:15:54 +0000112
Alex Williamsonf471a172010-06-11 11:11:42 -0600113RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list) };
Avi Kivity62152b82011-07-26 14:26:14 +0300114
115static MemoryRegion *system_memory;
116
pbrooke2eef172008-06-08 01:09:01 +0000117#endif
bellard9fa3e852004-01-04 18:06:42 +0000118
bellard6a00d602005-11-21 23:25:50 +0000119CPUState *first_cpu;
120/* current CPU in the current thread. It is only valid inside
121 cpu_exec() */
ths5fafdf22007-09-16 21:08:06 +0000122CPUState *cpu_single_env;
pbrook2e70f6e2008-06-29 01:03:05 +0000123/* 0 = Do not count executed instructions.
thsbf20dc02008-06-30 17:22:19 +0000124 1 = Precise instruction counting.
pbrook2e70f6e2008-06-29 01:03:05 +0000125 2 = Adaptive rate instruction counting. */
126int use_icount = 0;
127/* Current instruction counter. While executing translated code this may
128 include some instructions that have not yet been executed. */
129int64_t qemu_icount;
bellard6a00d602005-11-21 23:25:50 +0000130
bellard54936002003-05-13 00:25:15 +0000131typedef struct PageDesc {
bellard92e873b2004-05-21 14:52:29 +0000132 /* list of TBs intersecting this ram page */
bellardfd6ce8f2003-05-14 19:00:11 +0000133 TranslationBlock *first_tb;
bellard9fa3e852004-01-04 18:06:42 +0000134 /* in order to optimize self modifying code, we count the number
135 of lookups we do to a given page to use a bitmap */
136 unsigned int code_write_count;
137 uint8_t *code_bitmap;
138#if defined(CONFIG_USER_ONLY)
139 unsigned long flags;
140#endif
bellard54936002003-05-13 00:25:15 +0000141} PageDesc;
142
Paul Brook41c1b1c2010-03-12 16:54:58 +0000143/* In system mode we want L1_MAP to be based on ram offsets,
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800144 while in user mode we want it to be based on virtual addresses. */
145#if !defined(CONFIG_USER_ONLY)
Paul Brook41c1b1c2010-03-12 16:54:58 +0000146#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
147# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
148#else
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800149# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
Paul Brook41c1b1c2010-03-12 16:54:58 +0000150#endif
j_mayerbedb69e2007-04-05 20:08:21 +0000151#else
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800152# define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
j_mayerbedb69e2007-04-05 20:08:21 +0000153#endif
bellard54936002003-05-13 00:25:15 +0000154
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800155/* Size of the L2 (and L3, etc) page tables. */
156#define L2_BITS 10
bellard54936002003-05-13 00:25:15 +0000157#define L2_SIZE (1 << L2_BITS)
158
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800159/* The bits remaining after N lower levels of page tables. */
160#define P_L1_BITS_REM \
161 ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
162#define V_L1_BITS_REM \
163 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
164
165/* Size of the L1 page table. Avoid silly small sizes. */
166#if P_L1_BITS_REM < 4
167#define P_L1_BITS (P_L1_BITS_REM + L2_BITS)
168#else
169#define P_L1_BITS P_L1_BITS_REM
170#endif
171
172#if V_L1_BITS_REM < 4
173#define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
174#else
175#define V_L1_BITS V_L1_BITS_REM
176#endif
177
178#define P_L1_SIZE ((target_phys_addr_t)1 << P_L1_BITS)
179#define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
180
181#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
182#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
183
bellard83fb7ad2004-07-05 21:25:26 +0000184unsigned long qemu_real_host_page_size;
185unsigned long qemu_host_page_bits;
186unsigned long qemu_host_page_size;
187unsigned long qemu_host_page_mask;
bellard54936002003-05-13 00:25:15 +0000188
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800189/* This is a multi-level map on the virtual address space.
190 The bottom level has pointers to PageDesc. */
191static void *l1_map[V_L1_SIZE];
bellard54936002003-05-13 00:25:15 +0000192
pbrooke2eef172008-06-08 01:09:01 +0000193#if !defined(CONFIG_USER_ONLY)
Paul Brook41c1b1c2010-03-12 16:54:58 +0000194typedef struct PhysPageDesc {
195 /* offset in host memory of the page + io_index in the low bits */
196 ram_addr_t phys_offset;
197 ram_addr_t region_offset;
198} PhysPageDesc;
199
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800200/* This is a multi-level map on the physical address space.
201 The bottom level has pointers to PhysPageDesc. */
202static void *l1_phys_map[P_L1_SIZE];
Paul Brook6d9a1302010-02-28 23:55:53 +0000203
pbrooke2eef172008-06-08 01:09:01 +0000204static void io_mem_init(void);
Avi Kivity62152b82011-07-26 14:26:14 +0300205static void memory_map_init(void);
pbrooke2eef172008-06-08 01:09:01 +0000206
bellard33417e72003-08-10 21:47:01 +0000207/* io memory support */
bellard33417e72003-08-10 21:47:01 +0000208CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
209CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
bellarda4193c82004-06-03 14:01:43 +0000210void *io_mem_opaque[IO_MEM_NB_ENTRIES];
blueswir1511d2b12009-03-07 15:32:56 +0000211static char io_mem_used[IO_MEM_NB_ENTRIES];
pbrook6658ffb2007-03-16 23:58:11 +0000212static int io_mem_watch;
213#endif
bellard33417e72003-08-10 21:47:01 +0000214
bellard34865132003-10-05 14:28:56 +0000215/* log support */
Juha Riihimäki1e8b27c2009-12-03 15:56:02 +0200216#ifdef WIN32
217static const char *logfilename = "qemu.log";
218#else
blueswir1d9b630f2008-10-05 09:57:08 +0000219static const char *logfilename = "/tmp/qemu.log";
Juha Riihimäki1e8b27c2009-12-03 15:56:02 +0200220#endif
bellard34865132003-10-05 14:28:56 +0000221FILE *logfile;
222int loglevel;
pbrooke735b912007-06-30 13:53:24 +0000223static int log_append = 0;
bellard34865132003-10-05 14:28:56 +0000224
bellarde3db7222005-01-26 22:00:47 +0000225/* statistics */
Paul Brookb3755a92010-03-12 16:54:58 +0000226#if !defined(CONFIG_USER_ONLY)
bellarde3db7222005-01-26 22:00:47 +0000227static int tlb_flush_count;
Paul Brookb3755a92010-03-12 16:54:58 +0000228#endif
bellarde3db7222005-01-26 22:00:47 +0000229static int tb_flush_count;
230static int tb_phys_invalidate_count;
231
bellard7cb69ca2008-05-10 10:55:51 +0000232#ifdef _WIN32
233static void map_exec(void *addr, long size)
234{
235 DWORD old_protect;
236 VirtualProtect(addr, size,
237 PAGE_EXECUTE_READWRITE, &old_protect);
238
239}
240#else
241static void map_exec(void *addr, long size)
242{
bellard43694152008-05-29 09:35:57 +0000243 unsigned long start, end, page_size;
bellard7cb69ca2008-05-10 10:55:51 +0000244
bellard43694152008-05-29 09:35:57 +0000245 page_size = getpagesize();
bellard7cb69ca2008-05-10 10:55:51 +0000246 start = (unsigned long)addr;
bellard43694152008-05-29 09:35:57 +0000247 start &= ~(page_size - 1);
bellard7cb69ca2008-05-10 10:55:51 +0000248
249 end = (unsigned long)addr + size;
bellard43694152008-05-29 09:35:57 +0000250 end += page_size - 1;
251 end &= ~(page_size - 1);
bellard7cb69ca2008-05-10 10:55:51 +0000252
253 mprotect((void *)start, end - start,
254 PROT_READ | PROT_WRITE | PROT_EXEC);
255}
256#endif
257
bellardb346ff42003-06-15 20:05:50 +0000258static void page_init(void)
bellard54936002003-05-13 00:25:15 +0000259{
bellard83fb7ad2004-07-05 21:25:26 +0000260 /* NOTE: we can always suppose that qemu_host_page_size >=
bellard54936002003-05-13 00:25:15 +0000261 TARGET_PAGE_SIZE */
aliguoric2b48b62008-11-11 22:06:42 +0000262#ifdef _WIN32
263 {
264 SYSTEM_INFO system_info;
265
266 GetSystemInfo(&system_info);
267 qemu_real_host_page_size = system_info.dwPageSize;
268 }
269#else
270 qemu_real_host_page_size = getpagesize();
271#endif
bellard83fb7ad2004-07-05 21:25:26 +0000272 if (qemu_host_page_size == 0)
273 qemu_host_page_size = qemu_real_host_page_size;
274 if (qemu_host_page_size < TARGET_PAGE_SIZE)
275 qemu_host_page_size = TARGET_PAGE_SIZE;
276 qemu_host_page_bits = 0;
277 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
278 qemu_host_page_bits++;
279 qemu_host_page_mask = ~(qemu_host_page_size - 1);
balrog50a95692007-12-12 01:16:23 +0000280
Paul Brook2e9a5712010-05-05 16:32:59 +0100281#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
balrog50a95692007-12-12 01:16:23 +0000282 {
Juergen Lockf01576f2010-03-25 22:32:16 +0100283#ifdef HAVE_KINFO_GETVMMAP
284 struct kinfo_vmentry *freep;
285 int i, cnt;
286
287 freep = kinfo_getvmmap(getpid(), &cnt);
288 if (freep) {
289 mmap_lock();
290 for (i = 0; i < cnt; i++) {
291 unsigned long startaddr, endaddr;
292
293 startaddr = freep[i].kve_start;
294 endaddr = freep[i].kve_end;
295 if (h2g_valid(startaddr)) {
296 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
297
298 if (h2g_valid(endaddr)) {
299 endaddr = h2g(endaddr);
Aurelien Jarnofd436902010-04-10 17:20:36 +0200300 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
Juergen Lockf01576f2010-03-25 22:32:16 +0100301 } else {
302#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
303 endaddr = ~0ul;
Aurelien Jarnofd436902010-04-10 17:20:36 +0200304 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
Juergen Lockf01576f2010-03-25 22:32:16 +0100305#endif
306 }
307 }
308 }
309 free(freep);
310 mmap_unlock();
311 }
312#else
balrog50a95692007-12-12 01:16:23 +0000313 FILE *f;
balrog50a95692007-12-12 01:16:23 +0000314
pbrook07765902008-05-31 16:33:53 +0000315 last_brk = (unsigned long)sbrk(0);
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800316
Aurelien Jarnofd436902010-04-10 17:20:36 +0200317 f = fopen("/compat/linux/proc/self/maps", "r");
balrog50a95692007-12-12 01:16:23 +0000318 if (f) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800319 mmap_lock();
320
balrog50a95692007-12-12 01:16:23 +0000321 do {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800322 unsigned long startaddr, endaddr;
323 int n;
324
325 n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
326
327 if (n == 2 && h2g_valid(startaddr)) {
328 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
329
330 if (h2g_valid(endaddr)) {
331 endaddr = h2g(endaddr);
332 } else {
333 endaddr = ~0ul;
334 }
335 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
balrog50a95692007-12-12 01:16:23 +0000336 }
337 } while (!feof(f));
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800338
balrog50a95692007-12-12 01:16:23 +0000339 fclose(f);
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800340 mmap_unlock();
balrog50a95692007-12-12 01:16:23 +0000341 }
Juergen Lockf01576f2010-03-25 22:32:16 +0100342#endif
balrog50a95692007-12-12 01:16:23 +0000343 }
344#endif
bellard54936002003-05-13 00:25:15 +0000345}
346
Paul Brook41c1b1c2010-03-12 16:54:58 +0000347static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
bellard54936002003-05-13 00:25:15 +0000348{
Paul Brook41c1b1c2010-03-12 16:54:58 +0000349 PageDesc *pd;
350 void **lp;
351 int i;
352
pbrook17e23772008-06-09 13:47:45 +0000353#if defined(CONFIG_USER_ONLY)
Paul Brook2e9a5712010-05-05 16:32:59 +0100354 /* We can't use qemu_malloc because it may recurse into a locked mutex. */
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800355# define ALLOC(P, SIZE) \
356 do { \
357 P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \
358 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800359 } while (0)
pbrook17e23772008-06-09 13:47:45 +0000360#else
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800361# define ALLOC(P, SIZE) \
362 do { P = qemu_mallocz(SIZE); } while (0)
pbrook17e23772008-06-09 13:47:45 +0000363#endif
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800364
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800365 /* Level 1. Always allocated. */
366 lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
367
368 /* Level 2..N-1. */
369 for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
370 void **p = *lp;
371
372 if (p == NULL) {
373 if (!alloc) {
374 return NULL;
375 }
376 ALLOC(p, sizeof(void *) * L2_SIZE);
377 *lp = p;
378 }
379
380 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
bellard54936002003-05-13 00:25:15 +0000381 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800382
383 pd = *lp;
384 if (pd == NULL) {
385 if (!alloc) {
386 return NULL;
387 }
388 ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
389 *lp = pd;
390 }
391
392#undef ALLOC
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800393
394 return pd + (index & (L2_SIZE - 1));
bellard54936002003-05-13 00:25:15 +0000395}
396
Paul Brook41c1b1c2010-03-12 16:54:58 +0000397static inline PageDesc *page_find(tb_page_addr_t index)
bellard54936002003-05-13 00:25:15 +0000398{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800399 return page_find_alloc(index, 0);
bellard54936002003-05-13 00:25:15 +0000400}
401
Paul Brook6d9a1302010-02-28 23:55:53 +0000402#if !defined(CONFIG_USER_ONLY)
Anthony Liguoric227f092009-10-01 16:12:16 -0500403static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
bellard92e873b2004-05-21 14:52:29 +0000404{
pbrooke3f4e2a2006-04-08 20:02:06 +0000405 PhysPageDesc *pd;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800406 void **lp;
407 int i;
bellard92e873b2004-05-21 14:52:29 +0000408
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800409 /* Level 1. Always allocated. */
410 lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));
bellard108c49b2005-07-24 12:55:09 +0000411
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800412 /* Level 2..N-1. */
413 for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
414 void **p = *lp;
415 if (p == NULL) {
416 if (!alloc) {
417 return NULL;
418 }
419 *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
420 }
421 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
bellard108c49b2005-07-24 12:55:09 +0000422 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800423
pbrooke3f4e2a2006-04-08 20:02:06 +0000424 pd = *lp;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800425 if (pd == NULL) {
pbrooke3f4e2a2006-04-08 20:02:06 +0000426 int i;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800427
428 if (!alloc) {
bellard108c49b2005-07-24 12:55:09 +0000429 return NULL;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800430 }
431
432 *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);
433
pbrook67c4d232009-02-23 13:16:07 +0000434 for (i = 0; i < L2_SIZE; i++) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800435 pd[i].phys_offset = IO_MEM_UNASSIGNED;
436 pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
pbrook67c4d232009-02-23 13:16:07 +0000437 }
bellard92e873b2004-05-21 14:52:29 +0000438 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800439
440 return pd + (index & (L2_SIZE - 1));
bellard92e873b2004-05-21 14:52:29 +0000441}
442
Anthony Liguoric227f092009-10-01 16:12:16 -0500443static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
bellard92e873b2004-05-21 14:52:29 +0000444{
bellard108c49b2005-07-24 12:55:09 +0000445 return phys_page_find_alloc(index, 0);
bellard92e873b2004-05-21 14:52:29 +0000446}
447
Anthony Liguoric227f092009-10-01 16:12:16 -0500448static void tlb_protect_code(ram_addr_t ram_addr);
449static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
bellard3a7d9292005-08-21 09:26:42 +0000450 target_ulong vaddr);
pbrookc8a706f2008-06-02 16:16:42 +0000451#define mmap_lock() do { } while(0)
452#define mmap_unlock() do { } while(0)
bellard9fa3e852004-01-04 18:06:42 +0000453#endif
bellardfd6ce8f2003-05-14 19:00:11 +0000454
bellard43694152008-05-29 09:35:57 +0000455#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
456
457#if defined(CONFIG_USER_ONLY)
Stuart Bradyccbb4d42009-05-03 12:15:06 +0100458/* Currently it is not recommended to allocate big chunks of data in
bellard43694152008-05-29 09:35:57 +0000459 user mode. It will change when a dedicated libc will be used */
460#define USE_STATIC_CODE_GEN_BUFFER
461#endif
462
463#ifdef USE_STATIC_CODE_GEN_BUFFER
Aurelien Jarnoebf50fb2010-03-29 02:12:51 +0200464static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
465 __attribute__((aligned (CODE_GEN_ALIGN)));
bellard43694152008-05-29 09:35:57 +0000466#endif
467
blueswir18fcd3692008-08-17 20:26:25 +0000468static void code_gen_alloc(unsigned long tb_size)
bellard26a5f132008-05-28 12:30:31 +0000469{
bellard43694152008-05-29 09:35:57 +0000470#ifdef USE_STATIC_CODE_GEN_BUFFER
471 code_gen_buffer = static_code_gen_buffer;
472 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
473 map_exec(code_gen_buffer, code_gen_buffer_size);
474#else
bellard26a5f132008-05-28 12:30:31 +0000475 code_gen_buffer_size = tb_size;
476 if (code_gen_buffer_size == 0) {
bellard43694152008-05-29 09:35:57 +0000477#if defined(CONFIG_USER_ONLY)
478 /* in user mode, phys_ram_size is not meaningful */
479 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
480#else
Stuart Bradyccbb4d42009-05-03 12:15:06 +0100481 /* XXX: needs adjustments */
pbrook94a6b542009-04-11 17:15:54 +0000482 code_gen_buffer_size = (unsigned long)(ram_size / 4);
bellard43694152008-05-29 09:35:57 +0000483#endif
bellard26a5f132008-05-28 12:30:31 +0000484 }
485 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
486 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
487 /* The code gen buffer location may have constraints depending on
488 the host cpu and OS */
489#if defined(__linux__)
490 {
491 int flags;
blueswir1141ac462008-07-26 15:05:57 +0000492 void *start = NULL;
493
bellard26a5f132008-05-28 12:30:31 +0000494 flags = MAP_PRIVATE | MAP_ANONYMOUS;
495#if defined(__x86_64__)
496 flags |= MAP_32BIT;
497 /* Cannot map more than that */
498 if (code_gen_buffer_size > (800 * 1024 * 1024))
499 code_gen_buffer_size = (800 * 1024 * 1024);
blueswir1141ac462008-07-26 15:05:57 +0000500#elif defined(__sparc_v9__)
501 // Map the buffer below 2G, so we can use direct calls and branches
502 flags |= MAP_FIXED;
503 start = (void *) 0x60000000UL;
504 if (code_gen_buffer_size > (512 * 1024 * 1024))
505 code_gen_buffer_size = (512 * 1024 * 1024);
balrog1cb06612008-12-01 02:10:17 +0000506#elif defined(__arm__)
balrog63d41242008-12-01 02:19:41 +0000507 /* Map the buffer below 32M, so we can use direct calls and branches */
balrog1cb06612008-12-01 02:10:17 +0000508 flags |= MAP_FIXED;
509 start = (void *) 0x01000000UL;
510 if (code_gen_buffer_size > 16 * 1024 * 1024)
511 code_gen_buffer_size = 16 * 1024 * 1024;
Richard Hendersoneba0b892010-06-04 12:14:14 -0700512#elif defined(__s390x__)
513 /* Map the buffer so that we can use direct calls and branches. */
514 /* We have a +- 4GB range on the branches; leave some slop. */
515 if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
516 code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
517 }
518 start = (void *)0x90000000UL;
bellard26a5f132008-05-28 12:30:31 +0000519#endif
blueswir1141ac462008-07-26 15:05:57 +0000520 code_gen_buffer = mmap(start, code_gen_buffer_size,
521 PROT_WRITE | PROT_READ | PROT_EXEC,
bellard26a5f132008-05-28 12:30:31 +0000522 flags, -1, 0);
523 if (code_gen_buffer == MAP_FAILED) {
524 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
525 exit(1);
526 }
527 }
Bradcbb608a2010-12-20 21:25:40 -0500528#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
Tobias Nygren9f4b09a2011-08-07 09:57:05 +0000529 || defined(__DragonFly__) || defined(__OpenBSD__) \
530 || defined(__NetBSD__)
aliguori06e67a82008-09-27 15:32:41 +0000531 {
532 int flags;
533 void *addr = NULL;
534 flags = MAP_PRIVATE | MAP_ANONYMOUS;
535#if defined(__x86_64__)
536 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
537 * 0x40000000 is free */
538 flags |= MAP_FIXED;
539 addr = (void *)0x40000000;
540 /* Cannot map more than that */
541 if (code_gen_buffer_size > (800 * 1024 * 1024))
542 code_gen_buffer_size = (800 * 1024 * 1024);
Blue Swirl4cd31ad2011-01-16 08:32:27 +0000543#elif defined(__sparc_v9__)
544 // Map the buffer below 2G, so we can use direct calls and branches
545 flags |= MAP_FIXED;
546 addr = (void *) 0x60000000UL;
547 if (code_gen_buffer_size > (512 * 1024 * 1024)) {
548 code_gen_buffer_size = (512 * 1024 * 1024);
549 }
aliguori06e67a82008-09-27 15:32:41 +0000550#endif
551 code_gen_buffer = mmap(addr, code_gen_buffer_size,
552 PROT_WRITE | PROT_READ | PROT_EXEC,
553 flags, -1, 0);
554 if (code_gen_buffer == MAP_FAILED) {
555 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
556 exit(1);
557 }
558 }
bellard26a5f132008-05-28 12:30:31 +0000559#else
560 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
bellard26a5f132008-05-28 12:30:31 +0000561 map_exec(code_gen_buffer, code_gen_buffer_size);
562#endif
bellard43694152008-05-29 09:35:57 +0000563#endif /* !USE_STATIC_CODE_GEN_BUFFER */
bellard26a5f132008-05-28 12:30:31 +0000564 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
Peter Maydella884da82011-06-22 11:58:25 +0100565 code_gen_buffer_max_size = code_gen_buffer_size -
566 (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
bellard26a5f132008-05-28 12:30:31 +0000567 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
568 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
569}
570
571/* Must be called before using the QEMU cpus. 'tb_size' is the size
572 (in bytes) allocated to the translation buffer. Zero means default
573 size. */
Jan Kiszkad5ab9712011-08-02 16:10:21 +0200574void tcg_exec_init(unsigned long tb_size)
bellard26a5f132008-05-28 12:30:31 +0000575{
bellard26a5f132008-05-28 12:30:31 +0000576 cpu_gen_init();
577 code_gen_alloc(tb_size);
578 code_gen_ptr = code_gen_buffer;
bellard43694152008-05-29 09:35:57 +0000579 page_init();
Richard Henderson9002ec72010-05-06 08:50:41 -0700580#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
581 /* There's no guest base to take into account, so go ahead and
582 initialize the prologue now. */
583 tcg_prologue_init(&tcg_ctx);
584#endif
bellard26a5f132008-05-28 12:30:31 +0000585}
586
Jan Kiszkad5ab9712011-08-02 16:10:21 +0200587bool tcg_enabled(void)
588{
589 return code_gen_buffer != NULL;
590}
591
592void cpu_exec_init_all(void)
593{
594#if !defined(CONFIG_USER_ONLY)
595 memory_map_init();
596 io_mem_init();
597#endif
598}
599
pbrook9656f322008-07-01 20:01:19 +0000600#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
601
Juan Quintelae59fb372009-09-29 22:48:21 +0200602static int cpu_common_post_load(void *opaque, int version_id)
Juan Quintelae7f4eff2009-09-10 03:04:33 +0200603{
604 CPUState *env = opaque;
605
aurel323098dba2009-03-07 21:28:24 +0000606 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
607 version_id is increased. */
608 env->interrupt_request &= ~0x01;
pbrook9656f322008-07-01 20:01:19 +0000609 tlb_flush(env, 1);
610
611 return 0;
612}
Juan Quintelae7f4eff2009-09-10 03:04:33 +0200613
614static const VMStateDescription vmstate_cpu_common = {
615 .name = "cpu_common",
616 .version_id = 1,
617 .minimum_version_id = 1,
618 .minimum_version_id_old = 1,
Juan Quintelae7f4eff2009-09-10 03:04:33 +0200619 .post_load = cpu_common_post_load,
620 .fields = (VMStateField []) {
621 VMSTATE_UINT32(halted, CPUState),
622 VMSTATE_UINT32(interrupt_request, CPUState),
623 VMSTATE_END_OF_LIST()
624 }
625};
pbrook9656f322008-07-01 20:01:19 +0000626#endif
627
Glauber Costa950f1472009-06-09 12:15:18 -0400628CPUState *qemu_get_cpu(int cpu)
629{
630 CPUState *env = first_cpu;
631
632 while (env) {
633 if (env->cpu_index == cpu)
634 break;
635 env = env->next_cpu;
636 }
637
638 return env;
639}
640
bellard6a00d602005-11-21 23:25:50 +0000641void cpu_exec_init(CPUState *env)
bellardfd6ce8f2003-05-14 19:00:11 +0000642{
bellard6a00d602005-11-21 23:25:50 +0000643 CPUState **penv;
644 int cpu_index;
645
pbrookc2764712009-03-07 15:24:59 +0000646#if defined(CONFIG_USER_ONLY)
647 cpu_list_lock();
648#endif
bellard6a00d602005-11-21 23:25:50 +0000649 env->next_cpu = NULL;
650 penv = &first_cpu;
651 cpu_index = 0;
652 while (*penv != NULL) {
Nathan Froyd1e9fa732009-06-03 11:33:08 -0700653 penv = &(*penv)->next_cpu;
bellard6a00d602005-11-21 23:25:50 +0000654 cpu_index++;
655 }
656 env->cpu_index = cpu_index;
aliguori268a3622009-04-21 22:30:27 +0000657 env->numa_node = 0;
Blue Swirl72cf2d42009-09-12 07:36:22 +0000658 QTAILQ_INIT(&env->breakpoints);
659 QTAILQ_INIT(&env->watchpoints);
Jan Kiszkadc7a09c2011-03-15 12:26:31 +0100660#ifndef CONFIG_USER_ONLY
661 env->thread_id = qemu_get_thread_id();
662#endif
bellard6a00d602005-11-21 23:25:50 +0000663 *penv = env;
pbrookc2764712009-03-07 15:24:59 +0000664#if defined(CONFIG_USER_ONLY)
665 cpu_list_unlock();
666#endif
pbrookb3c77242008-06-30 16:31:04 +0000667#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
Alex Williamson0be71e32010-06-25 11:09:07 -0600668 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
669 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
pbrookb3c77242008-06-30 16:31:04 +0000670 cpu_save, cpu_load, env);
671#endif
bellardfd6ce8f2003-05-14 19:00:11 +0000672}
673
Tristan Gingoldd1a1eb72011-02-10 10:04:57 +0100674/* Allocate a new translation block. Flush the translation buffer if
675 too many translation blocks or too much generated code. */
676static TranslationBlock *tb_alloc(target_ulong pc)
677{
678 TranslationBlock *tb;
679
680 if (nb_tbs >= code_gen_max_blocks ||
681 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
682 return NULL;
683 tb = &tbs[nb_tbs++];
684 tb->pc = pc;
685 tb->cflags = 0;
686 return tb;
687}
688
689void tb_free(TranslationBlock *tb)
690{
691 /* In practice this is mostly used for single use temporary TB
692 Ignore the hard cases and just back up if this TB happens to
693 be the last one generated. */
694 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
695 code_gen_ptr = tb->tc_ptr;
696 nb_tbs--;
697 }
698}
699
bellard9fa3e852004-01-04 18:06:42 +0000700static inline void invalidate_page_bitmap(PageDesc *p)
701{
702 if (p->code_bitmap) {
bellard59817cc2004-02-16 22:01:13 +0000703 qemu_free(p->code_bitmap);
bellard9fa3e852004-01-04 18:06:42 +0000704 p->code_bitmap = NULL;
705 }
706 p->code_write_count = 0;
707}
708
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800709/* Set to NULL all the 'first_tb' fields in all PageDescs. */
710
711static void page_flush_tb_1 (int level, void **lp)
712{
713 int i;
714
715 if (*lp == NULL) {
716 return;
717 }
718 if (level == 0) {
719 PageDesc *pd = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +0000720 for (i = 0; i < L2_SIZE; ++i) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800721 pd[i].first_tb = NULL;
722 invalidate_page_bitmap(pd + i);
723 }
724 } else {
725 void **pp = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +0000726 for (i = 0; i < L2_SIZE; ++i) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800727 page_flush_tb_1 (level - 1, pp + i);
728 }
729 }
730}
731
bellardfd6ce8f2003-05-14 19:00:11 +0000732static void page_flush_tb(void)
733{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800734 int i;
735 for (i = 0; i < V_L1_SIZE; i++) {
736 page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
bellardfd6ce8f2003-05-14 19:00:11 +0000737 }
738}
739
740/* flush all the translation blocks */
bellardd4e81642003-05-25 16:46:15 +0000741/* XXX: tb_flush is currently not thread safe */
bellard6a00d602005-11-21 23:25:50 +0000742void tb_flush(CPUState *env1)
bellardfd6ce8f2003-05-14 19:00:11 +0000743{
bellard6a00d602005-11-21 23:25:50 +0000744 CPUState *env;
bellard01243112004-01-04 15:48:17 +0000745#if defined(DEBUG_FLUSH)
blueswir1ab3d1722007-11-04 07:31:40 +0000746 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
747 (unsigned long)(code_gen_ptr - code_gen_buffer),
748 nb_tbs, nb_tbs > 0 ?
749 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
bellardfd6ce8f2003-05-14 19:00:11 +0000750#endif
bellard26a5f132008-05-28 12:30:31 +0000751 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
pbrooka208e542008-03-31 17:07:36 +0000752 cpu_abort(env1, "Internal error: code buffer overflow\n");
753
bellardfd6ce8f2003-05-14 19:00:11 +0000754 nb_tbs = 0;
ths3b46e622007-09-17 08:09:54 +0000755
bellard6a00d602005-11-21 23:25:50 +0000756 for(env = first_cpu; env != NULL; env = env->next_cpu) {
757 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
758 }
bellard9fa3e852004-01-04 18:06:42 +0000759
bellard8a8a6082004-10-03 13:36:49 +0000760 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
bellardfd6ce8f2003-05-14 19:00:11 +0000761 page_flush_tb();
bellard9fa3e852004-01-04 18:06:42 +0000762
bellardfd6ce8f2003-05-14 19:00:11 +0000763 code_gen_ptr = code_gen_buffer;
bellardd4e81642003-05-25 16:46:15 +0000764 /* XXX: flush processor icache at this point if cache flush is
765 expensive */
bellarde3db7222005-01-26 22:00:47 +0000766 tb_flush_count++;
bellardfd6ce8f2003-05-14 19:00:11 +0000767}
768
769#ifdef DEBUG_TB_CHECK
770
j_mayerbc98a7e2007-04-04 07:55:12 +0000771static void tb_invalidate_check(target_ulong address)
bellardfd6ce8f2003-05-14 19:00:11 +0000772{
773 TranslationBlock *tb;
774 int i;
775 address &= TARGET_PAGE_MASK;
pbrook99773bd2006-04-16 15:14:59 +0000776 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
777 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
bellardfd6ce8f2003-05-14 19:00:11 +0000778 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
779 address >= tb->pc + tb->size)) {
Blue Swirl0bf9e312009-07-20 17:19:25 +0000780 printf("ERROR invalidate: address=" TARGET_FMT_lx
781 " PC=%08lx size=%04x\n",
pbrook99773bd2006-04-16 15:14:59 +0000782 address, (long)tb->pc, tb->size);
bellardfd6ce8f2003-05-14 19:00:11 +0000783 }
784 }
785 }
786}
787
788/* verify that all the pages have correct rights for code */
789static void tb_page_check(void)
790{
791 TranslationBlock *tb;
792 int i, flags1, flags2;
ths3b46e622007-09-17 08:09:54 +0000793
pbrook99773bd2006-04-16 15:14:59 +0000794 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
795 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
bellardfd6ce8f2003-05-14 19:00:11 +0000796 flags1 = page_get_flags(tb->pc);
797 flags2 = page_get_flags(tb->pc + tb->size - 1);
798 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
799 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
pbrook99773bd2006-04-16 15:14:59 +0000800 (long)tb->pc, tb->size, flags1, flags2);
bellardfd6ce8f2003-05-14 19:00:11 +0000801 }
802 }
803 }
804}
805
806#endif
807
808/* invalidate one TB */
809static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
810 int next_offset)
811{
812 TranslationBlock *tb1;
813 for(;;) {
814 tb1 = *ptb;
815 if (tb1 == tb) {
816 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
817 break;
818 }
819 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
820 }
821}
822
bellard9fa3e852004-01-04 18:06:42 +0000823static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
824{
825 TranslationBlock *tb1;
826 unsigned int n1;
827
828 for(;;) {
829 tb1 = *ptb;
830 n1 = (long)tb1 & 3;
831 tb1 = (TranslationBlock *)((long)tb1 & ~3);
832 if (tb1 == tb) {
833 *ptb = tb1->page_next[n1];
834 break;
835 }
836 ptb = &tb1->page_next[n1];
837 }
838}
839
bellardd4e81642003-05-25 16:46:15 +0000840static inline void tb_jmp_remove(TranslationBlock *tb, int n)
841{
842 TranslationBlock *tb1, **ptb;
843 unsigned int n1;
844
845 ptb = &tb->jmp_next[n];
846 tb1 = *ptb;
847 if (tb1) {
848 /* find tb(n) in circular list */
849 for(;;) {
850 tb1 = *ptb;
851 n1 = (long)tb1 & 3;
852 tb1 = (TranslationBlock *)((long)tb1 & ~3);
853 if (n1 == n && tb1 == tb)
854 break;
855 if (n1 == 2) {
856 ptb = &tb1->jmp_first;
857 } else {
858 ptb = &tb1->jmp_next[n1];
859 }
860 }
861 /* now we can suppress tb(n) from the list */
862 *ptb = tb->jmp_next[n];
863
864 tb->jmp_next[n] = NULL;
865 }
866}
867
868/* reset the jump entry 'n' of a TB so that it is not chained to
869 another TB */
870static inline void tb_reset_jump(TranslationBlock *tb, int n)
871{
872 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
873}
874
Paul Brook41c1b1c2010-03-12 16:54:58 +0000875void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
bellardfd6ce8f2003-05-14 19:00:11 +0000876{
bellard6a00d602005-11-21 23:25:50 +0000877 CPUState *env;
bellardfd6ce8f2003-05-14 19:00:11 +0000878 PageDesc *p;
bellard8a40a182005-11-20 10:35:40 +0000879 unsigned int h, n1;
Paul Brook41c1b1c2010-03-12 16:54:58 +0000880 tb_page_addr_t phys_pc;
bellard8a40a182005-11-20 10:35:40 +0000881 TranslationBlock *tb1, *tb2;
ths3b46e622007-09-17 08:09:54 +0000882
bellard9fa3e852004-01-04 18:06:42 +0000883 /* remove the TB from the hash list */
884 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
885 h = tb_phys_hash_func(phys_pc);
ths5fafdf22007-09-16 21:08:06 +0000886 tb_remove(&tb_phys_hash[h], tb,
bellard9fa3e852004-01-04 18:06:42 +0000887 offsetof(TranslationBlock, phys_hash_next));
bellardfd6ce8f2003-05-14 19:00:11 +0000888
bellard9fa3e852004-01-04 18:06:42 +0000889 /* remove the TB from the page list */
890 if (tb->page_addr[0] != page_addr) {
891 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
892 tb_page_remove(&p->first_tb, tb);
893 invalidate_page_bitmap(p);
894 }
895 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
896 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
897 tb_page_remove(&p->first_tb, tb);
898 invalidate_page_bitmap(p);
899 }
900
bellard8a40a182005-11-20 10:35:40 +0000901 tb_invalidated_flag = 1;
902
903 /* remove the TB from the hash list */
904 h = tb_jmp_cache_hash_func(tb->pc);
bellard6a00d602005-11-21 23:25:50 +0000905 for(env = first_cpu; env != NULL; env = env->next_cpu) {
906 if (env->tb_jmp_cache[h] == tb)
907 env->tb_jmp_cache[h] = NULL;
908 }
bellard8a40a182005-11-20 10:35:40 +0000909
910 /* suppress this TB from the two jump lists */
911 tb_jmp_remove(tb, 0);
912 tb_jmp_remove(tb, 1);
913
914 /* suppress any remaining jumps to this TB */
915 tb1 = tb->jmp_first;
916 for(;;) {
917 n1 = (long)tb1 & 3;
918 if (n1 == 2)
919 break;
920 tb1 = (TranslationBlock *)((long)tb1 & ~3);
921 tb2 = tb1->jmp_next[n1];
922 tb_reset_jump(tb1, n1);
923 tb1->jmp_next[n1] = NULL;
924 tb1 = tb2;
925 }
926 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
927
bellarde3db7222005-01-26 22:00:47 +0000928 tb_phys_invalidate_count++;
bellard9fa3e852004-01-04 18:06:42 +0000929}
930
931static inline void set_bits(uint8_t *tab, int start, int len)
932{
933 int end, mask, end1;
934
935 end = start + len;
936 tab += start >> 3;
937 mask = 0xff << (start & 7);
938 if ((start & ~7) == (end & ~7)) {
939 if (start < end) {
940 mask &= ~(0xff << (end & 7));
941 *tab |= mask;
942 }
943 } else {
944 *tab++ |= mask;
945 start = (start + 8) & ~7;
946 end1 = end & ~7;
947 while (start < end1) {
948 *tab++ = 0xff;
949 start += 8;
950 }
951 if (start < end) {
952 mask = ~(0xff << (end & 7));
953 *tab |= mask;
954 }
955 }
956}
957
958static void build_page_bitmap(PageDesc *p)
959{
960 int n, tb_start, tb_end;
961 TranslationBlock *tb;
ths3b46e622007-09-17 08:09:54 +0000962
pbrookb2a70812008-06-09 13:57:23 +0000963 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
bellard9fa3e852004-01-04 18:06:42 +0000964
965 tb = p->first_tb;
966 while (tb != NULL) {
967 n = (long)tb & 3;
968 tb = (TranslationBlock *)((long)tb & ~3);
969 /* NOTE: this is subtle as a TB may span two physical pages */
970 if (n == 0) {
971 /* NOTE: tb_end may be after the end of the page, but
972 it is not a problem */
973 tb_start = tb->pc & ~TARGET_PAGE_MASK;
974 tb_end = tb_start + tb->size;
975 if (tb_end > TARGET_PAGE_SIZE)
976 tb_end = TARGET_PAGE_SIZE;
977 } else {
978 tb_start = 0;
979 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
980 }
981 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
982 tb = tb->page_next[n];
983 }
984}
985
pbrook2e70f6e2008-06-29 01:03:05 +0000986TranslationBlock *tb_gen_code(CPUState *env,
987 target_ulong pc, target_ulong cs_base,
988 int flags, int cflags)
bellardd720b932004-04-25 17:57:43 +0000989{
990 TranslationBlock *tb;
991 uint8_t *tc_ptr;
Paul Brook41c1b1c2010-03-12 16:54:58 +0000992 tb_page_addr_t phys_pc, phys_page2;
993 target_ulong virt_page2;
bellardd720b932004-04-25 17:57:43 +0000994 int code_gen_size;
995
Paul Brook41c1b1c2010-03-12 16:54:58 +0000996 phys_pc = get_page_addr_code(env, pc);
bellardc27004e2005-01-03 23:35:10 +0000997 tb = tb_alloc(pc);
bellardd720b932004-04-25 17:57:43 +0000998 if (!tb) {
999 /* flush must be done */
1000 tb_flush(env);
1001 /* cannot fail at this point */
bellardc27004e2005-01-03 23:35:10 +00001002 tb = tb_alloc(pc);
pbrook2e70f6e2008-06-29 01:03:05 +00001003 /* Don't forget to invalidate previous TB info. */
1004 tb_invalidated_flag = 1;
bellardd720b932004-04-25 17:57:43 +00001005 }
1006 tc_ptr = code_gen_ptr;
1007 tb->tc_ptr = tc_ptr;
1008 tb->cs_base = cs_base;
1009 tb->flags = flags;
1010 tb->cflags = cflags;
blueswir1d07bde82007-12-11 19:35:45 +00001011 cpu_gen_code(env, tb, &code_gen_size);
bellardd720b932004-04-25 17:57:43 +00001012 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
ths3b46e622007-09-17 08:09:54 +00001013
bellardd720b932004-04-25 17:57:43 +00001014 /* check next page if needed */
bellardc27004e2005-01-03 23:35:10 +00001015 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
bellardd720b932004-04-25 17:57:43 +00001016 phys_page2 = -1;
bellardc27004e2005-01-03 23:35:10 +00001017 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
Paul Brook41c1b1c2010-03-12 16:54:58 +00001018 phys_page2 = get_page_addr_code(env, virt_page2);
bellardd720b932004-04-25 17:57:43 +00001019 }
Paul Brook41c1b1c2010-03-12 16:54:58 +00001020 tb_link_page(tb, phys_pc, phys_page2);
pbrook2e70f6e2008-06-29 01:03:05 +00001021 return tb;
bellardd720b932004-04-25 17:57:43 +00001022}
ths3b46e622007-09-17 08:09:54 +00001023
bellard9fa3e852004-01-04 18:06:42 +00001024/* invalidate all TBs which intersect with the target physical page
1025 starting in range [start;end[. NOTE: start and end must refer to
bellardd720b932004-04-25 17:57:43 +00001026 the same physical page. 'is_cpu_write_access' should be true if called
1027 from a real cpu write access: the virtual CPU will exit the current
1028 TB if code is modified inside this TB. */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001029void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
bellardd720b932004-04-25 17:57:43 +00001030 int is_cpu_write_access)
bellard9fa3e852004-01-04 18:06:42 +00001031{
aliguori6b917542008-11-18 19:46:41 +00001032 TranslationBlock *tb, *tb_next, *saved_tb;
bellardd720b932004-04-25 17:57:43 +00001033 CPUState *env = cpu_single_env;
Paul Brook41c1b1c2010-03-12 16:54:58 +00001034 tb_page_addr_t tb_start, tb_end;
aliguori6b917542008-11-18 19:46:41 +00001035 PageDesc *p;
1036 int n;
1037#ifdef TARGET_HAS_PRECISE_SMC
1038 int current_tb_not_found = is_cpu_write_access;
1039 TranslationBlock *current_tb = NULL;
1040 int current_tb_modified = 0;
1041 target_ulong current_pc = 0;
1042 target_ulong current_cs_base = 0;
1043 int current_flags = 0;
1044#endif /* TARGET_HAS_PRECISE_SMC */
bellard9fa3e852004-01-04 18:06:42 +00001045
1046 p = page_find(start >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001047 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00001048 return;
ths5fafdf22007-09-16 21:08:06 +00001049 if (!p->code_bitmap &&
bellardd720b932004-04-25 17:57:43 +00001050 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1051 is_cpu_write_access) {
bellard9fa3e852004-01-04 18:06:42 +00001052 /* build code bitmap */
1053 build_page_bitmap(p);
1054 }
1055
1056 /* we remove all the TBs in the range [start, end[ */
1057 /* XXX: see if in some cases it could be faster to invalidate all the code */
1058 tb = p->first_tb;
1059 while (tb != NULL) {
1060 n = (long)tb & 3;
1061 tb = (TranslationBlock *)((long)tb & ~3);
1062 tb_next = tb->page_next[n];
1063 /* NOTE: this is subtle as a TB may span two physical pages */
1064 if (n == 0) {
1065 /* NOTE: tb_end may be after the end of the page, but
1066 it is not a problem */
1067 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1068 tb_end = tb_start + tb->size;
1069 } else {
1070 tb_start = tb->page_addr[1];
1071 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1072 }
1073 if (!(tb_end <= start || tb_start >= end)) {
bellardd720b932004-04-25 17:57:43 +00001074#ifdef TARGET_HAS_PRECISE_SMC
1075 if (current_tb_not_found) {
1076 current_tb_not_found = 0;
1077 current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001078 if (env->mem_io_pc) {
bellardd720b932004-04-25 17:57:43 +00001079 /* now we have a real cpu fault */
pbrook2e70f6e2008-06-29 01:03:05 +00001080 current_tb = tb_find_pc(env->mem_io_pc);
bellardd720b932004-04-25 17:57:43 +00001081 }
1082 }
1083 if (current_tb == tb &&
pbrook2e70f6e2008-06-29 01:03:05 +00001084 (current_tb->cflags & CF_COUNT_MASK) != 1) {
bellardd720b932004-04-25 17:57:43 +00001085 /* If we are modifying the current TB, we must stop
1086 its execution. We could be more precise by checking
1087 that the modification is after the current PC, but it
1088 would require a specialized function to partially
1089 restore the CPU state */
ths3b46e622007-09-17 08:09:54 +00001090
bellardd720b932004-04-25 17:57:43 +00001091 current_tb_modified = 1;
Stefan Weil618ba8e2011-04-18 06:39:53 +00001092 cpu_restore_state(current_tb, env, env->mem_io_pc);
aliguori6b917542008-11-18 19:46:41 +00001093 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1094 &current_flags);
bellardd720b932004-04-25 17:57:43 +00001095 }
1096#endif /* TARGET_HAS_PRECISE_SMC */
bellard6f5a9f72005-11-26 20:12:28 +00001097 /* we need to do that to handle the case where a signal
1098 occurs while doing tb_phys_invalidate() */
1099 saved_tb = NULL;
1100 if (env) {
1101 saved_tb = env->current_tb;
1102 env->current_tb = NULL;
1103 }
bellard9fa3e852004-01-04 18:06:42 +00001104 tb_phys_invalidate(tb, -1);
bellard6f5a9f72005-11-26 20:12:28 +00001105 if (env) {
1106 env->current_tb = saved_tb;
1107 if (env->interrupt_request && env->current_tb)
1108 cpu_interrupt(env, env->interrupt_request);
1109 }
bellard9fa3e852004-01-04 18:06:42 +00001110 }
1111 tb = tb_next;
1112 }
1113#if !defined(CONFIG_USER_ONLY)
1114 /* if no code remaining, no need to continue to use slow writes */
1115 if (!p->first_tb) {
1116 invalidate_page_bitmap(p);
bellardd720b932004-04-25 17:57:43 +00001117 if (is_cpu_write_access) {
pbrook2e70f6e2008-06-29 01:03:05 +00001118 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
bellardd720b932004-04-25 17:57:43 +00001119 }
1120 }
1121#endif
1122#ifdef TARGET_HAS_PRECISE_SMC
1123 if (current_tb_modified) {
1124 /* we generate a block containing just the instruction
1125 modifying the memory. It will ensure that it cannot modify
1126 itself */
bellardea1c1802004-06-14 18:56:36 +00001127 env->current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001128 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
bellardd720b932004-04-25 17:57:43 +00001129 cpu_resume_from_signal(env, NULL);
bellard9fa3e852004-01-04 18:06:42 +00001130 }
1131#endif
1132}
1133
1134/* len must be <= 8 and start must be a multiple of len */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001135static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
bellard9fa3e852004-01-04 18:06:42 +00001136{
1137 PageDesc *p;
1138 int offset, b;
bellard59817cc2004-02-16 22:01:13 +00001139#if 0
bellarda4193c82004-06-03 14:01:43 +00001140 if (1) {
aliguori93fcfe32009-01-15 22:34:14 +00001141 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1142 cpu_single_env->mem_io_vaddr, len,
1143 cpu_single_env->eip,
1144 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
bellard59817cc2004-02-16 22:01:13 +00001145 }
1146#endif
bellard9fa3e852004-01-04 18:06:42 +00001147 p = page_find(start >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001148 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00001149 return;
1150 if (p->code_bitmap) {
1151 offset = start & ~TARGET_PAGE_MASK;
1152 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1153 if (b & ((1 << len) - 1))
1154 goto do_invalidate;
1155 } else {
1156 do_invalidate:
bellardd720b932004-04-25 17:57:43 +00001157 tb_invalidate_phys_page_range(start, start + len, 1);
bellard9fa3e852004-01-04 18:06:42 +00001158 }
1159}
1160
bellard9fa3e852004-01-04 18:06:42 +00001161#if !defined(CONFIG_SOFTMMU)
Paul Brook41c1b1c2010-03-12 16:54:58 +00001162static void tb_invalidate_phys_page(tb_page_addr_t addr,
bellardd720b932004-04-25 17:57:43 +00001163 unsigned long pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00001164{
aliguori6b917542008-11-18 19:46:41 +00001165 TranslationBlock *tb;
bellard9fa3e852004-01-04 18:06:42 +00001166 PageDesc *p;
aliguori6b917542008-11-18 19:46:41 +00001167 int n;
bellardd720b932004-04-25 17:57:43 +00001168#ifdef TARGET_HAS_PRECISE_SMC
aliguori6b917542008-11-18 19:46:41 +00001169 TranslationBlock *current_tb = NULL;
bellardd720b932004-04-25 17:57:43 +00001170 CPUState *env = cpu_single_env;
aliguori6b917542008-11-18 19:46:41 +00001171 int current_tb_modified = 0;
1172 target_ulong current_pc = 0;
1173 target_ulong current_cs_base = 0;
1174 int current_flags = 0;
bellardd720b932004-04-25 17:57:43 +00001175#endif
bellard9fa3e852004-01-04 18:06:42 +00001176
1177 addr &= TARGET_PAGE_MASK;
1178 p = page_find(addr >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001179 if (!p)
bellardfd6ce8f2003-05-14 19:00:11 +00001180 return;
1181 tb = p->first_tb;
bellardd720b932004-04-25 17:57:43 +00001182#ifdef TARGET_HAS_PRECISE_SMC
1183 if (tb && pc != 0) {
1184 current_tb = tb_find_pc(pc);
1185 }
1186#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001187 while (tb != NULL) {
bellard9fa3e852004-01-04 18:06:42 +00001188 n = (long)tb & 3;
1189 tb = (TranslationBlock *)((long)tb & ~3);
bellardd720b932004-04-25 17:57:43 +00001190#ifdef TARGET_HAS_PRECISE_SMC
1191 if (current_tb == tb &&
pbrook2e70f6e2008-06-29 01:03:05 +00001192 (current_tb->cflags & CF_COUNT_MASK) != 1) {
bellardd720b932004-04-25 17:57:43 +00001193 /* If we are modifying the current TB, we must stop
1194 its execution. We could be more precise by checking
1195 that the modification is after the current PC, but it
1196 would require a specialized function to partially
1197 restore the CPU state */
ths3b46e622007-09-17 08:09:54 +00001198
bellardd720b932004-04-25 17:57:43 +00001199 current_tb_modified = 1;
Stefan Weil618ba8e2011-04-18 06:39:53 +00001200 cpu_restore_state(current_tb, env, pc);
aliguori6b917542008-11-18 19:46:41 +00001201 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1202 &current_flags);
bellardd720b932004-04-25 17:57:43 +00001203 }
1204#endif /* TARGET_HAS_PRECISE_SMC */
bellard9fa3e852004-01-04 18:06:42 +00001205 tb_phys_invalidate(tb, addr);
1206 tb = tb->page_next[n];
bellardfd6ce8f2003-05-14 19:00:11 +00001207 }
1208 p->first_tb = NULL;
bellardd720b932004-04-25 17:57:43 +00001209#ifdef TARGET_HAS_PRECISE_SMC
1210 if (current_tb_modified) {
1211 /* we generate a block containing just the instruction
1212 modifying the memory. It will ensure that it cannot modify
1213 itself */
bellardea1c1802004-06-14 18:56:36 +00001214 env->current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001215 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
bellardd720b932004-04-25 17:57:43 +00001216 cpu_resume_from_signal(env, puc);
1217 }
1218#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001219}
bellard9fa3e852004-01-04 18:06:42 +00001220#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001221
1222/* add the tb in the target page and protect it if necessary */
ths5fafdf22007-09-16 21:08:06 +00001223static inline void tb_alloc_page(TranslationBlock *tb,
Paul Brook41c1b1c2010-03-12 16:54:58 +00001224 unsigned int n, tb_page_addr_t page_addr)
bellardfd6ce8f2003-05-14 19:00:11 +00001225{
1226 PageDesc *p;
Juan Quintela4429ab42011-06-02 01:53:44 +00001227#ifndef CONFIG_USER_ONLY
1228 bool page_already_protected;
1229#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001230
bellard9fa3e852004-01-04 18:06:42 +00001231 tb->page_addr[n] = page_addr;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001232 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
bellard9fa3e852004-01-04 18:06:42 +00001233 tb->page_next[n] = p->first_tb;
Juan Quintela4429ab42011-06-02 01:53:44 +00001234#ifndef CONFIG_USER_ONLY
1235 page_already_protected = p->first_tb != NULL;
1236#endif
bellard9fa3e852004-01-04 18:06:42 +00001237 p->first_tb = (TranslationBlock *)((long)tb | n);
1238 invalidate_page_bitmap(p);
1239
bellard107db442004-06-22 18:48:46 +00001240#if defined(TARGET_HAS_SMC) || 1
bellardd720b932004-04-25 17:57:43 +00001241
bellard9fa3e852004-01-04 18:06:42 +00001242#if defined(CONFIG_USER_ONLY)
bellardfd6ce8f2003-05-14 19:00:11 +00001243 if (p->flags & PAGE_WRITE) {
pbrook53a59602006-03-25 19:31:22 +00001244 target_ulong addr;
1245 PageDesc *p2;
bellard9fa3e852004-01-04 18:06:42 +00001246 int prot;
1247
bellardfd6ce8f2003-05-14 19:00:11 +00001248 /* force the host page as non writable (writes will have a
1249 page fault + mprotect overhead) */
pbrook53a59602006-03-25 19:31:22 +00001250 page_addr &= qemu_host_page_mask;
bellardfd6ce8f2003-05-14 19:00:11 +00001251 prot = 0;
pbrook53a59602006-03-25 19:31:22 +00001252 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1253 addr += TARGET_PAGE_SIZE) {
1254
1255 p2 = page_find (addr >> TARGET_PAGE_BITS);
1256 if (!p2)
1257 continue;
1258 prot |= p2->flags;
1259 p2->flags &= ~PAGE_WRITE;
pbrook53a59602006-03-25 19:31:22 +00001260 }
ths5fafdf22007-09-16 21:08:06 +00001261 mprotect(g2h(page_addr), qemu_host_page_size,
bellardfd6ce8f2003-05-14 19:00:11 +00001262 (prot & PAGE_BITS) & ~PAGE_WRITE);
1263#ifdef DEBUG_TB_INVALIDATE
blueswir1ab3d1722007-11-04 07:31:40 +00001264 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
pbrook53a59602006-03-25 19:31:22 +00001265 page_addr);
bellardfd6ce8f2003-05-14 19:00:11 +00001266#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001267 }
bellard9fa3e852004-01-04 18:06:42 +00001268#else
1269 /* if some code is already present, then the pages are already
1270 protected. So we handle the case where only the first TB is
1271 allocated in a physical page */
Juan Quintela4429ab42011-06-02 01:53:44 +00001272 if (!page_already_protected) {
bellard6a00d602005-11-21 23:25:50 +00001273 tlb_protect_code(page_addr);
bellard9fa3e852004-01-04 18:06:42 +00001274 }
1275#endif
bellardd720b932004-04-25 17:57:43 +00001276
1277#endif /* TARGET_HAS_SMC */
bellardfd6ce8f2003-05-14 19:00:11 +00001278}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
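    /* TBs are allocated sequentially from code_gen_buffer, so tbs[] is
       already ordered by tc_ptr and a plain binary search suffices. */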
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);

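/* tb->jmp_first / tb->jmp_next form a circular list of the TBs chained
   to 'tb'.  The low two bits of each pointer encode which of the two
   jump slots links back; the tag value 2 marks the list head (the TB
   itself, as set up in tb_link_page() above). */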
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
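/* In system mode the guest virtual PC must first be translated to a
   physical RAM address before the TBs derived from it can be
   invalidated. */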
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint. */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = qemu_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint. */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference. */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    qemu_free(watchpoint);
}

/* Remove all matching watchpoints. */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint. */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = qemu_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint. */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference. */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    qemu_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

/* enable or disable low-level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif !defined(_WIN32)
        /* Win32 doesn't support line-buffering and requires size >= 2 */
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TBs */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}

#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
        return;
    }

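    /* Under icount, forcing icount_decr.u16.high to 0xffff makes the
       instruction-count check in the current TB fail, so execution
       returns to the main loop where the interrupt can be serviced. */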
    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu_unlink_tb(env);
    }
}

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *env, int mask)
{
    env->interrupt_request |= mask;
    cpu_unlink_tb(env);
}
#endif /* CONFIG_USER_ONLY */

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}

const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o port accesses" },
#endif
    { 0, NULL, NULL },
};

#ifndef CONFIG_USER_ONLY
static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
    = QLIST_HEAD_INITIALIZER(memory_client_list);

static void cpu_notify_set_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset,
                                  bool log_dirty)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        client->set_memory(client, start_addr, size, phys_offset, log_dirty);
    }
}

static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
                                        target_phys_addr_t end)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        int r = client->sync_dirty_bitmap(client, start, end);
        if (r < 0)
            return r;
    }
    return 0;
}

static int cpu_notify_migration_log(int enable)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        int r = client->migration_log(client, enable);
        if (r < 0)
            return r;
    }
    return 0;
}

struct last_map {
    target_phys_addr_t start_addr;
    ram_addr_t size;
    ram_addr_t phys_offset;
};

/* The l1_phys_map provides the upper P_L1_BITs of the guest physical
 * address. Each intermediate table provides the next L2_BITs of guest
 * physical address space. The number of levels varies based on host and
 * guest configuration, making it efficient to build the final guest
 * physical address by seeding the L1 offset and shifting and adding in
 * each L2 offset as we recurse through them. */
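/* For example (assuming L2_BITS is 10 and TARGET_PAGE_BITS is 12), a
 * guest physical address decomposes as
 *     [ L1 index | L2 index ... L2 index | 12-bit page offset ]
 * and each recursion level below shifts the accumulated 'addr' left by
 * L2_BITS before or-ing in the next table index. */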
static void phys_page_for_each_1(CPUPhysMemoryClient *client, int level,
                                 void **lp, target_phys_addr_t addr,
                                 struct last_map *map)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PhysPageDesc *pd = *lp;
        addr <<= L2_BITS + TARGET_PAGE_BITS;
        for (i = 0; i < L2_SIZE; ++i) {
            if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
                target_phys_addr_t start_addr = addr | i << TARGET_PAGE_BITS;

                if (map->size &&
                    start_addr == map->start_addr + map->size &&
                    pd[i].phys_offset == map->phys_offset + map->size) {

                    map->size += TARGET_PAGE_SIZE;
                    continue;
                } else if (map->size) {
                    client->set_memory(client, map->start_addr,
                                       map->size, map->phys_offset, false);
                }

                map->start_addr = start_addr;
                map->size = TARGET_PAGE_SIZE;
                map->phys_offset = pd[i].phys_offset;
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            phys_page_for_each_1(client, level - 1, pp + i,
                                 (addr << L2_BITS) | i, map);
        }
    }
}

static void phys_page_for_each(CPUPhysMemoryClient *client)
{
    int i;
    struct last_map map = { };

    for (i = 0; i < P_L1_SIZE; ++i) {
        phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
                             l1_phys_map + i, i, &map);
    }
    if (map.size) {
        client->set_memory(client, map.start_addr, map.size, map.phys_offset,
                           false);
    }
}

void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_INSERT_HEAD(&memory_client_list, client, list);
    phys_page_for_each(client);
}

void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_REMOVE(client, list);
}
#endif

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)

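/* A TB may straddle two guest pages, so jump cache entries hashed from
   the preceding page can also reference code on the page being flushed;
   both hash windows must be cleared. */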
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

static CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read  = -1,
    .addr_write = -1,
    .addr_code  = -1,
    .addend     = -1,
};

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
        }
    }

    memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));

    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
    tlb_flush_count++;
}

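/* Invalidate a single TLB entry if any of its three addresses (read,
   write or code) falls on the flushed page. */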
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        *tlb_entry = s_cputlb_empty_entry;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        tlb_flush(env, 1);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

    tlb_flush_jmp_cache(env, addr);
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
        != (end - 1) - start) {
        abort();
    }

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for(i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
        }
    }
}

int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    ret = cpu_notify_migration_log(!!enable);
    return ret;
}

int cpu_physical_memory_get_dirty_tracking(void)
{
    return in_migration;
}

int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
                                   target_phys_addr_t end_addr)
{
    int ret;

    ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
    return ret;
}

int cpu_physical_log_start(target_phys_addr_t start_addr,
                           ram_addr_t size)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        if (client->log_start) {
            int r = client->log_start(client, start_addr, size);
            if (r < 0) {
                return r;
            }
        }
    }
    return 0;
}

int cpu_physical_log_stop(target_phys_addr_t start_addr,
                          ram_addr_t size)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        if (client->log_stop) {
            int r = client->log_stop(client, start_addr, size);
            if (r < 0) {
                return r;
            }
        }
    }
    return 0;
}

static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;
    void *p;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
            + tlb_entry->addend);
        ram_addr = qemu_ram_addr_from_host_nofail(p);
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    int mmu_idx;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
    }
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB. */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}
/* Add a new TLB entry. At most one entry for a given virtual address
   is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
   supplied size is only used by tlb_flush_page.  */
void tlb_set_page(CPUState *env, target_ulong vaddr,
                  target_phys_addr_t paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    unsigned long addend;
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    target_phys_addr_t iotlb;

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
           " prot=%x idx=%d pd=0x%08lx\n",
           vaddr, paddr, prot, mmu_idx, pd);
#endif

    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM. */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        if (p) {
            iotlb += p->region_offset;
        } else {
            iotlb += paddr;
        }
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = io_mem_watch + paddr;
                address |= TLB_MMIO;
                break;
            }
        }
    }

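    /* env->iotlb stores the I/O address minus vaddr, so the memory
       access slow path can recover the full I/O address by simply
       adding the faulting virtual address back in. */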
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */

struct walk_memory_regions_data
{
    walk_memory_regions_fn fn;
    void *priv;
    unsigned long start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   abi_ulong end, int new_prot)
{
    if (data->start != -1ul) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1ul);
    data->prot = new_prot;

    return 0;
}

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    unsigned long i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}

static int dump_region(void *priv, abi_ulong start,
    abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
        " "TARGET_ABI_FMT_lx" %c%c%c\n",
        start, end, end - start,
        ((prot & PAGE_READ) ? 'r' : '-'),
        ((prot & PAGE_WRITE) ? 'w' : '-'),
        ((prot & PAGE_EXEC) ? 'x' : '-'));

    return (0);
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
                   "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p)
            return -1;
        if (!(p->flags & PAGE_VALID))
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            return 0;
        }
    }
    return 0;
}

/* called from signal handler: invalidate the code and unprotect the
   page.  Return TRUE if the fault was successfully handled.  */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
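
/* Call-site sketch (hypothetical, simplified): the host SEGV handler
   gives page_unprotect() first shot at write faults, since a write to
   a page that only lost PAGE_WRITE to protect translated code is not
   a real guest fault. */
#if 0
static int example_handle_write_fault(unsigned long host_pc,
                                      target_ulong guest_addr, void *puc)
{
    if (page_unprotect(guest_addr, host_pc, puc)) {
        return 1; /* mapping restored and TBs invalidated: retry access */
    }
    return 0; /* genuine fault: deliver it to the guest */
}
#endif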

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
    ram_addr_t region_offset[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset);
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
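
/* Worked example (informal, assuming 4K target pages): registering
   orig_size = 0x100 bytes at start_addr = 0x10000040 and evaluating
   the macro for the page at addr = 0x10000000 gives
   start_addr2 = 0x40 and end_addr2 = 0x13f, both strictly inside the
   page, so need_subpage = 1 and only that byte range is rerouted by
   the subpage machinery below. */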

/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ.  */
void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
                                      ram_addr_t size,
                                      ram_addr_t phys_offset,
                                      ram_addr_t region_offset,
                                      bool log_dirty)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    subpage_t *subpage;

    assert(size);
    cpu_notify_set_memory(start_addr, size, phys_offset, log_dirty);

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;

    addr = start_addr;
    do {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
        addr += TARGET_PAGE_SIZE;
    } while (addr != end_addr);

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
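
/* Usage sketch (hypothetical addresses): board code usually goes
   through the cpu_register_physical_memory() wrapper, which calls the
   function above with log_dirty = false.  Mapping RAM and a one-page
   MMIO region might look roughly like this: */
#if 0
static void example_map_board(ram_addr_t ram_offset, int mmio_index)
{
    /* 128MB of RAM at guest physical address 0 */
    cpu_register_physical_memory(0x00000000, 0x08000000,
                                 ram_offset | IO_MEM_RAM);
    /* one page of device registers at 0x10000000 */
    cpu_register_physical_memory(0x10000000, TARGET_PAGE_SIZE, mmio_index);
}
#endif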

/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}

void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}
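
/* Usage sketch: a device with write-heavy, side-effect-free MMIO (VGA
   is the classic case) marks such a window as coalesced so KVM can
   batch the exits; the pending writes are replayed on the next flush.
   The base and range below are hypothetical. */
#if 0
static void example_enable_coalescing(target_phys_addr_t vram_base)
{
    qemu_register_coalesced_mmio(vram_base, 0x20000);
}
#endif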

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    unlink(filename);
    free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    block->fd = fd;
    return area;
}
#endif

static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = 0, mingap = RAM_ADDR_MAX;

    if (QLIST_EMPTY(&ram_list.blocks))
        return 0;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QLIST_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }
    return offset;
}
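
/* Worked example (informal): with blocks [0, 0x1000000) and
   [0x2000000, 0x3000000) already registered, a request for
   size = 0x800000 sees two candidate gaps: [0x1000000, 0x2000000)
   (length 0x1000000) and the unbounded space past 0x3000000.  The
   first is the smallest gap that fits, so the new block is placed at
   offset 0x1000000. */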

static ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QLIST_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
                                   ram_addr_t size, void *host)
{
    RAMBlock *new_block, *block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = qemu_mallocz(sizeof(*new_block));

    if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
        char *id = dev->parent_bus->info->get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            qemu_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (!strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }

    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
            /* S390 KVM requires the topmost vma of the RAM to be smaller than
               a system-defined value, which is at least 256GB.  Larger systems
               have larger values.  We put the guest between the end of the
               data segment (system break) and this value.  We use 32GB as a
               base to have enough room for the system break to grow. */
            new_block->host = mmap((void*)0x800000000, size,
                                   PROT_EXEC|PROT_READ|PROT_WRITE,
                                   MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
            if (new_block->host == MAP_FAILED) {
                fprintf(stderr, "Allocating RAM failed\n");
                abort();
            }
#else
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
#endif
            qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
        }
    }
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
                                       last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
{
    return qemu_ram_alloc_from_ptr(dev, name, size, NULL);
}
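
/* Usage sketch (hypothetical device): a device model allocates guest
   RAM, keeps the ram_addr_t handle, maps it into the physical address
   space, and only fetches the host pointer for its own local use. */
#if 0
static void example_vram_init(DeviceState *dev, target_phys_addr_t base)
{
    ram_addr_t vram_offset;
    uint8_t *vram_ptr;

    vram_offset = qemu_ram_alloc(dev, "example.vram", 0x100000);
    cpu_register_physical_memory(base, 0x100000, vram_offset | IO_MEM_RAM);
    vram_ptr = qemu_get_ram_ptr(vram_offset); /* device-local access only */
    memset(vram_ptr, 0, 0x100000);
}
#endif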

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            qemu_free(block);
            return;
        }
    }
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            qemu_free(block);
            return;
        }
    }
}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            /* Move this entry to the start of the list.  */
            if (block != QLIST_FIRST(&ram_list.blocks)) {
                QLIST_REMOVE(block, next);
                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
            }
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
 */
void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to guest's ram.  Similar to qemu_get_ram_ptr
 * but takes a size argument */
void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QLIST_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);
}

int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}
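
/* Round-trip sketch (non-Xen case): for any host pointer inside a
   mapped block the two translations are inverses, which is what the
   softmmu fast path relies on when turning a TLB host address back
   into a dirty-tracking ram offset. */
#if 0
static void example_check_round_trip(void *host_ptr)
{
    ram_addr_t off = qemu_ram_addr_from_host_nofail(host_ptr);
    assert(qemu_get_ram_ptr(off) == host_ptr);
}
#endif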

static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 1);
#endif
    return 0;
}

static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 2);
#endif
    return 0;
}

static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 4);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 1);
#endif
}

static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 2);
#endif
}

static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 4);
#endif
}

static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};

static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};

static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stb_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stw_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stl_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static CPUReadMemoryFunc * const error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB.  Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
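
/* Usage sketch: the handlers above only fire for addresses the TLB
   code has routed to io_mem_watch.  A debug front end arms that path
   with cpu_watchpoint_insert(); the address and length here are
   hypothetical. */
#if 0
static void example_arm_watchpoint(CPUState *env)
{
    CPUWatchpoint *wp;
    cpu_watchpoint_insert(env, 0x1000, 4, BP_MEM_WRITE | BP_GDB, &wp);
}
#endif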

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc * const watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc * const watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};

static inline uint32_t subpage_readlen (subpage_t *mmio,
                                        target_phys_addr_t addr,
                                        unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    return io_mem_read[idx][len](io_mem_opaque[idx], addr);
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
           __func__, mmio, len, addr, idx, value);
#endif

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc * const subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc * const subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
        memory = IO_MEM_UNASSIGNED;
    memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    for (; idx <= eidx; idx++) {
        mmio->sub_io_index[idx] = memory;
        mmio->region_offset[idx] = region_offset;
    }

    return 0;
}

static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio,
                                            DEVICE_NATIVE_ENDIAN);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);

    return mmio;
}
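
/* Worked example (informal): registering a 0x100-byte MMIO region in
   the middle of a page already owned by another I/O handler makes
   cpu_register_physical_memory_log() call subpage_init() to swap the
   page's phys_offset for a subpage entry; subpage_register() then
   routes just those 0x100 bytes to the new handler while the rest of
   the page keeps the old one.  Note that plain RAM cannot be
   dispatched through a subpage: subpage_register() above downgrades
   IO_MEM_RAM to IO_MEM_UNASSIGNED. */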

static int get_free_io_mem_idx(void)
{
    int i;

    for (i = 0; i < IO_MEM_NB_ENTRIES; i++)
        if (!io_mem_used[i]) {
            io_mem_used[i] = 1;
            return i;
        }
    fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
    return -1;
}

/*
 * Usually, devices operate in little endian mode. There are devices out
 * there that operate in big endian too. Each device gets byte swapped
 * mmio if plugged onto a CPU that does the other endianness.
 *
 * CPU          Device           swap?
 *
 * little       little           no
 * little       big              yes
 * big          little           yes
 * big          big              no
 */

typedef struct SwapEndianContainer {
    CPUReadMemoryFunc *read[3];
    CPUWriteMemoryFunc *write[3];
    void *opaque;
} SwapEndianContainer;

static uint32_t swapendian_mem_readb(void *opaque, target_phys_addr_t addr)
{
    uint32_t val;
    SwapEndianContainer *c = opaque;
    val = c->read[0](c->opaque, addr);
    return val;
}

static uint32_t swapendian_mem_readw(void *opaque, target_phys_addr_t addr)
{
    uint32_t val;
    SwapEndianContainer *c = opaque;
    val = bswap16(c->read[1](c->opaque, addr));
    return val;
}

static uint32_t swapendian_mem_readl(void *opaque, target_phys_addr_t addr)
{
    uint32_t val;
    SwapEndianContainer *c = opaque;
    val = bswap32(c->read[2](c->opaque, addr));
    return val;
}

static CPUReadMemoryFunc * const swapendian_readfn[3] = {
    swapendian_mem_readb,
    swapendian_mem_readw,
    swapendian_mem_readl
};

static void swapendian_mem_writeb(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    SwapEndianContainer *c = opaque;
    c->write[0](c->opaque, addr, val);
}

static void swapendian_mem_writew(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    SwapEndianContainer *c = opaque;
    c->write[1](c->opaque, addr, bswap16(val));
}

static void swapendian_mem_writel(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    SwapEndianContainer *c = opaque;
    c->write[2](c->opaque, addr, bswap32(val));
}

static CPUWriteMemoryFunc * const swapendian_writefn[3] = {
    swapendian_mem_writeb,
    swapendian_mem_writew,
    swapendian_mem_writel
};

static void swapendian_init(int io_index)
{
    SwapEndianContainer *c = qemu_malloc(sizeof(SwapEndianContainer));
    int i;

    /* Swap mmio for big endian targets */
    c->opaque = io_mem_opaque[io_index];
    for (i = 0; i < 3; i++) {
        c->read[i] = io_mem_read[io_index][i];
        c->write[i] = io_mem_write[io_index][i];

        io_mem_read[io_index][i] = swapendian_readfn[i];
        io_mem_write[io_index][i] = swapendian_writefn[i];
    }
    io_mem_opaque[io_index] = c;
}

static void swapendian_del(int io_index)
{
    if (io_mem_read[io_index][0] == swapendian_readfn[0]) {
        qemu_free(io_mem_opaque[io_index]);
    }
}
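
/* Effect sketch: after swapendian_init(io_index), 16- and 32-bit
   accesses are byte-swapped exactly once on their way to the device
   callbacks (a guest 32-bit store of 0x11223344 arrives as
   0x44332211), while byte accesses pass through unchanged, matching
   the table above. */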

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2).  Functions can be omitted with a NULL function pointer.
   If io_index is non-zero, the corresponding io zone is
   modified.  If it is zero, a new io zone is allocated.  The return
   value can be used with cpu_register_physical_memory().  (-1) is
   returned on error. */
static int cpu_register_io_memory_fixed(int io_index,
                                        CPUReadMemoryFunc * const *mem_read,
                                        CPUWriteMemoryFunc * const *mem_write,
                                        void *opaque, enum device_endian endian)
{
    int i;

    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        io_index >>= IO_MEM_SHIFT;
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for (i = 0; i < 3; ++i) {
        io_mem_read[io_index][i]
            = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
    }
    for (i = 0; i < 3; ++i) {
        io_mem_write[io_index][i]
            = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
    }
    io_mem_opaque[io_index] = opaque;

    switch (endian) {
    case DEVICE_BIG_ENDIAN:
#ifndef TARGET_WORDS_BIGENDIAN
        swapendian_init(io_index);
#endif
        break;
    case DEVICE_LITTLE_ENDIAN:
#ifdef TARGET_WORDS_BIGENDIAN
        swapendian_init(io_index);
#endif
        break;
    case DEVICE_NATIVE_ENDIAN:
    default:
        break;
    }

    return (io_index << IO_MEM_SHIFT);
}

int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
                           CPUWriteMemoryFunc * const *mem_write,
                           void *opaque, enum device_endian endian)
{
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque, endian);
}
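
/* Registration sketch (hypothetical device): a device supplies its
   read/write triplets and maps the returned index, much as the device
   models elsewhere in the tree do.  All "example_*" names are
   invented here. */
#if 0
static CPUReadMemoryFunc * const example_read[3] = {
    example_readb, example_readw, example_readl,    /* hypothetical */
};
static CPUWriteMemoryFunc * const example_write[3] = {
    example_writeb, example_writew, example_writel, /* hypothetical */
};

static void example_register(void *opaque, target_phys_addr_t base)
{
    int index = cpu_register_io_memory(example_read, example_write,
                                       opaque, DEVICE_NATIVE_ENDIAN);
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, index);
}
#endif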

void cpu_unregister_io_memory(int io_table_address)
{
    int i;
    int io_index = io_table_address >> IO_MEM_SHIFT;

    swapendian_del(io_index);

    for (i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = unassigned_mem_read[i];
        io_mem_write[io_index][i] = unassigned_mem_write[i];
    }
    io_mem_opaque[io_index] = NULL;
    io_mem_used[io_index] = 0;
}

static void io_mem_init(void)
{
    int i;

    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
                                 unassigned_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
                                 unassigned_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
                                 notdirty_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    for (i = 0; i < 5; i++)
        io_mem_used[i] = 1;

    io_mem_watch = cpu_register_io_memory(watch_mem_read,
                                          watch_mem_write, NULL,
                                          DEVICE_NATIVE_ENDIAN);
}

static void memory_map_init(void)
{
    system_memory = qemu_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, "system", INT64_MAX);
    set_system_memory_map(system_memory);
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}
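
/* Transition sketch (hedged: signatures follow the MemoryRegion API as
   it lands in memory.h around this point in QEMU's history): new board
   code is expected to hang regions off get_system_memory() rather than
   call cpu_register_physical_memory() directly.  "my_region" and
   "base_addr" are hypothetical. */
#if 0
static void example_map_region(MemoryRegion *my_region,
                               target_phys_addr_t base_addr)
{
    memory_region_add_subregion(get_system_memory(), base_addr, my_region);
}
#endif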

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
bellard8df1cd02005-01-28 22:37:22 +00003882
#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    ram_addr_t pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                ram_addr_t addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                qemu_put_ram_ptr(ptr);
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
                memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
                qemu_put_ram_ptr(ptr);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
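
/* Editor's sketch, not in the original file: a round trip through the
 * slow path above.  The equality only holds when gpa points at plain
 * RAM; MMIO or unassigned pages may read back something else. */
static void phys_rw_roundtrip_example(target_phys_addr_t gpa)
{
    uint32_t out = 0x12345678;
    uint32_t in = 0;

    cpu_physical_memory_rw(gpa, (uint8_t *)&out, sizeof(out), 1 /* write */);
    cpu_physical_memory_rw(gpa, (uint8_t *)&in, sizeof(in), 0 /* read */);
    if (in != out) {
        /* gpa was backed by a device or unassigned memory */
    }
}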

/* used for ROM loading; can write to RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            qemu_put_ram_ptr(ptr);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
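
/* Editor's sketch, not in the original file: firmware loaders go through
 * the helper above because a plain cpu_physical_memory_rw() write to a
 * ROM page is routed to the unassigned handlers and dropped. */
static void load_firmware_example(target_phys_addr_t rom_base,
                                  const uint8_t *blob, int blob_len)
{
    cpu_physical_memory_write_rom(rom_base, blob, blob_len);
}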

typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = qemu_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    qemu_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
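
/* Editor's sketch, not in the original file: how a DMA user parks itself
 * on the notification list when the single bounce buffer is busy.  The
 * my_dma_* names are hypothetical. */
static void my_dma_retry(void *opaque)
{
    /* invoked from cpu_notify_map_clients(); re-issue the failed
       cpu_physical_memory_map() from here */
}

static void my_dma_wait_example(void *dma_state)
{
    void *client = cpu_register_map_client(dma_state, my_dma_retry);

    /* keep "client" so the wait can be cancelled with
       cpu_unregister_map_client() if the transfer is aborted first */
    (void)client;
}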

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t todo = 0;
    int l;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    ram_addr_t raddr = RAM_ADDR_MAX;
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_read(addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        }

        len -= l;
        addr += l;
        todo += l;
    }
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
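
/* Editor's sketch, not in the original file, of the intended calling
 * pattern: map as much as possible, touch the host pointer directly,
 * then unmap with the byte count actually transferred so the dirty
 * bitmap and the waiting map clients stay consistent. */
static void dma_write_example(target_phys_addr_t addr,
                              target_phys_addr_t len, const uint8_t *src)
{
    while (len > 0) {
        target_phys_addr_t plen = len;
        void *host = cpu_physical_memory_map(addr, &plen, 1 /* is_write */);

        if (!host) {
            /* bounce buffer busy: give up, or wait via
               cpu_register_map_client() as sketched above */
            break;
        }
        memcpy(host, src, plen);
        cpu_physical_memory_unmap(host, plen, 1, plen);
        addr += plen;
        src += plen;
        len -= plen;
    }
}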

/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
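
/* Editor's sketch, not in the original file: the _le/_be variants let a
 * device model read fixed-endian guest structures without open-coding
 * byte swaps.  The "descriptor" naming is illustrative. */
static uint32_t read_le_descriptor_example(target_phys_addr_t desc)
{
    /* same value regardless of host or target byte order */
    return ldl_le_phys(desc);
}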

/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;

        /* XXX This is broken when device endian != cpu endian.
           Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
                                          enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned.  The ram page is not marked as dirty
   and the code inside is not invalidated.  It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
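
/* Editor's sketch, not in the original file: a target MMU helper writing
 * an accessed flag back into a page table entry uses the _notdirty store
 * so that the write does not itself dirty the page and defeat PTE
 * tracking.  The PTE_ACCESSED_BIT_EXAMPLE value is hypothetical. */
#define PTE_ACCESSED_BIT_EXAMPLE 0x20

static void pte_mark_accessed_example(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    stl_phys_notdirty(pte_addr, pte | PTE_ACCESSED_BIT_EXAMPLE);
}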

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}
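
/* Editor's sketch, not in the original file: storing a big-endian
 * counter into guest memory is a one-liner with the explicit-endian
 * helpers, whatever the host and target byte order happen to be. */
static void write_be_counter_example(target_phys_addr_t addr, uint64_t v)
{
    stq_be_phys(addr, v);
}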

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif
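
/* Editor's sketch, not in the original file: the gdb-stub style of use.
 * Access goes through cpu_get_phys_page_debug() so guest virtual
 * addresses resolve without faulting, and writes take the ROM-capable
 * path above, which is what makes breakpoints in ROM possible. */
static int debug_peek_example(CPUState *env, target_ulong va, uint32_t *out)
{
    return cpu_memory_rw_debug(env, va, (uint8_t *)out, sizeof(*out),
                               0 /* read */);
}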

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred. */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn. */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB. If this is not
       the first instruction in a TB then re-execute the preceding
       branch. */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen. */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception. In practice
       we have already translated the block once so it's probably ok. */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB. */
    cpu_resume_from_signal(env, NULL);
}

#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif