blob: a6d3bad7472c294587dd70d761f36f5d0b194122 [file] [log] [blame]
bellard54936002003-05-13 00:25:15 +00001/*
bellardfd6ce8f2003-05-14 19:00:11 +00002 * virtual page mapping and translated block handling
ths5fafdf22007-09-16 21:08:06 +00003 *
bellard54936002003-05-13 00:25:15 +00004 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
Blue Swirl8167ee82009-07-16 20:47:01 +000017 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
bellard54936002003-05-13 00:25:15 +000018 */
bellard67b915a2004-03-31 23:37:16 +000019#include "config.h"
bellardd5a8f072004-09-29 21:15:28 +000020#ifdef _WIN32
21#include <windows.h>
22#else
bellarda98d49b2004-11-14 16:22:05 +000023#include <sys/types.h>
bellardd5a8f072004-09-29 21:15:28 +000024#include <sys/mman.h>
25#endif
bellard54936002003-05-13 00:25:15 +000026#include <stdlib.h>
27#include <stdio.h>
28#include <stdarg.h>
29#include <string.h>
30#include <errno.h>
31#include <unistd.h>
32#include <inttypes.h>
33
bellard6180a182003-09-30 21:04:53 +000034#include "cpu.h"
35#include "exec-all.h"
aurel32ca10f862008-04-11 21:35:42 +000036#include "qemu-common.h"
bellardb67d9a52008-05-23 09:57:34 +000037#include "tcg.h"
pbrookb3c77242008-06-30 16:31:04 +000038#include "hw/hw.h"
aliguori74576192008-10-06 14:02:03 +000039#include "osdep.h"
aliguori7ba1e612008-11-05 16:04:33 +000040#include "kvm.h"
Blue Swirl29e922b2010-03-29 19:24:00 +000041#include "qemu-timer.h"
pbrook53a59602006-03-25 19:31:22 +000042#if defined(CONFIG_USER_ONLY)
43#include <qemu.h>
Riku Voipiofd052bf2010-01-25 14:30:49 +020044#include <signal.h>
Juergen Lockf01576f2010-03-25 22:32:16 +010045#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
46#include <sys/param.h>
47#if __FreeBSD_version >= 700104
48#define HAVE_KINFO_GETVMMAP
49#define sigqueue sigqueue_freebsd /* avoid redefinition */
50#include <sys/time.h>
51#include <sys/proc.h>
52#include <machine/profile.h>
53#define _KERNEL
54#include <sys/user.h>
55#undef _KERNEL
56#undef sigqueue
57#include <libutil.h>
58#endif
59#endif
pbrook53a59602006-03-25 19:31:22 +000060#endif
bellard54936002003-05-13 00:25:15 +000061
bellardfd6ce8f2003-05-14 19:00:11 +000062//#define DEBUG_TB_INVALIDATE
bellard66e85a22003-06-24 13:28:12 +000063//#define DEBUG_FLUSH
bellard9fa3e852004-01-04 18:06:42 +000064//#define DEBUG_TLB
pbrook67d3b952006-12-18 05:03:52 +000065//#define DEBUG_UNASSIGNED
bellardfd6ce8f2003-05-14 19:00:11 +000066
67/* make various TB consistency checks */
ths5fafdf22007-09-16 21:08:06 +000068//#define DEBUG_TB_CHECK
69//#define DEBUG_TLB_CHECK
bellardfd6ce8f2003-05-14 19:00:11 +000070
ths1196be32007-03-17 15:17:58 +000071//#define DEBUG_IOPORT
blueswir1db7b5422007-05-26 17:36:03 +000072//#define DEBUG_SUBPAGE
ths1196be32007-03-17 15:17:58 +000073
pbrook99773bd2006-04-16 15:14:59 +000074#if !defined(CONFIG_USER_ONLY)
75/* TB consistency checks only implemented for usermode emulation. */
76#undef DEBUG_TB_CHECK
77#endif
78
bellard9fa3e852004-01-04 18:06:42 +000079#define SMC_BITMAP_USE_THRESHOLD 10
80
blueswir1bdaf78e2008-10-04 07:24:27 +000081static TranslationBlock *tbs;
bellard26a5f132008-05-28 12:30:31 +000082int code_gen_max_blocks;
bellard9fa3e852004-01-04 18:06:42 +000083TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
blueswir1bdaf78e2008-10-04 07:24:27 +000084static int nb_tbs;
bellardeb51d102003-05-14 21:51:13 +000085/* any access to the tbs or the page table must use this lock */
Anthony Liguoric227f092009-10-01 16:12:16 -050086spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
bellardfd6ce8f2003-05-14 19:00:11 +000087
blueswir1141ac462008-07-26 15:05:57 +000088#if defined(__arm__) || defined(__sparc_v9__)
89/* The prologue must be reachable with a direct jump. ARM and Sparc64
90 have limited branch ranges (possibly also PPC) so place it in a
blueswir1d03d8602008-07-10 17:21:31 +000091 section close to code segment. */
92#define code_gen_section \
93 __attribute__((__section__(".gen_code"))) \
94 __attribute__((aligned (32)))
Stefan Weilf8e2af12009-06-18 23:04:48 +020095#elif defined(_WIN32)
96/* Maximum alignment for Win32 is 16. */
97#define code_gen_section \
98 __attribute__((aligned (16)))
blueswir1d03d8602008-07-10 17:21:31 +000099#else
100#define code_gen_section \
101 __attribute__((aligned (32)))
102#endif
103
104uint8_t code_gen_prologue[1024] code_gen_section;
blueswir1bdaf78e2008-10-04 07:24:27 +0000105static uint8_t *code_gen_buffer;
106static unsigned long code_gen_buffer_size;
bellard26a5f132008-05-28 12:30:31 +0000107/* threshold to flush the translated code buffer */
blueswir1bdaf78e2008-10-04 07:24:27 +0000108static unsigned long code_gen_buffer_max_size;
bellardfd6ce8f2003-05-14 19:00:11 +0000109uint8_t *code_gen_ptr;
110
pbrooke2eef172008-06-08 01:09:01 +0000111#if !defined(CONFIG_USER_ONLY)
bellard9fa3e852004-01-04 18:06:42 +0000112int phys_ram_fd;
bellard1ccde1c2004-02-06 19:46:14 +0000113uint8_t *phys_ram_dirty;
aliguori74576192008-10-06 14:02:03 +0000114static int in_migration;
pbrook94a6b542009-04-11 17:15:54 +0000115
116typedef struct RAMBlock {
117 uint8_t *host;
Anthony Liguoric227f092009-10-01 16:12:16 -0500118 ram_addr_t offset;
119 ram_addr_t length;
pbrook94a6b542009-04-11 17:15:54 +0000120 struct RAMBlock *next;
121} RAMBlock;
122
123static RAMBlock *ram_blocks;
124/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
Stuart Bradyccbb4d42009-05-03 12:15:06 +0100125 then we can no longer assume contiguous ram offsets, and external uses
pbrook94a6b542009-04-11 17:15:54 +0000126 of this variable will break. */
Anthony Liguoric227f092009-10-01 16:12:16 -0500127ram_addr_t last_ram_offset;
pbrooke2eef172008-06-08 01:09:01 +0000128#endif
bellard9fa3e852004-01-04 18:06:42 +0000129
bellard6a00d602005-11-21 23:25:50 +0000130CPUState *first_cpu;
131/* current CPU in the current thread. It is only valid inside
132 cpu_exec() */
ths5fafdf22007-09-16 21:08:06 +0000133CPUState *cpu_single_env;
pbrook2e70f6e2008-06-29 01:03:05 +0000134/* 0 = Do not count executed instructions.
thsbf20dc02008-06-30 17:22:19 +0000135 1 = Precise instruction counting.
pbrook2e70f6e2008-06-29 01:03:05 +0000136 2 = Adaptive rate instruction counting. */
137int use_icount = 0;
138/* Current instruction counter. While executing translated code this may
139 include some instructions that have not yet been executed. */
140int64_t qemu_icount;
bellard6a00d602005-11-21 23:25:50 +0000141
/* Per-guest-page bookkeeping used by the translator to track TBs and
   self-modifying-code state for one target page.  */
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;     /* lazily built; freed by invalidate_page_bitmap() */
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;      /* PAGE_* protection/reservation flags */
#endif
} PageDesc;
153
Paul Brook41c1b1c2010-03-12 16:54:58 +0000154/* In system mode we want L1_MAP to be based on ram offsets,
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800155 while in user mode we want it to be based on virtual addresses. */
156#if !defined(CONFIG_USER_ONLY)
Paul Brook41c1b1c2010-03-12 16:54:58 +0000157#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
158# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
159#else
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800160# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
Paul Brook41c1b1c2010-03-12 16:54:58 +0000161#endif
j_mayerbedb69e2007-04-05 20:08:21 +0000162#else
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800163# define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
j_mayerbedb69e2007-04-05 20:08:21 +0000164#endif
bellard54936002003-05-13 00:25:15 +0000165
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800166/* Size of the L2 (and L3, etc) page tables. */
167#define L2_BITS 10
bellard54936002003-05-13 00:25:15 +0000168#define L2_SIZE (1 << L2_BITS)
169
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800170/* The bits remaining after N lower levels of page tables. */
171#define P_L1_BITS_REM \
172 ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
173#define V_L1_BITS_REM \
174 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
175
176/* Size of the L1 page table. Avoid silly small sizes. */
177#if P_L1_BITS_REM < 4
178#define P_L1_BITS (P_L1_BITS_REM + L2_BITS)
179#else
180#define P_L1_BITS P_L1_BITS_REM
181#endif
182
183#if V_L1_BITS_REM < 4
184#define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
185#else
186#define V_L1_BITS V_L1_BITS_REM
187#endif
188
189#define P_L1_SIZE ((target_phys_addr_t)1 << P_L1_BITS)
190#define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
191
192#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
193#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
194
bellard83fb7ad2004-07-05 21:25:26 +0000195unsigned long qemu_real_host_page_size;
196unsigned long qemu_host_page_bits;
197unsigned long qemu_host_page_size;
198unsigned long qemu_host_page_mask;
bellard54936002003-05-13 00:25:15 +0000199
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800200/* This is a multi-level map on the virtual address space.
201 The bottom level has pointers to PageDesc. */
202static void *l1_map[V_L1_SIZE];
bellard54936002003-05-13 00:25:15 +0000203
pbrooke2eef172008-06-08 01:09:01 +0000204#if !defined(CONFIG_USER_ONLY)
/* Leaf entry of the physical page map (l1_phys_map).  */
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    /* NOTE(review): defaulted to (index << TARGET_PAGE_BITS) in
       phys_page_find_alloc; presumably the offset into the owning I/O
       region -- confirm against the region registration code.  */
    ram_addr_t region_offset;
} PhysPageDesc;
210
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800211/* This is a multi-level map on the physical address space.
212 The bottom level has pointers to PhysPageDesc. */
213static void *l1_phys_map[P_L1_SIZE];
Paul Brook6d9a1302010-02-28 23:55:53 +0000214
pbrooke2eef172008-06-08 01:09:01 +0000215static void io_mem_init(void);
216
bellard33417e72003-08-10 21:47:01 +0000217/* io memory support */
bellard33417e72003-08-10 21:47:01 +0000218CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
219CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
bellarda4193c82004-06-03 14:01:43 +0000220void *io_mem_opaque[IO_MEM_NB_ENTRIES];
blueswir1511d2b12009-03-07 15:32:56 +0000221static char io_mem_used[IO_MEM_NB_ENTRIES];
pbrook6658ffb2007-03-16 23:58:11 +0000222static int io_mem_watch;
223#endif
bellard33417e72003-08-10 21:47:01 +0000224
bellard34865132003-10-05 14:28:56 +0000225/* log support */
Juha Riihimäki1e8b27c2009-12-03 15:56:02 +0200226#ifdef WIN32
227static const char *logfilename = "qemu.log";
228#else
blueswir1d9b630f2008-10-05 09:57:08 +0000229static const char *logfilename = "/tmp/qemu.log";
Juha Riihimäki1e8b27c2009-12-03 15:56:02 +0200230#endif
bellard34865132003-10-05 14:28:56 +0000231FILE *logfile;
232int loglevel;
pbrooke735b912007-06-30 13:53:24 +0000233static int log_append = 0;
bellard34865132003-10-05 14:28:56 +0000234
bellarde3db7222005-01-26 22:00:47 +0000235/* statistics */
Paul Brookb3755a92010-03-12 16:54:58 +0000236#if !defined(CONFIG_USER_ONLY)
bellarde3db7222005-01-26 22:00:47 +0000237static int tlb_flush_count;
Paul Brookb3755a92010-03-12 16:54:58 +0000238#endif
bellarde3db7222005-01-26 22:00:47 +0000239static int tb_flush_count;
240static int tb_phys_invalidate_count;
241
#ifdef _WIN32
/* Make [addr, addr+size) executable.  The previous protection is
   discarded (range becomes read/write/execute).  */
static void map_exec(void *addr, long size)
{
    DWORD old_protect;

    VirtualProtect(addr, size, PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
/* Make [addr, addr+size) executable by widening the enclosing host
   pages to RWX.  The range is expanded outward to page boundaries.  */
static void map_exec(void *addr, long size)
{
    unsigned long pagesz = getpagesize();
    unsigned long first = (unsigned long)addr & ~(pagesz - 1);
    unsigned long last =
        ((unsigned long)addr + size + pagesz - 1) & ~(pagesz - 1);

    mprotect((void *)first, last - first,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
267
/* Discover the host page size, derive the qemu_host_page_* globals, and
   (user-mode only) pre-mark every region already mapped in the host
   process as PAGE_RESERVED so guest mappings cannot collide with them.  */
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    /* qemu_host_page_size may have been set on the command line; keep it
       but never let it drop below either page size.  */
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        /* FreeBSD >= 700104: enumerate existing mappings via sysctl.  */
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        /* Mapping extends past the guest address space:
                           clamp to the largest representable address.  */
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
#else
                        endaddr = ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS) - 1;
#endif
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        /* Linux and other /proc systems: parse /proc/self/maps.  */
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                /* Each line begins "start-end ..."; the rest is skipped.  */
                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        /* Clamp mappings that extend past the guest
                           address space (see FreeBSD branch above).  */
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
#else
                        endaddr = ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS) - 1;
#endif
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}
361
/* Walk the multi-level l1_map for the page at 'index' (a tb_page_addr_t
   page number) and return its PageDesc.  If 'alloc' is false, return
   NULL when any intermediate table is missing; otherwise allocate the
   missing levels on the way down.  */
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use qemu_malloc because it may recurse into a locked mutex.
       Neither can we record the new pages we reserve while allocating a
       given page because that may recurse into an unallocated page table
       entry.  Stuff the allocations we do make into a queue and process
       them after having completed one entire page table allocation.  */

    /* Worst case: one mmap per level, two slots (addr, len) each.  */
    unsigned long reserve[2 * (V_L1_SHIFT / L2_BITS)];
    int reserve_idx = 0;

# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
        if (h2g_valid(P)) {                             \
            reserve[reserve_idx] = h2g(P);              \
            reserve[reserve_idx + 1] = SIZE;            \
            reserve_idx += 2;                           \
        }                                               \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = qemu_mallocz(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    /* Final level: an array of PageDesc rather than of pointers.  */
    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC
#if defined(CONFIG_USER_ONLY)
    /* Now that the walk is complete, it is safe to mark the pages we
       mapped above as reserved (may itself allocate table levels).  */
    for (i = 0; i < reserve_idx; i += 2) {
        unsigned long addr = reserve[i];
        unsigned long len = reserve[i + 1];

        page_set_flags(addr & TARGET_PAGE_MASK,
                       TARGET_PAGE_ALIGN(addr + len),
                       PAGE_RESERVED);
    }
#endif

    return pd + (index & (L2_SIZE - 1));
}
434
Paul Brook41c1b1c2010-03-12 16:54:58 +0000435static inline PageDesc *page_find(tb_page_addr_t index)
bellard54936002003-05-13 00:25:15 +0000436{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800437 return page_find_alloc(index, 0);
bellard54936002003-05-13 00:25:15 +0000438}
439
Paul Brook6d9a1302010-02-28 23:55:53 +0000440#if !defined(CONFIG_USER_ONLY)
/* Walk the multi-level l1_phys_map for physical page 'index' and return
   its PhysPageDesc.  If 'alloc' is false, return NULL when a level is
   missing; otherwise allocate levels on demand.  Freshly allocated leaf
   entries default to IO_MEM_UNASSIGNED.  */
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            /* qemu_mallocz zero-fills, so all children start NULL.  */
            *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    /* Final level: an array of PhysPageDesc leaves.  */
    pd = *lp;
    if (pd == NULL) {
        int i;

        if (!alloc) {
            return NULL;
        }

        *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);

        /* Explicitly initialize every leaf in the new table.  */
        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}
480
Anthony Liguoric227f092009-10-01 16:12:16 -0500481static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
bellard92e873b2004-05-21 14:52:29 +0000482{
bellard108c49b2005-07-24 12:55:09 +0000483 return phys_page_find_alloc(index, 0);
bellard92e873b2004-05-21 14:52:29 +0000484}
485
Anthony Liguoric227f092009-10-01 16:12:16 -0500486static void tlb_protect_code(ram_addr_t ram_addr);
487static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
bellard3a7d9292005-08-21 09:26:42 +0000488 target_ulong vaddr);
pbrookc8a706f2008-06-02 16:16:42 +0000489#define mmap_lock() do { } while(0)
490#define mmap_unlock() do { } while(0)
bellard9fa3e852004-01-04 18:06:42 +0000491#endif
bellardfd6ce8f2003-05-14 19:00:11 +0000492
bellard43694152008-05-29 09:35:57 +0000493#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
494
495#if defined(CONFIG_USER_ONLY)
Stuart Bradyccbb4d42009-05-03 12:15:06 +0100496/* Currently it is not recommended to allocate big chunks of data in
bellard43694152008-05-29 09:35:57 +0000497 user mode. It will change when a dedicated libc will be used */
498#define USE_STATIC_CODE_GEN_BUFFER
499#endif
500
501#ifdef USE_STATIC_CODE_GEN_BUFFER
Aurelien Jarnoebf50fb2010-03-29 02:12:51 +0200502static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
503 __attribute__((aligned (CODE_GEN_ALIGN)));
bellard43694152008-05-29 09:35:57 +0000504#endif
505
/* Allocate (or adopt) the translated-code buffer, make it executable,
   and size the TranslationBlock array.  'tb_size' of 0 selects a
   default.  Placement is host-CPU/OS specific because some hosts need
   the buffer within direct-branch range of the generated prologue.  */
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    /* User-mode: use the statically reserved buffer (tb_size ignored).  */
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* Keep the buffer in the low 4GB so rip-relative/32-bit
           displacements from generated code can reach it.  */
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    /* Fallback hosts: plain allocation plus an mprotect to RWX.  */
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    /* Leave room for one maximal TB at the end of the buffer.  */
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
592
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    /* Order matters: the TCG backend must exist before the code buffer
       is sized, and the buffer before the write pointer is set.  */
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
}
606
pbrook9656f322008-07-01 20:01:19 +0000607#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
608
Juan Quintelae59fb372009-09-29 22:48:21 +0200609static int cpu_common_post_load(void *opaque, int version_id)
Juan Quintelae7f4eff2009-09-10 03:04:33 +0200610{
611 CPUState *env = opaque;
612
aurel323098dba2009-03-07 21:28:24 +0000613 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
614 version_id is increased. */
615 env->interrupt_request &= ~0x01;
pbrook9656f322008-07-01 20:01:19 +0000616 tlb_flush(env, 1);
617
618 return 0;
619}
Juan Quintelae7f4eff2009-09-10 03:04:33 +0200620
/* Migration description of the target-independent part of CPUState:
   only 'halted' and 'interrupt_request' are transferred; the rest is
   handled by the per-target cpu_save/cpu_load pair.  */
static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
pbrook9656f322008-07-01 20:01:19 +0000633#endif
634
Glauber Costa950f1472009-06-09 12:15:18 -0400635CPUState *qemu_get_cpu(int cpu)
636{
637 CPUState *env = first_cpu;
638
639 while (env) {
640 if (env->cpu_index == cpu)
641 break;
642 env = env->next_cpu;
643 }
644
645 return env;
646}
647
/* Register a newly created CPU: append it to the global first_cpu list,
   assign it the next free cpu_index, and (system mode) hook it into the
   savevm/migration machinery.  */
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    /* User mode: the list may be walked concurrently by other threads.  */
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    /* Walk to the tail, counting existing CPUs to derive the new index.  */
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    /* Common state via VMState; target-specific state via cpu_save/load.  */
    vmstate_register(cpu_index, &vmstate_cpu_common, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}
677
/* Discard a page's self-modifying-code bitmap and reset its write
   counter, forcing the bitmap to be rebuilt on the next SMC episode.  */
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
686
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800687/* Set to NULL all the 'first_tb' fields in all PageDescs. */
688
689static void page_flush_tb_1 (int level, void **lp)
690{
691 int i;
692
693 if (*lp == NULL) {
694 return;
695 }
696 if (level == 0) {
697 PageDesc *pd = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +0000698 for (i = 0; i < L2_SIZE; ++i) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800699 pd[i].first_tb = NULL;
700 invalidate_page_bitmap(pd + i);
701 }
702 } else {
703 void **pp = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +0000704 for (i = 0; i < L2_SIZE; ++i) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800705 page_flush_tb_1 (level - 1, pp + i);
706 }
707 }
708}
709
bellardfd6ce8f2003-05-14 19:00:11 +0000710static void page_flush_tb(void)
711{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800712 int i;
713 for (i = 0; i < V_L1_SIZE; i++) {
714 page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
bellardfd6ce8f2003-05-14 19:00:11 +0000715 }
716}
717
718/* flush all the translation blocks */
bellardd4e81642003-05-25 16:46:15 +0000719/* XXX: tb_flush is currently not thread safe */
bellard6a00d602005-11-21 23:25:50 +0000720void tb_flush(CPUState *env1)
bellardfd6ce8f2003-05-14 19:00:11 +0000721{
bellard6a00d602005-11-21 23:25:50 +0000722 CPUState *env;
bellard01243112004-01-04 15:48:17 +0000723#if defined(DEBUG_FLUSH)
blueswir1ab3d1722007-11-04 07:31:40 +0000724 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
725 (unsigned long)(code_gen_ptr - code_gen_buffer),
726 nb_tbs, nb_tbs > 0 ?
727 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
bellardfd6ce8f2003-05-14 19:00:11 +0000728#endif
bellard26a5f132008-05-28 12:30:31 +0000729 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
pbrooka208e542008-03-31 17:07:36 +0000730 cpu_abort(env1, "Internal error: code buffer overflow\n");
731
bellardfd6ce8f2003-05-14 19:00:11 +0000732 nb_tbs = 0;
ths3b46e622007-09-17 08:09:54 +0000733
bellard6a00d602005-11-21 23:25:50 +0000734 for(env = first_cpu; env != NULL; env = env->next_cpu) {
735 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
736 }
bellard9fa3e852004-01-04 18:06:42 +0000737
bellard8a8a6082004-10-03 13:36:49 +0000738 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
bellardfd6ce8f2003-05-14 19:00:11 +0000739 page_flush_tb();
bellard9fa3e852004-01-04 18:06:42 +0000740
bellardfd6ce8f2003-05-14 19:00:11 +0000741 code_gen_ptr = code_gen_buffer;
bellardd4e81642003-05-25 16:46:15 +0000742 /* XXX: flush processor icache at this point if cache flush is
743 expensive */
bellarde3db7222005-01-26 22:00:47 +0000744 tb_flush_count++;
bellardfd6ce8f2003-05-14 19:00:11 +0000745}
746
747#ifdef DEBUG_TB_CHECK
748
j_mayerbc98a7e2007-04-04 07:55:12 +0000749static void tb_invalidate_check(target_ulong address)
bellardfd6ce8f2003-05-14 19:00:11 +0000750{
751 TranslationBlock *tb;
752 int i;
753 address &= TARGET_PAGE_MASK;
pbrook99773bd2006-04-16 15:14:59 +0000754 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
755 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
bellardfd6ce8f2003-05-14 19:00:11 +0000756 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
757 address >= tb->pc + tb->size)) {
Blue Swirl0bf9e312009-07-20 17:19:25 +0000758 printf("ERROR invalidate: address=" TARGET_FMT_lx
759 " PC=%08lx size=%04x\n",
pbrook99773bd2006-04-16 15:14:59 +0000760 address, (long)tb->pc, tb->size);
bellardfd6ce8f2003-05-14 19:00:11 +0000761 }
762 }
763 }
764}
765
766/* verify that all the pages have correct rights for code */
767static void tb_page_check(void)
768{
769 TranslationBlock *tb;
770 int i, flags1, flags2;
ths3b46e622007-09-17 08:09:54 +0000771
pbrook99773bd2006-04-16 15:14:59 +0000772 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
773 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
bellardfd6ce8f2003-05-14 19:00:11 +0000774 flags1 = page_get_flags(tb->pc);
775 flags2 = page_get_flags(tb->pc + tb->size - 1);
776 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
777 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
pbrook99773bd2006-04-16 15:14:59 +0000778 (long)tb->pc, tb->size, flags1, flags2);
bellardfd6ce8f2003-05-14 19:00:11 +0000779 }
780 }
781 }
782}
783
784#endif
785
786/* invalidate one TB */
787static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
788 int next_offset)
789{
790 TranslationBlock *tb1;
791 for(;;) {
792 tb1 = *ptb;
793 if (tb1 == tb) {
794 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
795 break;
796 }
797 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
798 }
799}
800
/* Unlink 'tb' from the per-page TB list rooted at *ptb.  List links are
   tagged pointers: the low 2 bits of each entry encode which of the TB's
   two possible pages (0 or 1) the following 'page_next' link belongs to.
   The caller guarantees 'tb' is on the list. */
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        /* split the tagged pointer into page index and real pointer */
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            /* bypass the node using the link for the matching page */
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
817
/* Remove jump slot 'n' (0 or 1) of 'tb' from the circular list of
   incoming jumps kept on the destination TB.  List entries are tagged
   pointers: the low 2 bits hold the jump slot number, and the special
   tag value 2 marks the list head (the destination TB itself, reached
   through its 'jmp_first' field). */
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                /* reached the list head: continue through jmp_first */
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
845
846/* reset the jump entry 'n' of a TB so that it is not chained to
847 another TB */
848static inline void tb_reset_jump(TranslationBlock *tb, int n)
849{
850 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
851}
852
/* Invalidate one TB: remove it from every lookup structure and unchain
   all direct jumps into it.  'page_addr' identifies a page that is being
   invalidated wholesale (its list is reset by the caller), or -1; the TB
   is only unlinked from per-page lists other than that one. */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the physical hash table */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    /* tell the execution loop that a cached TB may now be stale */
    tb_invalidated_flag = 1;

    /* remove the TB from each CPU's virtual-PC jump cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB: walk the tagged circular
       list of incoming jumps (low 2 bits = jump slot, tag 2 = head) and
       retarget each one to its own fall-through path */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
908
/* Set bits [start, start+len) in the bit array 'tab' (LSB-first within
   each byte).  Handles three regions: a partial head byte, full middle
   bytes, and a partial tail byte. */
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end = start + len;
    int head_mask = 0xff << (start & 7);

    tab += start >> 3;
    if ((start & ~7) == (end & ~7)) {
        /* the whole range lies inside a single byte */
        if (start < end) {
            *tab |= head_mask & ~(0xff << (end & 7));
        }
    } else {
        int full_end;

        /* partial head byte */
        *tab++ |= head_mask;
        start = (start + 8) & ~7;
        /* whole middle bytes */
        full_end = end & ~7;
        while (start < full_end) {
            *tab++ = 0xff;
            start += 8;
        }
        /* partial tail byte, if any bits remain */
        if (start < end) {
            *tab |= ~(0xff << (end & 7));
        }
    }
}
935
/* Build the SMC code bitmap for page 'p': one bit per byte of the page,
   set wherever translated code exists.  Used to let the fast write path
   skip invalidation for writes that do not touch code.  Walks the page's
   tagged TB list (low 2 bits of each link = which of the TB's two pages
   this entry is for). */
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    /* one bit per byte of the page, zero-initialized */
    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* TB starts on this page */
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            /* TB spills over from the previous page: only its tail here */
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
963
/* Translate and register a new TB for guest code at (pc, cs_base, flags).
   Allocates a TB slot (flushing everything if the pool or code buffer is
   exhausted), generates host code into the code buffer, then links the TB
   into the physical page tables.  Returns the new TB. */
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    /* bump the code pointer past the generated code, keeping alignment */
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed: a TB may straddle two guest pages */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
ths3b46e622007-09-17 08:09:54 +00001001
bellard9fa3e852004-01-04 18:06:42 +00001002/* invalidate all TBs which intersect with the target physical page
1003 starting in range [start;end[. NOTE: start and end must refer to
bellardd720b932004-04-25 17:57:43 +00001004 the same physical page. 'is_cpu_write_access' should be true if called
1005 from a real cpu write access: the virtual CPU will exit the current
1006 TB if code is modified inside this TB. */
/* Invalidate all TBs intersecting the physical range [start, end) on a
   single page.  With 'is_cpu_write_access' set, this is a real guest
   write: self-modifying-code state is tracked and, if the currently
   executing TB is hit, execution is restarted on a minimal
   single-instruction TB (TARGET_HAS_PRECISE_SMC). */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    /* after enough write faults on a code page, pay for a bitmap so the
       fast path can filter out writes that miss the code bytes */
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;    /* tag: which of the TB's pages this is */
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
1112
1113/* len must be <= 8 and start must be a multiple of len */
/* Fast-path invalidation for a small write of 'len' bytes (len <= 8,
   start aligned to len).  If the page has a code bitmap, consult it to
   skip the expensive range invalidation when the write misses all
   translated code; otherwise always invalidate. */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        /* shift so the written bits are at the low end, then test them */
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
1139
bellard9fa3e852004-01-04 18:06:42 +00001140#if !defined(CONFIG_SOFTMMU)
/* Invalidate every TB on the page containing 'addr' (user-mode build,
   no softmmu).  'pc'/'puc' describe the faulting write, if any: when the
   currently executing TB is among the invalidated ones, a minimal
   single-instruction TB is regenerated and execution resumes from the
   signal context (TARGET_HAS_PRECISE_SMC). */
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        /* locate the TB containing the faulting host pc, if any */
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;    /* tag: which of the TB's pages this is */
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
bellard9fa3e852004-01-04 18:06:42 +00001199#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001200
1201/* add the tb in the target page and protect it if necessary */
/* Register 'tb' on page slot 'n' (0 or 1) for guest page 'page_addr':
   push it onto the page's tagged TB list and arrange for writes to the
   page to be detected (mprotect in user mode, TLB code protection in
   softmmu mode) so self-modifying code invalidates the translation. */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    /* link onto the page list; the slot index 'n' rides in the low bits */
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        /* a host page may cover several guest pages: clear PAGE_WRITE on
           all of them and accumulate their flags for the mprotect call */
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
1254
1255/* Allocate a new translation block. Flush the translation buffer if
1256 too many translation blocks or too much generated code. */
bellardc27004e2005-01-03 23:35:10 +00001257TranslationBlock *tb_alloc(target_ulong pc)
bellardfd6ce8f2003-05-14 19:00:11 +00001258{
1259 TranslationBlock *tb;
bellardfd6ce8f2003-05-14 19:00:11 +00001260
bellard26a5f132008-05-28 12:30:31 +00001261 if (nb_tbs >= code_gen_max_blocks ||
1262 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
bellardd4e81642003-05-25 16:46:15 +00001263 return NULL;
bellardfd6ce8f2003-05-14 19:00:11 +00001264 tb = &tbs[nb_tbs++];
1265 tb->pc = pc;
bellardb448f2f2004-02-25 23:24:04 +00001266 tb->cflags = 0;
bellardd4e81642003-05-25 16:46:15 +00001267 return tb;
1268}
1269
pbrook2e70f6e2008-06-29 01:03:05 +00001270void tb_free(TranslationBlock *tb)
1271{
thsbf20dc02008-06-30 17:22:19 +00001272 /* In practice this is mostly used for single use temporary TB
pbrook2e70f6e2008-06-29 01:03:05 +00001273 Ignore the hard cases and just back up if this TB happens to
1274 be the last one generated. */
1275 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1276 code_gen_ptr = tb->tc_ptr;
1277 nb_tbs--;
1278 }
1279}
1280
bellard9fa3e852004-01-04 18:06:42 +00001281/* add a new TB and link it to the physical page tables. phys_page2 is
1282 (-1) to indicate that only one page contains the TB. */
/* Link a freshly generated TB into the physical page tables and the
   physical hash table.  'phys_page2' is -1 when the TB fits entirely in
   one page.  Runs under mmap_lock so a concurrent invalidation cannot
   observe a half-linked TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done. */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    /* empty incoming-jump list: tag 2 marks the circular list head */
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
1320
bellarda513fe12003-05-27 23:29:48 +00001321/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1322 tb[1].tc_ptr. Return NULL if not found */
1323TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1324{
1325 int m_min, m_max, m;
1326 unsigned long v;
1327 TranslationBlock *tb;
1328
1329 if (nb_tbs <= 0)
1330 return NULL;
1331 if (tc_ptr < (unsigned long)code_gen_buffer ||
1332 tc_ptr >= (unsigned long)code_gen_ptr)
1333 return NULL;
1334 /* binary search (cf Knuth) */
1335 m_min = 0;
1336 m_max = nb_tbs - 1;
1337 while (m_min <= m_max) {
1338 m = (m_min + m_max) >> 1;
1339 tb = &tbs[m];
1340 v = (unsigned long)tb->tc_ptr;
1341 if (v == tc_ptr)
1342 return tb;
1343 else if (tc_ptr < v) {
1344 m_max = m - 1;
1345 } else {
1346 m_min = m + 1;
1347 }
ths5fafdf22007-09-16 21:08:06 +00001348 }
bellarda513fe12003-05-27 23:29:48 +00001349 return &tbs[m_max];
1350}
bellard75012672003-06-21 13:11:07 +00001351
bellardea041c02003-06-25 16:16:50 +00001352static void tb_reset_jump_recursive(TranslationBlock *tb);
1353
/* Break the chained jump 'n' of 'tb': find the destination TB through
   the tagged circular incoming-jump list (low 2 bits = jump slot, tag 2
   = list head), unlink 'tb' from that list, retarget the patched branch,
   then recurse into the destination so a whole chain is unlinked. */
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure now that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}
1392
1393static void tb_reset_jump_recursive(TranslationBlock *tb)
1394{
1395 tb_reset_jump_recursive2(tb, 0);
1396 tb_reset_jump_recursive2(tb, 1);
1397}
1398
bellard1fddef42005-04-17 19:16:13 +00001399#if defined(TARGET_HAS_ICE)
Paul Brook94df27f2010-02-28 23:47:45 +00001400#if defined(CONFIG_USER_ONLY)
/* User-mode variant: guest virtual addresses map directly to page
   addresses, so invalidate the byte at 'pc' without a TLB lookup. */
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
1405#else
/* Softmmu variant: translate the guest pc to a physical page, combine
   with the page offset to get the RAM address, and invalidate the TBs
   covering that byte so the breakpoint takes effect. */
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        /* page not mapped: treat as unassigned I/O */
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
bellardc27004e2005-01-03 23:35:10 +00001423#endif
Paul Brook94df27f2010-02-28 23:47:45 +00001424#endif /* TARGET_HAS_ICE */
bellardd720b932004-04-25 17:57:43 +00001425
Paul Brookc527ee82010-03-01 03:31:14 +00001426#if defined(CONFIG_USER_ONLY)
/* User-mode stub: watchpoints are not supported, so removal is a no-op. */
void cpu_watchpoint_remove_all(CPUState *env, int mask)

{
}
1431
/* User-mode stub: watchpoints are not supported in this configuration. */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
1437#else
pbrook6658ffb2007-03-16 23:58:11 +00001438/* Add a watchpoint. */
aliguoria1d1bb32008-11-18 20:07:32 +00001439int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1440 int flags, CPUWatchpoint **watchpoint)
pbrook6658ffb2007-03-16 23:58:11 +00001441{
aliguorib4051332008-11-18 20:14:20 +00001442 target_ulong len_mask = ~(len - 1);
aliguoric0ce9982008-11-25 22:13:57 +00001443 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001444
aliguorib4051332008-11-18 20:14:20 +00001445 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1446 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1447 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1448 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1449 return -EINVAL;
1450 }
aliguoria1d1bb32008-11-18 20:07:32 +00001451 wp = qemu_malloc(sizeof(*wp));
pbrook6658ffb2007-03-16 23:58:11 +00001452
aliguoria1d1bb32008-11-18 20:07:32 +00001453 wp->vaddr = addr;
aliguorib4051332008-11-18 20:14:20 +00001454 wp->len_mask = len_mask;
aliguoria1d1bb32008-11-18 20:07:32 +00001455 wp->flags = flags;
1456
aliguori2dc9f412008-11-18 20:56:59 +00001457 /* keep all GDB-injected watchpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001458 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001459 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001460 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001461 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001462
pbrook6658ffb2007-03-16 23:58:11 +00001463 tlb_flush_page(env, addr);
aliguoria1d1bb32008-11-18 20:07:32 +00001464
1465 if (watchpoint)
1466 *watchpoint = wp;
1467 return 0;
pbrook6658ffb2007-03-16 23:58:11 +00001468}
1469
aliguoria1d1bb32008-11-18 20:07:32 +00001470/* Remove a specific watchpoint. */
1471int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1472 int flags)
pbrook6658ffb2007-03-16 23:58:11 +00001473{
aliguorib4051332008-11-18 20:14:20 +00001474 target_ulong len_mask = ~(len - 1);
aliguoria1d1bb32008-11-18 20:07:32 +00001475 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001476
Blue Swirl72cf2d42009-09-12 07:36:22 +00001477 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001478 if (addr == wp->vaddr && len_mask == wp->len_mask
aliguori6e140f22008-11-18 20:37:55 +00001479 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
aliguoria1d1bb32008-11-18 20:07:32 +00001480 cpu_watchpoint_remove_by_ref(env, wp);
pbrook6658ffb2007-03-16 23:58:11 +00001481 return 0;
1482 }
1483 }
aliguoria1d1bb32008-11-18 20:07:32 +00001484 return -ENOENT;
pbrook6658ffb2007-03-16 23:58:11 +00001485}
1486
/* Remove a specific watchpoint by reference.  The watchpoint is unlinked
   from the CPU's list, its page is flushed from the TLB so the slow
   watchpoint-checking path is no longer taken, and the memory is freed.
   Caller must pass a watchpoint that is actually on env's list. */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    /* drop the TLB entry so future accesses re-check watchpoints */
    tlb_flush_page(env, watchpoint->vaddr);

    qemu_free(watchpoint);
}
1496
aliguoria1d1bb32008-11-18 20:07:32 +00001497/* Remove all matching watchpoints. */
1498void cpu_watchpoint_remove_all(CPUState *env, int mask)
1499{
aliguoric0ce9982008-11-25 22:13:57 +00001500 CPUWatchpoint *wp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001501
Blue Swirl72cf2d42009-09-12 07:36:22 +00001502 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001503 if (wp->flags & mask)
1504 cpu_watchpoint_remove_by_ref(env, wp);
aliguoric0ce9982008-11-25 22:13:57 +00001505 }
aliguoria1d1bb32008-11-18 20:07:32 +00001506}
Paul Brookc527ee82010-03-01 03:31:14 +00001507#endif
aliguoria1d1bb32008-11-18 20:07:32 +00001508
/* Add a breakpoint at guest pc.
   On targets with in-circuit-emulation debug support (TARGET_HAS_ICE) the
   breakpoint is linked into env->breakpoints and any translated code for
   that pc is invalidated so the breakpoint takes effect immediately.
   If 'breakpoint' is non-NULL it receives the new entry (caller may later
   pass it to cpu_breakpoint_remove_by_ref).
   Returns 0 on success, -ENOSYS when the target has no ICE support. */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = qemu_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    /* force retranslation so the TB for pc re-checks breakpoints */
    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}
1536
1537/* Remove a specific breakpoint. */
1538int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1539{
1540#if defined(TARGET_HAS_ICE)
1541 CPUBreakpoint *bp;
1542
Blue Swirl72cf2d42009-09-12 07:36:22 +00001543 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00001544 if (bp->pc == pc && bp->flags == flags) {
1545 cpu_breakpoint_remove_by_ref(env, bp);
bellard4c3a88a2003-07-26 12:06:08 +00001546 return 0;
aliguoria1d1bb32008-11-18 20:07:32 +00001547 }
bellard4c3a88a2003-07-26 12:06:08 +00001548 }
aliguoria1d1bb32008-11-18 20:07:32 +00001549 return -ENOENT;
bellard4c3a88a2003-07-26 12:06:08 +00001550#else
aliguoria1d1bb32008-11-18 20:07:32 +00001551 return -ENOSYS;
bellard4c3a88a2003-07-26 12:06:08 +00001552#endif
1553}
1554
/* Remove a specific breakpoint by reference: unlink it, invalidate the
   translated code at its pc so execution no longer traps, and free it.
   Caller must pass a breakpoint that is actually on env's list. */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    qemu_free(breakpoint);
#endif
}
1566
1567/* Remove all matching breakpoints. */
1568void cpu_breakpoint_remove_all(CPUState *env, int mask)
1569{
1570#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001571 CPUBreakpoint *bp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001572
Blue Swirl72cf2d42009-09-12 07:36:22 +00001573 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001574 if (bp->flags & mask)
1575 cpu_breakpoint_remove_by_ref(env, bp);
aliguoric0ce9982008-11-25 22:13:57 +00001576 }
bellard4c3a88a2003-07-26 12:06:08 +00001577#endif
1578}
1579
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            /* under KVM the kernel handles single-stepping */
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}
1597
/* Enable or disable low-level logging.  A non-zero log_flags opens the
   log file (append mode once log_append is set); zero closes it.
   Exits the process if the log file cannot be opened. */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            /* _exit, not exit: avoid running atexit handlers / flushing */
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif !defined(_WIN32)
        /* Win32 doesn't support line-buffering and requires size >= 2 */
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        /* subsequent re-opens (e.g. after cpu_set_log_filename) append */
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}
1625
/* Change the log file name and re-open the log with the current level.
   NOTE(review): the previous logfilename is not freed here — it may point
   at a string literal default, so a blind free() would be unsafe; the
   strdup'd copy leaks if this is called repeatedly. */
void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    /* re-open under the new name if logging is enabled */
    cpu_set_log(loglevel);
}
bellardc33a3462003-07-29 20:50:33 +00001635
/* Detach the CPU from the translation block it is currently executing,
   unchaining that TB (and anything it may jump to) so the execution loop
   returns to the main loop promptly.  Used by cpu_interrupt/cpu_exit. */
static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    /* serializes concurrent unlink attempts against each other */
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}
1655
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    int old_mask;

    /* remember previous state to detect newly-raised bits below */
    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

#ifndef CONFIG_USER_ONLY
    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_self(env)) {
        qemu_cpu_kick(env);
        return;
    }
#endif

    if (use_icount) {
        /* force the icount counter to expire so the CPU loop notices */
        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
        /* raising a *new* interrupt outside an I/O instruction would
           desynchronize deterministic icount execution */
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
#endif
    } else {
        /* break out of the current TB chain as soon as possible */
        cpu_unlink_tb(env);
    }
}
1687
bellardb54ad042004-05-20 13:42:52 +00001688void cpu_reset_interrupt(CPUState *env, int mask)
1689{
1690 env->interrupt_request &= ~mask;
1691}
1692
/* Request that the CPU leave its execution loop: set the exit flag first,
   then unchain the current TB so the flag is noticed promptly. */
void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}
1698
/* Table of recognized log categories: bit mask, command-line name, and
   help text.  Terminated by a zero-mask sentinel entry; consumed by
   cpu_str_to_log_mask() and the '-d ?' help output. */
const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },  /* sentinel */
};
1730
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001731#ifndef CONFIG_USER_ONLY
/* List of registered physical-memory clients; they are notified about
   memory-map changes, dirty-bitmap syncs and migration-log toggles. */
static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
    = QLIST_HEAD_INITIALIZER(memory_client_list);
1734
1735static void cpu_notify_set_memory(target_phys_addr_t start_addr,
1736 ram_addr_t size,
1737 ram_addr_t phys_offset)
1738{
1739 CPUPhysMemoryClient *client;
1740 QLIST_FOREACH(client, &memory_client_list, list) {
1741 client->set_memory(client, start_addr, size, phys_offset);
1742 }
1743}
1744
1745static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
1746 target_phys_addr_t end)
1747{
1748 CPUPhysMemoryClient *client;
1749 QLIST_FOREACH(client, &memory_client_list, list) {
1750 int r = client->sync_dirty_bitmap(client, start, end);
1751 if (r < 0)
1752 return r;
1753 }
1754 return 0;
1755}
1756
1757static int cpu_notify_migration_log(int enable)
1758{
1759 CPUPhysMemoryClient *client;
1760 QLIST_FOREACH(client, &memory_client_list, list) {
1761 int r = client->migration_log(client, enable);
1762 if (r < 0)
1763 return r;
1764 }
1765 return 0;
1766}
1767
/* Recursively walk one subtree of the multi-level physical page table,
   reporting every mapped page to the client.  'level' counts remaining
   intermediate levels; at level 0 *lp is an array of PhysPageDesc,
   otherwise an array of child pointers. */
static void phys_page_for_each_1(CPUPhysMemoryClient *client,
                                 int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        /* unpopulated subtree */
        return;
    }
    if (level == 0) {
        PhysPageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
                /* NOTE(review): passes region_offset as the start address —
                   verify this is the intended guest-physical address */
                client->set_memory(client, pd[i].region_offset,
                                   TARGET_PAGE_SIZE, pd[i].phys_offset);
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            phys_page_for_each_1(client, level - 1, pp + i);
        }
    }
}
1791
1792static void phys_page_for_each(CPUPhysMemoryClient *client)
1793{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001794 int i;
1795 for (i = 0; i < P_L1_SIZE; ++i) {
1796 phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
1797 l1_phys_map + 1);
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001798 }
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001799}
1800
/* Register a physical-memory client and immediately replay the current
   memory map into it so it starts with a complete view. */
void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_INSERT_HEAD(&memory_client_list, client, list);
    phys_page_for_each(client);
}
1806
/* Unregister a previously registered physical-memory client.
   The client structure itself is owned by the caller and is not freed. */
void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_REMOVE(client, list);
}
1811#endif
1812
/* Return nonzero iff the n-character prefix s1[0..n) equals the whole
   NUL-terminated string s2 (i.e. s2 has length n and the bytes match). */
static int cmp1(const char *s1, int n, const char *s2)
{
    size_t want = strlen(s2);

    if (want != (size_t)n) {
        return 0;
    }
    return memcmp(s1, s2, n) == 0;
}
ths3b46e622007-09-17 08:09:54 +00001819
/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        /* [p, p1) is the current comma-separated token */
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            /* "all" selects every known category */
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            /* unknown token: signal error to the caller */
            return 0;
        }
        /* NOTE: the "all" branch falls through here with 'item' at the
           zero-mask sentinel, so the extra OR below is a harmless no-op */
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
bellardea041c02003-06-25 16:16:50 +00001852
/* Report a fatal emulation error to stderr (and the qemu log, if open),
   dump the CPU state, and abort().  Never returns.
   'fmt' is a printf-style format; the va_list is copied because it is
   consumed twice (stderr and the log). */
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    /* ap is consumed by vfprintf below; keep a copy for the log path */
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        /* make sure the log reaches disk before we abort */
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        /* restore the default SIGABRT handler so abort() really dumps core
           even if the guest installed its own handler */
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
1892
thsc5be9f02007-02-28 20:20:53 +00001893CPUState *cpu_copy(CPUState *env)
1894{
ths01ba9812007-12-09 02:22:57 +00001895 CPUState *new_env = cpu_init(env->cpu_model_str);
thsc5be9f02007-02-28 20:20:53 +00001896 CPUState *next_cpu = new_env->next_cpu;
1897 int cpu_index = new_env->cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001898#if defined(TARGET_HAS_ICE)
1899 CPUBreakpoint *bp;
1900 CPUWatchpoint *wp;
1901#endif
1902
thsc5be9f02007-02-28 20:20:53 +00001903 memcpy(new_env, env, sizeof(CPUState));
aliguori5a38f082009-01-15 20:16:51 +00001904
1905 /* Preserve chaining and index. */
thsc5be9f02007-02-28 20:20:53 +00001906 new_env->next_cpu = next_cpu;
1907 new_env->cpu_index = cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001908
1909 /* Clone all break/watchpoints.
1910 Note: Once we support ptrace with hw-debug register access, make sure
1911 BP_CPU break/watchpoints are handled correctly on clone. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00001912 QTAILQ_INIT(&env->breakpoints);
1913 QTAILQ_INIT(&env->watchpoints);
aliguori5a38f082009-01-15 20:16:51 +00001914#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001915 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001916 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1917 }
Blue Swirl72cf2d42009-09-12 07:36:22 +00001918 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001919 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1920 wp->flags, NULL);
1921 }
1922#endif
1923
thsc5be9f02007-02-28 20:20:53 +00001924 return new_env;
1925}
1926
bellard01243112004-01-04 15:48:17 +00001927#if !defined(CONFIG_USER_ONLY)
1928
/* Invalidate the TB jump-cache entries that could refer to TBs on the
   flushed page, including TBs that start on the previous page and may
   spill over into it. */
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page. */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
1943
/* Canonical "invalid" TLB entry: all -1 fields can never match a real
   page-aligned address, so lookups against it always miss.  Used to
   reset entries in tlb_flush()/tlb_flush_entry(). */
static CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read  = -1,
    .addr_write = -1,
    .addr_code  = -1,
    .addend     = -1,
};
1950
/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    /* invalidate every slot in every MMU mode */
    for(i = 0; i < CPU_TLB_SIZE; i++) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
        }
    }

    /* the jump cache may hold pointers into now-stale translations */
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    /* forget any pending large-page flush region (see tlb_add_large_page) */
    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
    tlb_flush_count++;
}
1977
bellard274da6b2004-05-20 21:56:27 +00001978static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
bellard61382a52003-10-27 21:22:23 +00001979{
ths5fafdf22007-09-16 21:08:06 +00001980 if (addr == (tlb_entry->addr_read &
bellard84b7b8e2005-11-28 21:19:04 +00001981 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00001982 addr == (tlb_entry->addr_write &
bellard84b7b8e2005-11-28 21:19:04 +00001983 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00001984 addr == (tlb_entry->addr_code &
bellard84b7b8e2005-11-28 21:19:04 +00001985 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
Igor Kovalenko08738982009-07-12 02:15:40 +04001986 *tlb_entry = s_cputlb_empty_entry;
bellard84b7b8e2005-11-28 21:19:04 +00001987 }
bellard61382a52003-10-27 21:22:23 +00001988}
1989
/* Flush a single page from the TLB (all MMU modes) and from the TB jump
   cache.  Falls back to a full flush when the page lies inside a region
   previously mapped with a large page (see tlb_add_large_page). */
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        tlb_flush(env, 1);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    /* the page maps to exactly one slot index in each MMU mode */
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

    tlb_flush_jmp_cache(env, addr);
}
2019
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    /* clearing CODE_DIRTY_FLAG forces the slow write path, which spots
       self-modifying code for this page */
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}
2028
/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    /* setting CODE_DIRTY_FLAG re-enables the fast write path */
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
}
2036
/* If this TLB entry is a writable RAM mapping whose target falls inside
   [start, start+length), mark it TLB_NOTDIRTY so the next write goes
   through the slow path and re-sets the dirty bit. */
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        /* host address of the page this entry maps */
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}
2048
/* Note: start and end must be within the same ram block.  */
/* Clear the given dirty flags for [start, end) and poison every CPU's
   writable TLB entries covering that range so the next write re-dirties
   the pages through the slow path. */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_get_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for(i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
        }
    }
}
2084
aliguori74576192008-10-06 14:02:03 +00002085int cpu_physical_memory_set_dirty_tracking(int enable)
2086{
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002087 int ret = 0;
aliguori74576192008-10-06 14:02:03 +00002088 in_migration = enable;
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002089 ret = cpu_notify_migration_log(!!enable);
2090 return ret;
aliguori74576192008-10-06 14:02:03 +00002091}
2092
/* Query whether dirty-page tracking (migration) is currently enabled. */
int cpu_physical_memory_get_dirty_tracking(void)
{
    return in_migration;
}
2097
Anthony Liguoric227f092009-10-01 16:12:16 -05002098int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
2099 target_phys_addr_t end_addr)
aliguori2bec46d2008-11-24 20:21:41 +00002100{
Michael S. Tsirkin7b8f3b72010-01-27 22:07:21 +02002101 int ret;
Jan Kiszka151f7742009-05-01 20:52:47 +02002102
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002103 ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
Jan Kiszka151f7742009-05-01 20:52:47 +02002104 return ret;
aliguori2bec46d2008-11-24 20:21:41 +00002105}
2106
/* Re-check one writable RAM TLB entry against the global dirty bitmap:
   if its page is no longer marked dirty, tag the entry TLB_NOTDIRTY so
   the next write takes the slow path and re-dirties the page. */
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;
    void *p;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        /* recover the ram_addr from the host pointer stored in the entry */
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
            + tlb_entry->addend);
        ram_addr = qemu_ram_addr_from_host(p);
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}
2121
2122/* update the TLB according to the current state of the dirty bits */
2123void cpu_tlb_update_dirty(CPUState *env)
2124{
2125 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002126 int mmu_idx;
2127 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2128 for(i = 0; i < CPU_TLB_SIZE; i++)
2129 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2130 }
bellard3a7d9292005-08-21 09:26:42 +00002131}
2132
/* If this entry is the NOTDIRTY mapping for vaddr, restore the fast
   (direct) write path by clearing the TLB_NOTDIRTY tag. */
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}
2138
/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    /* a page occupies exactly one slot index per MMU mode */
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
}
2151
/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
                               target_ulong size)
{
    /* size is assumed to be a power of two, so this is the page mask */
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        /* no large-page region recorded yet: start one */
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB.  */
    mask &= env->tlb_flush_mask;
    /* widen the mask until one region covers both the old and new pages */
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}
2174
2175/* Add a new TLB entry. At most one entry for a given virtual address
2176 is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
2177 supplied size is only used by tlb_flush_page. */
2178void tlb_set_page(CPUState *env, target_ulong vaddr,
2179 target_phys_addr_t paddr, int prot,
2180 int mmu_idx, target_ulong size)
bellard9fa3e852004-01-04 18:06:42 +00002181{
bellard92e873b2004-05-21 14:52:29 +00002182 PhysPageDesc *p;
bellard4f2ac232004-04-26 19:44:02 +00002183 unsigned long pd;
bellard9fa3e852004-01-04 18:06:42 +00002184 unsigned int index;
bellard4f2ac232004-04-26 19:44:02 +00002185 target_ulong address;
pbrook0f459d12008-06-09 00:20:13 +00002186 target_ulong code_address;
Paul Brook355b1942010-04-05 00:28:53 +01002187 unsigned long addend;
bellard84b7b8e2005-11-28 21:19:04 +00002188 CPUTLBEntry *te;
aliguoria1d1bb32008-11-18 20:07:32 +00002189 CPUWatchpoint *wp;
Anthony Liguoric227f092009-10-01 16:12:16 -05002190 target_phys_addr_t iotlb;
bellard9fa3e852004-01-04 18:06:42 +00002191
Paul Brookd4c430a2010-03-17 02:14:28 +00002192 assert(size >= TARGET_PAGE_SIZE);
2193 if (size != TARGET_PAGE_SIZE) {
2194 tlb_add_large_page(env, vaddr, size);
2195 }
bellard92e873b2004-05-21 14:52:29 +00002196 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
bellard9fa3e852004-01-04 18:06:42 +00002197 if (!p) {
2198 pd = IO_MEM_UNASSIGNED;
bellard9fa3e852004-01-04 18:06:42 +00002199 } else {
2200 pd = p->phys_offset;
bellard9fa3e852004-01-04 18:06:42 +00002201 }
2202#if defined(DEBUG_TLB)
j_mayer6ebbf392007-10-14 07:07:08 +00002203 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2204 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
bellard9fa3e852004-01-04 18:06:42 +00002205#endif
2206
pbrook0f459d12008-06-09 00:20:13 +00002207 address = vaddr;
2208 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2209 /* IO memory case (romd handled later) */
2210 address |= TLB_MMIO;
2211 }
pbrook5579c7f2009-04-11 14:47:08 +00002212 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
pbrook0f459d12008-06-09 00:20:13 +00002213 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2214 /* Normal RAM. */
2215 iotlb = pd & TARGET_PAGE_MASK;
2216 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2217 iotlb |= IO_MEM_NOTDIRTY;
2218 else
2219 iotlb |= IO_MEM_ROM;
2220 } else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002221 /* IO handlers are currently passed a physical address.
pbrook0f459d12008-06-09 00:20:13 +00002222 It would be nice to pass an offset from the base address
2223 of that region. This would avoid having to special case RAM,
2224 and avoid full address decoding in every device.
2225 We can't use the high bits of pd for this because
2226 IO_MEM_ROMD uses these as a ram address. */
pbrook8da3ff12008-12-01 18:59:50 +00002227 iotlb = (pd & ~TARGET_PAGE_MASK);
2228 if (p) {
pbrook8da3ff12008-12-01 18:59:50 +00002229 iotlb += p->region_offset;
2230 } else {
2231 iotlb += paddr;
2232 }
pbrook0f459d12008-06-09 00:20:13 +00002233 }
pbrook6658ffb2007-03-16 23:58:11 +00002234
pbrook0f459d12008-06-09 00:20:13 +00002235 code_address = address;
2236 /* Make accesses to pages with watchpoints go via the
2237 watchpoint trap routines. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00002238 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00002239 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
pbrook0f459d12008-06-09 00:20:13 +00002240 iotlb = io_mem_watch + paddr;
2241 /* TODO: The memory case can be optimized by not trapping
2242 reads of pages with a write breakpoint. */
2243 address |= TLB_MMIO;
pbrook6658ffb2007-03-16 23:58:11 +00002244 }
pbrook0f459d12008-06-09 00:20:13 +00002245 }
balrogd79acba2007-06-26 20:01:13 +00002246
pbrook0f459d12008-06-09 00:20:13 +00002247 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2248 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2249 te = &env->tlb_table[mmu_idx][index];
2250 te->addend = addend - vaddr;
2251 if (prot & PAGE_READ) {
2252 te->addr_read = address;
2253 } else {
2254 te->addr_read = -1;
2255 }
edgar_igl5c751e92008-05-06 08:44:21 +00002256
pbrook0f459d12008-06-09 00:20:13 +00002257 if (prot & PAGE_EXEC) {
2258 te->addr_code = code_address;
2259 } else {
2260 te->addr_code = -1;
2261 }
2262 if (prot & PAGE_WRITE) {
2263 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2264 (pd & IO_MEM_ROMD)) {
2265 /* Write access calls the I/O callback. */
2266 te->addr_write = address | TLB_MMIO;
2267 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2268 !cpu_physical_memory_is_dirty(pd)) {
2269 te->addr_write = address | TLB_NOTDIRTY;
bellard84b7b8e2005-11-28 21:19:04 +00002270 } else {
pbrook0f459d12008-06-09 00:20:13 +00002271 te->addr_write = address;
bellard9fa3e852004-01-04 18:06:42 +00002272 }
pbrook0f459d12008-06-09 00:20:13 +00002273 } else {
2274 te->addr_write = -1;
bellard9fa3e852004-01-04 18:06:42 +00002275 }
bellard9fa3e852004-01-04 18:06:42 +00002276}
2277
bellard01243112004-01-04 15:48:17 +00002278#else
2279
/* User-mode emulation (CONFIG_USER_ONLY) has no softmmu TLB, so flushing
   is a no-op. */
void tlb_flush(CPUState *env, int flush_global)
{
}
2283
/* No-op for user-mode emulation: there is no TLB to flush a page from. */
void tlb_flush_page(CPUState *env, target_ulong addr)
{
}
2287
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */

/* Accumulator threaded through the page-table walk: adjacent pages with
   identical protection flags are merged into one region before 'fn' is
   called. */
struct walk_memory_regions_data
{
    walk_memory_regions_fn fn;  /* user callback, invoked per merged region */
    void *priv;                 /* opaque pointer forwarded to fn */
    unsigned long start;        /* start of open region, or -1ul if none */
    int prot;                   /* protection flags of the open region */
};
bellard9fa3e852004-01-04 18:06:42 +00002300
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002301static int walk_memory_regions_end(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00002302 abi_ulong end, int new_prot)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002303{
2304 if (data->start != -1ul) {
2305 int rc = data->fn(data->priv, data->start, end, data->prot);
2306 if (rc != 0) {
2307 return rc;
bellard9fa3e852004-01-04 18:06:42 +00002308 }
bellard33417e72003-08-10 21:47:01 +00002309 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002310
2311 data->start = (new_prot ? end : -1ul);
2312 data->prot = new_prot;
2313
2314 return 0;
2315}
2316
/* Recursive helper for walk_memory_regions(): walk one node of the
   multi-level page table.  'base' is the guest address covered by slot 0
   of this node, 'level' the remaining depth (0 = leaf PageDesc array).
   Emits region boundaries via walk_memory_regions_end() whenever the
   protection flags change; returns the first nonzero callback result. */
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    /* An absent node means all pages under it are unmapped (prot 0):
       close any open region here. */
    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        /* Leaf level: scan individual page descriptors. */
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                /* Protection changed: report the region ending here. */
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        /* Interior level: recurse into each child node. */
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}
2354
/* Walk all mapped guest memory, invoking 'fn(priv, start, end, prot)' once
   per maximal run of pages sharing the same protection flags.  Stops early
   and returns fn's result if it returns nonzero; otherwise returns 0. */
int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    unsigned long i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;   /* no region open yet */
    data.prot = 0;

    /* Walk every top-level slot of the page table radix tree. */
    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    /* Flush the final open region, if any. */
    return walk_memory_regions_end(&data, 0, 0);
}
2375
Paul Brookb480d9b2010-03-12 23:23:29 +00002376static int dump_region(void *priv, abi_ulong start,
2377 abi_ulong end, unsigned long prot)
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002378{
2379 FILE *f = (FILE *)priv;
2380
Paul Brookb480d9b2010-03-12 23:23:29 +00002381 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2382 " "TARGET_ABI_FMT_lx" %c%c%c\n",
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002383 start, end, end - start,
2384 ((prot & PAGE_READ) ? 'r' : '-'),
2385 ((prot & PAGE_WRITE) ? 'w' : '-'),
2386 ((prot & PAGE_EXEC) ? 'x' : '-'));
2387
2388 return (0);
2389}
2390
2391/* dump memory mappings */
2392void page_dump(FILE *f)
2393{
2394 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2395 "start", "end", "size", "prot");
2396 walk_memory_regions(f, dump_region);
bellard33417e72003-08-10 21:47:01 +00002397}
2398
pbrook53a59602006-03-25 19:31:22 +00002399int page_get_flags(target_ulong address)
bellard33417e72003-08-10 21:47:01 +00002400{
bellard9fa3e852004-01-04 18:06:42 +00002401 PageDesc *p;
2402
2403 p = page_find(address >> TARGET_PAGE_BITS);
bellard33417e72003-08-10 21:47:01 +00002404 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00002405 return 0;
2406 return p->flags;
bellard33417e72003-08-10 21:47:01 +00002407}
2408
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held. */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    /* Round the range outward to whole target pages. */
    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    /* PAGE_WRITE_ORG remembers that the page is logically writable even
       if PAGE_WRITE is later cleared to protect translated code. */
    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}
2446
ths3d97b402007-11-02 19:02:07 +00002447int page_check_range(target_ulong start, target_ulong len, int flags)
2448{
2449 PageDesc *p;
2450 target_ulong end;
2451 target_ulong addr;
2452
Richard Henderson376a7902010-03-10 15:57:04 -08002453 /* This function should never be called with addresses outside the
2454 guest address space. If this assert fires, it probably indicates
2455 a missing call to h2g_valid. */
Blue Swirl338e9e62010-03-13 09:48:08 +00002456#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2457 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002458#endif
2459
2460 if (start + len - 1 < start) {
2461 /* We've wrapped around. */
balrog55f280c2008-10-28 10:24:11 +00002462 return -1;
Richard Henderson376a7902010-03-10 15:57:04 -08002463 }
balrog55f280c2008-10-28 10:24:11 +00002464
ths3d97b402007-11-02 19:02:07 +00002465 end = TARGET_PAGE_ALIGN(start+len); /* must do before we loose bits in the next step */
2466 start = start & TARGET_PAGE_MASK;
2467
Richard Henderson376a7902010-03-10 15:57:04 -08002468 for (addr = start, len = end - start;
2469 len != 0;
2470 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
ths3d97b402007-11-02 19:02:07 +00002471 p = page_find(addr >> TARGET_PAGE_BITS);
2472 if( !p )
2473 return -1;
2474 if( !(p->flags & PAGE_VALID) )
2475 return -1;
2476
bellarddae32702007-11-14 10:51:00 +00002477 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
ths3d97b402007-11-02 19:02:07 +00002478 return -1;
bellarddae32702007-11-14 10:51:00 +00002479 if (flags & PAGE_WRITE) {
2480 if (!(p->flags & PAGE_WRITE_ORG))
2481 return -1;
2482 /* unprotect the page if it was put read-only because it
2483 contains translated code */
2484 if (!(p->flags & PAGE_WRITE)) {
2485 if (!page_unprotect(addr, 0, NULL))
2486 return -1;
2487 }
2488 return 0;
2489 }
ths3d97b402007-11-02 19:02:07 +00002490 }
2491 return 0;
2492}
2493
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok. */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        /* Unknown page: the fault was not ours to handle. */
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        /* A host page may span several (smaller) target pages; restore
           write access and flush translations for every one of them. */
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        /* Re-enable host write access for the whole host page. */
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
2541
/* No-op for user-mode emulation: there is no softmmu TLB to update when a
   RAM page becomes dirty. */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
bellard9fa3e852004-01-04 18:06:42 +00002546#endif /* defined(CONFIG_USER_ONLY) */
2547
pbrooke2eef172008-06-08 01:09:01 +00002548#if !defined(CONFIG_USER_ONLY)
pbrook8da3ff12008-12-01 18:59:50 +00002549
/* Offset of 'addr' within its target page, used to index subpage tables. */
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
/* Per-page dispatch structure used when a single target page is split
   between several memory regions: each byte offset can have its own
   read/write handlers (indexed by access size), opaque pointer and
   region offset. */
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc * const *mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc * const *mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
} subpage_t;

/* Defined later in this file. */
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset);
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory, ram_addr_t region_offset);
/* Compute the intersection of [start_addr, start_addr + orig_size) with the
   target page containing 'addr', as in-page offsets [start_addr2,
   end_addr2], and set need_subpage when the range does not cover that page
   completely (so a subpage_t is required for it). */
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
2582
/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ. */
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
                                         ram_addr_t size,
                                         ram_addr_t phys_offset,
                                         ram_addr_t region_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    void *subpage;

    /* Let registered memory clients (e.g. KVM) see the change. */
    cpu_notify_set_memory(start_addr, size, phys_offset);

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    /* Round size up to a whole number of target pages. */
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            /* Page already mapped: may need to split it into a subpage if
               the new registration does not cover it entirely or the
               handler only supports sub-width accesses. */
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    /* First split of this page: wrap the old mapping in a
                       fresh subpage container. */
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    /* Page is already a subpage: reuse its container. */
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                /* For RAM/ROM(D), consecutive pages map consecutive
                   ram_addr offsets. */
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            /* Page not mapped yet: allocate its descriptor. */
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    /* Partial-page IO registration: split against the
                       (previously unassigned) remainder of the page. */
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
2671
bellardba863452006-09-24 18:41:10 +00002672/* XXX: temporary until new memory mapping API */
Anthony Liguoric227f092009-10-01 16:12:16 -05002673ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
bellardba863452006-09-24 18:41:10 +00002674{
2675 PhysPageDesc *p;
2676
2677 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2678 if (!p)
2679 return IO_MEM_UNASSIGNED;
2680 return p->phys_offset;
2681}
2682
Anthony Liguoric227f092009-10-01 16:12:16 -05002683void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002684{
2685 if (kvm_enabled())
2686 kvm_coalesce_mmio_region(addr, size);
2687}
2688
Anthony Liguoric227f092009-10-01 16:12:16 -05002689void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002690{
2691 if (kvm_enabled())
2692 kvm_uncoalesce_mmio_region(addr, size);
2693}
2694
/* Drain KVM's buffer of coalesced MMIO writes so device emulation sees
   them; a no-op when KVM is not in use. */
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (!kvm_enabled()) {
        return;
    }
    kvm_flush_coalesced_mmio_buffer();
}
2700
Marcelo Tosattic9027602010-03-01 20:25:08 -03002701#if defined(__linux__) && !defined(TARGET_S390X)
2702
2703#include <sys/vfs.h>
2704
2705#define HUGETLBFS_MAGIC 0x958458f6
2706
/* Return the huge page size (filesystem block size) of the hugetlbfs mount
   at 'path', or 0 on statfs failure.  Only warns -- and still returns the
   block size -- if 'path' is not actually on hugetlbfs. */
static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    /* Retry if interrupted by a signal. */
    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}
2726
/* Allocate 'memory' bytes of guest RAM backed by a (hugetlbfs) file under
   'path' (-mem-path).  Returns the mmap'ed area, or NULL on any failure so
   the caller can fall back or abort.  The backing file is unlinked
   immediately; the open fd is intentionally kept alive for the lifetime of
   the mapping (it is only closed on the mmap error path). */
static void *file_ram_alloc(ram_addr_t memory, const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    /* Requests smaller than one huge page are not worth backing here. */
    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    /* Unlink right away: the file lives on through the open fd/mapping and
       disappears automatically when QEMU exits. */
    unlink(filename);
    free(filename);

    /* Round the size up to a whole number of huge pages. */
    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }
    return area;
}
2792#endif
2793
/* Allocate a new block of guest RAM of (page-aligned) 'size' bytes and
   register it in the global ram_blocks list.  Returns the block's ram_addr_t
   offset within the flat RAM address space.  Exits the process when a
   requested -mem-path backing cannot be provided. */
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = qemu_malloc(sizeof(*new_block));

    if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
        /* Back the RAM with a file (typically hugetlbfs). */
        new_block->host = file_ram_alloc(size, mem_path);
        if (!new_block->host)
            exit(1);
#else
        fprintf(stderr, "-mem-path option unsupported\n");
        exit(1);
#endif
    } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
        /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
        new_block->host = mmap((void*)0x1000000, size,
                                PROT_EXEC|PROT_READ|PROT_WRITE,
                                MAP_SHARED | MAP_ANONYMOUS, -1, 0);
#else
        new_block->host = qemu_vmalloc(size);
#endif
#ifdef MADV_MERGEABLE
        /* Allow KSM to merge identical guest pages. */
        madvise(new_block->host, size, MADV_MERGEABLE);
#endif
    }
    new_block->offset = last_ram_offset;
    new_block->length = size;

    /* Prepend to the global block list. */
    new_block->next = ram_blocks;
    ram_blocks = new_block;

    /* Grow the dirty bitmap and mark the new pages dirty (0xff sets every
       dirty-tracking client's flag for each page). */
    phys_ram_dirty = qemu_realloc(phys_ram_dirty,
        (last_ram_offset + size) >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    last_ram_offset += size;

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}
bellarde9a1ab12007-02-08 23:08:38 +00002841
/* Release guest RAM previously allocated with qemu_ram_alloc().
   Currently unimplemented, so RAM blocks are never reclaimed (leaked)
   for the lifetime of the process. */
void qemu_ram_free(ram_addr_t addr)
{
    /* TODO: implement this.  */
}
2846
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *prev;
    RAMBlock **prevp;
    RAMBlock *block;

    /* Linear search; 'prev' trails one node behind 'block' and 'prevp'
       trails one link behind 'prev'. */
    prev = NULL;
    prevp = &ram_blocks;
    block = ram_blocks;
    while (block && (block->offset > addr
                     || block->offset + block->length <= addr)) {
        if (prev)
            prevp = &prev->next;
        prev = block;
        block = block->next;
    }
    if (!block) {
        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
    /* Promote the entry toward the front of the list so repeatedly-used
       blocks are found quickly.  NOTE(review): because prevp lags one node
       behind prev, this actually swaps the found block with its
       predecessor (it only reaches the head when found at position <= 2),
       despite the original comment claiming "move to start". */
    if (prev) {
        prev->next = block->next;
        block->next = *prevp;
        *prevp = block;
    }
    return block->host + (addr - block->offset);
}
2883
pbrook5579c7f2009-04-11 14:47:08 +00002884/* Some of the softmmu routines need to translate from a host pointer
2885 (typically a TLB entry) back to a ram offset. */
Anthony Liguoric227f092009-10-01 16:12:16 -05002886ram_addr_t qemu_ram_addr_from_host(void *ptr)
pbrook5579c7f2009-04-11 14:47:08 +00002887{
pbrook94a6b542009-04-11 17:15:54 +00002888 RAMBlock *prev;
pbrook94a6b542009-04-11 17:15:54 +00002889 RAMBlock *block;
2890 uint8_t *host = ptr;
2891
pbrook94a6b542009-04-11 17:15:54 +00002892 prev = NULL;
pbrook94a6b542009-04-11 17:15:54 +00002893 block = ram_blocks;
2894 while (block && (block->host > host
2895 || block->host + block->length <= host)) {
pbrook94a6b542009-04-11 17:15:54 +00002896 prev = block;
2897 block = block->next;
2898 }
2899 if (!block) {
2900 fprintf(stderr, "Bad ram pointer %p\n", ptr);
2901 abort();
2902 }
2903 return block->offset + (host - block->host);
pbrook5579c7f2009-04-11 14:47:08 +00002904}
2905
/* 8-bit read from unassigned (unmapped) guest physical memory: optionally
   logged, raises the target's unassigned-access fault on SPARC/MicroBlaze,
   and reads as zero everywhere else. */
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 1);
#endif
    return 0;
}
2916
/* 16-bit read from unassigned guest physical memory; see
   unassigned_mem_readb for the behavior. */
static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 2);
#endif
    return 0;
}
2927
/* 32-bit read from unassigned guest physical memory; see
   unassigned_mem_readb for the behavior. */
static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 4);
#endif
    return 0;
}
2938
/* 8-bit write to unassigned guest physical memory: optionally logged,
   raises the target's unassigned-access fault on SPARC/MicroBlaze, and is
   silently discarded everywhere else. */
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}
2948
Anthony Liguoric227f092009-10-01 16:12:16 -05002949static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
blueswir1e18231a2008-10-06 18:46:28 +00002950{
2951#ifdef DEBUG_UNASSIGNED
2952 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2953#endif
Edgar E. Iglesiasfaed1c22009-09-03 13:25:09 +02002954#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
blueswir1e18231a2008-10-06 18:46:28 +00002955 do_unassigned_access(addr, 1, 0, 0, 2);
2956#endif
2957}
2958
Anthony Liguoric227f092009-10-01 16:12:16 -05002959static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
blueswir1e18231a2008-10-06 18:46:28 +00002960{
2961#ifdef DEBUG_UNASSIGNED
2962 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2963#endif
Edgar E. Iglesiasfaed1c22009-09-03 13:25:09 +02002964#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
blueswir1e18231a2008-10-06 18:46:28 +00002965 do_unassigned_access(addr, 1, 0, 0, 4);
blueswir1b4f0a312007-05-06 17:59:24 +00002966#endif
bellard33417e72003-08-10 21:47:01 +00002967}
2968
Blue Swirld60efc62009-08-25 18:29:31 +00002969static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
bellard33417e72003-08-10 21:47:01 +00002970 unassigned_mem_readb,
blueswir1e18231a2008-10-06 18:46:28 +00002971 unassigned_mem_readw,
2972 unassigned_mem_readl,
bellard33417e72003-08-10 21:47:01 +00002973};
2974
Blue Swirld60efc62009-08-25 18:29:31 +00002975static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
bellard33417e72003-08-10 21:47:01 +00002976 unassigned_mem_writeb,
blueswir1e18231a2008-10-06 18:46:28 +00002977 unassigned_mem_writew,
2978 unassigned_mem_writel,
bellard33417e72003-08-10 21:47:01 +00002979};
2980
Anthony Liguoric227f092009-10-01 16:12:16 -05002981static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
pbrook0f459d12008-06-09 00:20:13 +00002982 uint32_t val)
bellard1ccde1c2004-02-06 19:46:14 +00002983{
bellard3a7d9292005-08-21 09:26:42 +00002984 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002985 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00002986 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2987#if !defined(CONFIG_USER_ONLY)
2988 tb_invalidate_phys_page_fast(ram_addr, 1);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002989 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00002990#endif
2991 }
pbrook5579c7f2009-04-11 14:47:08 +00002992 stb_p(qemu_get_ram_ptr(ram_addr), val);
bellardf23db162005-08-21 19:12:28 +00002993 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002994 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00002995 /* we remove the notdirty callback only if the code has been
2996 flushed */
2997 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00002998 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00002999}
3000
Anthony Liguoric227f092009-10-01 16:12:16 -05003001static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
pbrook0f459d12008-06-09 00:20:13 +00003002 uint32_t val)
bellard1ccde1c2004-02-06 19:46:14 +00003003{
bellard3a7d9292005-08-21 09:26:42 +00003004 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003005 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003006 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3007#if !defined(CONFIG_USER_ONLY)
3008 tb_invalidate_phys_page_fast(ram_addr, 2);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003009 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003010#endif
3011 }
pbrook5579c7f2009-04-11 14:47:08 +00003012 stw_p(qemu_get_ram_ptr(ram_addr), val);
bellardf23db162005-08-21 19:12:28 +00003013 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003014 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00003015 /* we remove the notdirty callback only if the code has been
3016 flushed */
3017 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00003018 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00003019}
3020
Anthony Liguoric227f092009-10-01 16:12:16 -05003021static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
pbrook0f459d12008-06-09 00:20:13 +00003022 uint32_t val)
bellard1ccde1c2004-02-06 19:46:14 +00003023{
bellard3a7d9292005-08-21 09:26:42 +00003024 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003025 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003026 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3027#if !defined(CONFIG_USER_ONLY)
3028 tb_invalidate_phys_page_fast(ram_addr, 4);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003029 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003030#endif
3031 }
pbrook5579c7f2009-04-11 14:47:08 +00003032 stl_p(qemu_get_ram_ptr(ram_addr), val);
bellardf23db162005-08-21 19:12:28 +00003033 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003034 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00003035 /* we remove the notdirty callback only if the code has been
3036 flushed */
3037 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00003038 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00003039}
3040
Blue Swirld60efc62009-08-25 18:29:31 +00003041static CPUReadMemoryFunc * const error_mem_read[3] = {
bellard3a7d9292005-08-21 09:26:42 +00003042 NULL, /* never used */
3043 NULL, /* never used */
3044 NULL, /* never used */
3045};
3046
Blue Swirld60efc62009-08-25 18:29:31 +00003047static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
bellard1ccde1c2004-02-06 19:46:14 +00003048 notdirty_mem_writeb,
3049 notdirty_mem_writew,
3050 notdirty_mem_writel,
3051};
3052
pbrook0f459d12008-06-09 00:20:13 +00003053/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00003054static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00003055{
3056 CPUState *env = cpu_single_env;
aliguori06d55cc2008-11-18 20:24:06 +00003057 target_ulong pc, cs_base;
3058 TranslationBlock *tb;
pbrook0f459d12008-06-09 00:20:13 +00003059 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00003060 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00003061 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00003062
aliguori06d55cc2008-11-18 20:24:06 +00003063 if (env->watchpoint_hit) {
3064 /* We re-entered the check after replacing the TB. Now raise
3065 * the debug interrupt so that is will trigger after the
3066 * current instruction. */
3067 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3068 return;
3069 }
pbrook2e70f6e2008-06-29 01:03:05 +00003070 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003071 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00003072 if ((vaddr == (wp->vaddr & len_mask) ||
3073 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00003074 wp->flags |= BP_WATCHPOINT_HIT;
3075 if (!env->watchpoint_hit) {
3076 env->watchpoint_hit = wp;
3077 tb = tb_find_pc(env->mem_io_pc);
3078 if (!tb) {
3079 cpu_abort(env, "check_watchpoint: could not find TB for "
3080 "pc=%p", (void *)env->mem_io_pc);
3081 }
3082 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
3083 tb_phys_invalidate(tb, -1);
3084 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3085 env->exception_index = EXCP_DEBUG;
3086 } else {
3087 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3088 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3089 }
3090 cpu_resume_from_signal(env, NULL);
aliguori06d55cc2008-11-18 20:24:06 +00003091 }
aliguori6e140f22008-11-18 20:37:55 +00003092 } else {
3093 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00003094 }
3095 }
3096}
3097
pbrook6658ffb2007-03-16 23:58:11 +00003098/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3099 so these check for a hit then pass through to the normal out-of-line
3100 phys routines. */
Anthony Liguoric227f092009-10-01 16:12:16 -05003101static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
pbrook6658ffb2007-03-16 23:58:11 +00003102{
aliguorib4051332008-11-18 20:14:20 +00003103 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
pbrook6658ffb2007-03-16 23:58:11 +00003104 return ldub_phys(addr);
3105}
3106
Anthony Liguoric227f092009-10-01 16:12:16 -05003107static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
pbrook6658ffb2007-03-16 23:58:11 +00003108{
aliguorib4051332008-11-18 20:14:20 +00003109 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
pbrook6658ffb2007-03-16 23:58:11 +00003110 return lduw_phys(addr);
3111}
3112
Anthony Liguoric227f092009-10-01 16:12:16 -05003113static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
pbrook6658ffb2007-03-16 23:58:11 +00003114{
aliguorib4051332008-11-18 20:14:20 +00003115 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
pbrook6658ffb2007-03-16 23:58:11 +00003116 return ldl_phys(addr);
3117}
3118
Anthony Liguoric227f092009-10-01 16:12:16 -05003119static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
pbrook6658ffb2007-03-16 23:58:11 +00003120 uint32_t val)
3121{
aliguorib4051332008-11-18 20:14:20 +00003122 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
pbrook6658ffb2007-03-16 23:58:11 +00003123 stb_phys(addr, val);
3124}
3125
Anthony Liguoric227f092009-10-01 16:12:16 -05003126static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
pbrook6658ffb2007-03-16 23:58:11 +00003127 uint32_t val)
3128{
aliguorib4051332008-11-18 20:14:20 +00003129 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
pbrook6658ffb2007-03-16 23:58:11 +00003130 stw_phys(addr, val);
3131}
3132
Anthony Liguoric227f092009-10-01 16:12:16 -05003133static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
pbrook6658ffb2007-03-16 23:58:11 +00003134 uint32_t val)
3135{
aliguorib4051332008-11-18 20:14:20 +00003136 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
pbrook6658ffb2007-03-16 23:58:11 +00003137 stl_phys(addr, val);
3138}
3139
Blue Swirld60efc62009-08-25 18:29:31 +00003140static CPUReadMemoryFunc * const watch_mem_read[3] = {
pbrook6658ffb2007-03-16 23:58:11 +00003141 watch_mem_readb,
3142 watch_mem_readw,
3143 watch_mem_readl,
3144};
3145
Blue Swirld60efc62009-08-25 18:29:31 +00003146static CPUWriteMemoryFunc * const watch_mem_write[3] = {
pbrook6658ffb2007-03-16 23:58:11 +00003147 watch_mem_writeb,
3148 watch_mem_writew,
3149 watch_mem_writel,
3150};
pbrook6658ffb2007-03-16 23:58:11 +00003151
Anthony Liguoric227f092009-10-01 16:12:16 -05003152static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
blueswir1db7b5422007-05-26 17:36:03 +00003153 unsigned int len)
3154{
blueswir1db7b5422007-05-26 17:36:03 +00003155 uint32_t ret;
3156 unsigned int idx;
3157
pbrook8da3ff12008-12-01 18:59:50 +00003158 idx = SUBPAGE_IDX(addr);
blueswir1db7b5422007-05-26 17:36:03 +00003159#if defined(DEBUG_SUBPAGE)
3160 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3161 mmio, len, addr, idx);
3162#endif
pbrook8da3ff12008-12-01 18:59:50 +00003163 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
3164 addr + mmio->region_offset[idx][0][len]);
blueswir1db7b5422007-05-26 17:36:03 +00003165
3166 return ret;
3167}
3168
Anthony Liguoric227f092009-10-01 16:12:16 -05003169static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
blueswir1db7b5422007-05-26 17:36:03 +00003170 uint32_t value, unsigned int len)
3171{
blueswir1db7b5422007-05-26 17:36:03 +00003172 unsigned int idx;
3173
pbrook8da3ff12008-12-01 18:59:50 +00003174 idx = SUBPAGE_IDX(addr);
blueswir1db7b5422007-05-26 17:36:03 +00003175#if defined(DEBUG_SUBPAGE)
3176 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
3177 mmio, len, addr, idx, value);
3178#endif
pbrook8da3ff12008-12-01 18:59:50 +00003179 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
3180 addr + mmio->region_offset[idx][1][len],
3181 value);
blueswir1db7b5422007-05-26 17:36:03 +00003182}
3183
Anthony Liguoric227f092009-10-01 16:12:16 -05003184static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
blueswir1db7b5422007-05-26 17:36:03 +00003185{
3186#if defined(DEBUG_SUBPAGE)
3187 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3188#endif
3189
3190 return subpage_readlen(opaque, addr, 0);
3191}
3192
Anthony Liguoric227f092009-10-01 16:12:16 -05003193static void subpage_writeb (void *opaque, target_phys_addr_t addr,
blueswir1db7b5422007-05-26 17:36:03 +00003194 uint32_t value)
3195{
3196#if defined(DEBUG_SUBPAGE)
3197 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3198#endif
3199 subpage_writelen(opaque, addr, value, 0);
3200}
3201
Anthony Liguoric227f092009-10-01 16:12:16 -05003202static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
blueswir1db7b5422007-05-26 17:36:03 +00003203{
3204#if defined(DEBUG_SUBPAGE)
3205 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3206#endif
3207
3208 return subpage_readlen(opaque, addr, 1);
3209}
3210
Anthony Liguoric227f092009-10-01 16:12:16 -05003211static void subpage_writew (void *opaque, target_phys_addr_t addr,
blueswir1db7b5422007-05-26 17:36:03 +00003212 uint32_t value)
3213{
3214#if defined(DEBUG_SUBPAGE)
3215 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3216#endif
3217 subpage_writelen(opaque, addr, value, 1);
3218}
3219
Anthony Liguoric227f092009-10-01 16:12:16 -05003220static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
blueswir1db7b5422007-05-26 17:36:03 +00003221{
3222#if defined(DEBUG_SUBPAGE)
3223 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3224#endif
3225
3226 return subpage_readlen(opaque, addr, 2);
3227}
3228
3229static void subpage_writel (void *opaque,
Anthony Liguoric227f092009-10-01 16:12:16 -05003230 target_phys_addr_t addr, uint32_t value)
blueswir1db7b5422007-05-26 17:36:03 +00003231{
3232#if defined(DEBUG_SUBPAGE)
3233 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3234#endif
3235 subpage_writelen(opaque, addr, value, 2);
3236}
3237
Blue Swirld60efc62009-08-25 18:29:31 +00003238static CPUReadMemoryFunc * const subpage_read[] = {
blueswir1db7b5422007-05-26 17:36:03 +00003239 &subpage_readb,
3240 &subpage_readw,
3241 &subpage_readl,
3242};
3243
Blue Swirld60efc62009-08-25 18:29:31 +00003244static CPUWriteMemoryFunc * const subpage_write[] = {
blueswir1db7b5422007-05-26 17:36:03 +00003245 &subpage_writeb,
3246 &subpage_writew,
3247 &subpage_writel,
3248};
3249
Anthony Liguoric227f092009-10-01 16:12:16 -05003250static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3251 ram_addr_t memory, ram_addr_t region_offset)
blueswir1db7b5422007-05-26 17:36:03 +00003252{
3253 int idx, eidx;
blueswir14254fab2008-01-01 16:57:19 +00003254 unsigned int i;
blueswir1db7b5422007-05-26 17:36:03 +00003255
3256 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3257 return -1;
3258 idx = SUBPAGE_IDX(start);
3259 eidx = SUBPAGE_IDX(end);
3260#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00003261 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
blueswir1db7b5422007-05-26 17:36:03 +00003262 mmio, start, end, idx, eidx, memory);
3263#endif
3264 memory >>= IO_MEM_SHIFT;
3265 for (; idx <= eidx; idx++) {
blueswir14254fab2008-01-01 16:57:19 +00003266 for (i = 0; i < 4; i++) {
blueswir13ee89922008-01-02 19:45:26 +00003267 if (io_mem_read[memory][i]) {
3268 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
3269 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
pbrook8da3ff12008-12-01 18:59:50 +00003270 mmio->region_offset[idx][0][i] = region_offset;
blueswir13ee89922008-01-02 19:45:26 +00003271 }
3272 if (io_mem_write[memory][i]) {
3273 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
3274 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
pbrook8da3ff12008-12-01 18:59:50 +00003275 mmio->region_offset[idx][1][i] = region_offset;
blueswir13ee89922008-01-02 19:45:26 +00003276 }
blueswir14254fab2008-01-01 16:57:19 +00003277 }
blueswir1db7b5422007-05-26 17:36:03 +00003278 }
3279
3280 return 0;
3281}
3282
Anthony Liguoric227f092009-10-01 16:12:16 -05003283static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3284 ram_addr_t orig_memory, ram_addr_t region_offset)
blueswir1db7b5422007-05-26 17:36:03 +00003285{
Anthony Liguoric227f092009-10-01 16:12:16 -05003286 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00003287 int subpage_memory;
3288
Anthony Liguoric227f092009-10-01 16:12:16 -05003289 mmio = qemu_mallocz(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00003290
3291 mmio->base = base;
Avi Kivity1eed09c2009-06-14 11:38:51 +03003292 subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
blueswir1db7b5422007-05-26 17:36:03 +00003293#if defined(DEBUG_SUBPAGE)
aliguori1eec6142009-02-05 22:06:18 +00003294 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3295 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
blueswir1db7b5422007-05-26 17:36:03 +00003296#endif
aliguori1eec6142009-02-05 22:06:18 +00003297 *phys = subpage_memory | IO_MEM_SUBPAGE;
3298 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
pbrook8da3ff12008-12-01 18:59:50 +00003299 region_offset);
blueswir1db7b5422007-05-26 17:36:03 +00003300
3301 return mmio;
3302}
3303
aliguori88715652009-02-11 15:20:58 +00003304static int get_free_io_mem_idx(void)
3305{
3306 int i;
3307
3308 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3309 if (!io_mem_used[i]) {
3310 io_mem_used[i] = 1;
3311 return i;
3312 }
Riku Voipioc6703b42009-12-03 15:56:05 +02003313 fprintf(stderr, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
aliguori88715652009-02-11 15:20:58 +00003314 return -1;
3315}
3316
bellard33417e72003-08-10 21:47:01 +00003317/* mem_read and mem_write are arrays of functions containing the
3318 function to access byte (index 0), word (index 1) and dword (index
Paul Brook0b4e6e32009-04-30 18:37:55 +01003319 2). Functions can be omitted with a NULL function pointer.
blueswir13ee89922008-01-02 19:45:26 +00003320 If io_index is non zero, the corresponding io zone is
blueswir14254fab2008-01-01 16:57:19 +00003321 modified. If it is zero, a new io zone is allocated. The return
3322 value can be used with cpu_register_physical_memory(). (-1) is
3323 returned if error. */
Avi Kivity1eed09c2009-06-14 11:38:51 +03003324static int cpu_register_io_memory_fixed(int io_index,
Blue Swirld60efc62009-08-25 18:29:31 +00003325 CPUReadMemoryFunc * const *mem_read,
3326 CPUWriteMemoryFunc * const *mem_write,
Avi Kivity1eed09c2009-06-14 11:38:51 +03003327 void *opaque)
bellard33417e72003-08-10 21:47:01 +00003328{
blueswir14254fab2008-01-01 16:57:19 +00003329 int i, subwidth = 0;
bellard33417e72003-08-10 21:47:01 +00003330
3331 if (io_index <= 0) {
aliguori88715652009-02-11 15:20:58 +00003332 io_index = get_free_io_mem_idx();
3333 if (io_index == -1)
3334 return io_index;
bellard33417e72003-08-10 21:47:01 +00003335 } else {
Avi Kivity1eed09c2009-06-14 11:38:51 +03003336 io_index >>= IO_MEM_SHIFT;
bellard33417e72003-08-10 21:47:01 +00003337 if (io_index >= IO_MEM_NB_ENTRIES)
3338 return -1;
3339 }
bellardb5ff1b32005-11-26 10:38:39 +00003340
bellard33417e72003-08-10 21:47:01 +00003341 for(i = 0;i < 3; i++) {
blueswir14254fab2008-01-01 16:57:19 +00003342 if (!mem_read[i] || !mem_write[i])
3343 subwidth = IO_MEM_SUBWIDTH;
bellard33417e72003-08-10 21:47:01 +00003344 io_mem_read[io_index][i] = mem_read[i];
3345 io_mem_write[io_index][i] = mem_write[i];
3346 }
bellarda4193c82004-06-03 14:01:43 +00003347 io_mem_opaque[io_index] = opaque;
blueswir14254fab2008-01-01 16:57:19 +00003348 return (io_index << IO_MEM_SHIFT) | subwidth;
bellard33417e72003-08-10 21:47:01 +00003349}
bellard61382a52003-10-27 21:22:23 +00003350
Blue Swirld60efc62009-08-25 18:29:31 +00003351int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3352 CPUWriteMemoryFunc * const *mem_write,
Avi Kivity1eed09c2009-06-14 11:38:51 +03003353 void *opaque)
3354{
3355 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
3356}
3357
aliguori88715652009-02-11 15:20:58 +00003358void cpu_unregister_io_memory(int io_table_address)
3359{
3360 int i;
3361 int io_index = io_table_address >> IO_MEM_SHIFT;
3362
3363 for (i=0;i < 3; i++) {
3364 io_mem_read[io_index][i] = unassigned_mem_read[i];
3365 io_mem_write[io_index][i] = unassigned_mem_write[i];
3366 }
3367 io_mem_opaque[io_index] = NULL;
3368 io_mem_used[io_index] = 0;
3369}
3370
Avi Kivitye9179ce2009-06-14 11:38:52 +03003371static void io_mem_init(void)
3372{
3373 int i;
3374
3375 cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
3376 cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
3377 cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
3378 for (i=0; i<5; i++)
3379 io_mem_used[i] = 1;
3380
3381 io_mem_watch = cpu_register_io_memory(watch_mem_read,
3382 watch_mem_write, NULL);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003383}
3384
pbrooke2eef172008-06-08 01:09:01 +00003385#endif /* !defined(CONFIG_USER_ONLY) */
3386
bellard13eb76e2004-01-24 15:23:36 +00003387/* physical memory access (slow version, mainly for debug) */
3388#if defined(CONFIG_USER_ONLY)
Paul Brooka68fe892010-03-01 00:08:59 +00003389int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3390 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003391{
3392 int l, flags;
3393 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00003394 void * p;
bellard13eb76e2004-01-24 15:23:36 +00003395
3396 while (len > 0) {
3397 page = addr & TARGET_PAGE_MASK;
3398 l = (page + TARGET_PAGE_SIZE) - addr;
3399 if (l > len)
3400 l = len;
3401 flags = page_get_flags(page);
3402 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00003403 return -1;
bellard13eb76e2004-01-24 15:23:36 +00003404 if (is_write) {
3405 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00003406 return -1;
bellard579a97f2007-11-11 14:26:47 +00003407 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003408 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00003409 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003410 memcpy(p, buf, l);
3411 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00003412 } else {
3413 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00003414 return -1;
bellard579a97f2007-11-11 14:26:47 +00003415 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003416 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00003417 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003418 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00003419 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00003420 }
3421 len -= l;
3422 buf += l;
3423 addr += l;
3424 }
Paul Brooka68fe892010-03-01 00:08:59 +00003425 return 0;
bellard13eb76e2004-01-24 15:23:36 +00003426}
bellard8df1cd02005-01-28 22:37:22 +00003427
bellard13eb76e2004-01-24 15:23:36 +00003428#else
Anthony Liguoric227f092009-10-01 16:12:16 -05003429void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
bellard13eb76e2004-01-24 15:23:36 +00003430 int len, int is_write)
3431{
3432 int l, io_index;
3433 uint8_t *ptr;
3434 uint32_t val;
Anthony Liguoric227f092009-10-01 16:12:16 -05003435 target_phys_addr_t page;
bellard2e126692004-04-25 21:28:44 +00003436 unsigned long pd;
bellard92e873b2004-05-21 14:52:29 +00003437 PhysPageDesc *p;
ths3b46e622007-09-17 08:09:54 +00003438
bellard13eb76e2004-01-24 15:23:36 +00003439 while (len > 0) {
3440 page = addr & TARGET_PAGE_MASK;
3441 l = (page + TARGET_PAGE_SIZE) - addr;
3442 if (l > len)
3443 l = len;
bellard92e873b2004-05-21 14:52:29 +00003444 p = phys_page_find(page >> TARGET_PAGE_BITS);
bellard13eb76e2004-01-24 15:23:36 +00003445 if (!p) {
3446 pd = IO_MEM_UNASSIGNED;
3447 } else {
3448 pd = p->phys_offset;
3449 }
ths3b46e622007-09-17 08:09:54 +00003450
bellard13eb76e2004-01-24 15:23:36 +00003451 if (is_write) {
bellard3a7d9292005-08-21 09:26:42 +00003452 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
Anthony Liguoric227f092009-10-01 16:12:16 -05003453 target_phys_addr_t addr1 = addr;
bellard13eb76e2004-01-24 15:23:36 +00003454 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00003455 if (p)
aurel326c2934d2009-02-18 21:37:17 +00003456 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bellard6a00d602005-11-21 23:25:50 +00003457 /* XXX: could force cpu_single_env to NULL to avoid
3458 potential bugs */
aurel326c2934d2009-02-18 21:37:17 +00003459 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003460 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003461 val = ldl_p(buf);
aurel326c2934d2009-02-18 21:37:17 +00003462 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
bellard13eb76e2004-01-24 15:23:36 +00003463 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003464 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003465 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003466 val = lduw_p(buf);
aurel326c2934d2009-02-18 21:37:17 +00003467 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
bellard13eb76e2004-01-24 15:23:36 +00003468 l = 2;
3469 } else {
bellard1c213d12005-09-03 10:49:04 +00003470 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003471 val = ldub_p(buf);
aurel326c2934d2009-02-18 21:37:17 +00003472 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
bellard13eb76e2004-01-24 15:23:36 +00003473 l = 1;
3474 }
3475 } else {
bellardb448f2f2004-02-25 23:24:04 +00003476 unsigned long addr1;
3477 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
bellard13eb76e2004-01-24 15:23:36 +00003478 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003479 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00003480 memcpy(ptr, buf, l);
bellard3a7d9292005-08-21 09:26:42 +00003481 if (!cpu_physical_memory_is_dirty(addr1)) {
3482 /* invalidate code */
3483 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3484 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003485 cpu_physical_memory_set_dirty_flags(
3486 addr1, (0xff & ~CODE_DIRTY_FLAG));
bellard3a7d9292005-08-21 09:26:42 +00003487 }
bellard13eb76e2004-01-24 15:23:36 +00003488 }
3489 } else {
ths5fafdf22007-09-16 21:08:06 +00003490 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
bellard2a4188a2006-06-25 21:54:59 +00003491 !(pd & IO_MEM_ROMD)) {
Anthony Liguoric227f092009-10-01 16:12:16 -05003492 target_phys_addr_t addr1 = addr;
bellard13eb76e2004-01-24 15:23:36 +00003493 /* I/O case */
3494 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00003495 if (p)
aurel326c2934d2009-02-18 21:37:17 +00003496 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3497 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003498 /* 32 bit read access */
aurel326c2934d2009-02-18 21:37:17 +00003499 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
bellardc27004e2005-01-03 23:35:10 +00003500 stl_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003501 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003502 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003503 /* 16 bit read access */
aurel326c2934d2009-02-18 21:37:17 +00003504 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
bellardc27004e2005-01-03 23:35:10 +00003505 stw_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003506 l = 2;
3507 } else {
bellard1c213d12005-09-03 10:49:04 +00003508 /* 8 bit read access */
aurel326c2934d2009-02-18 21:37:17 +00003509 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
bellardc27004e2005-01-03 23:35:10 +00003510 stb_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003511 l = 1;
3512 }
3513 } else {
3514 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003515 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bellard13eb76e2004-01-24 15:23:36 +00003516 (addr & ~TARGET_PAGE_MASK);
3517 memcpy(buf, ptr, l);
3518 }
3519 }
3520 len -= l;
3521 buf += l;
3522 addr += l;
3523 }
3524}
bellard8df1cd02005-01-28 22:37:22 +00003525
bellardd0ecd2a2006-04-23 17:14:48 +00003526/* used for ROM loading : can write in RAM and ROM */
Anthony Liguoric227f092009-10-01 16:12:16 -05003527void cpu_physical_memory_write_rom(target_phys_addr_t addr,
bellardd0ecd2a2006-04-23 17:14:48 +00003528 const uint8_t *buf, int len)
3529{
3530 int l;
3531 uint8_t *ptr;
Anthony Liguoric227f092009-10-01 16:12:16 -05003532 target_phys_addr_t page;
bellardd0ecd2a2006-04-23 17:14:48 +00003533 unsigned long pd;
3534 PhysPageDesc *p;
ths3b46e622007-09-17 08:09:54 +00003535
bellardd0ecd2a2006-04-23 17:14:48 +00003536 while (len > 0) {
3537 page = addr & TARGET_PAGE_MASK;
3538 l = (page + TARGET_PAGE_SIZE) - addr;
3539 if (l > len)
3540 l = len;
3541 p = phys_page_find(page >> TARGET_PAGE_BITS);
3542 if (!p) {
3543 pd = IO_MEM_UNASSIGNED;
3544 } else {
3545 pd = p->phys_offset;
3546 }
ths3b46e622007-09-17 08:09:54 +00003547
bellardd0ecd2a2006-04-23 17:14:48 +00003548 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
bellard2a4188a2006-06-25 21:54:59 +00003549 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3550 !(pd & IO_MEM_ROMD)) {
bellardd0ecd2a2006-04-23 17:14:48 +00003551 /* do nothing */
3552 } else {
3553 unsigned long addr1;
3554 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3555 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003556 ptr = qemu_get_ram_ptr(addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00003557 memcpy(ptr, buf, l);
3558 }
3559 len -= l;
3560 buf += l;
3561 addr += l;
3562 }
3563}
3564
aliguori6d16c2f2009-01-22 16:59:11 +00003565typedef struct {
3566 void *buffer;
Anthony Liguoric227f092009-10-01 16:12:16 -05003567 target_phys_addr_t addr;
3568 target_phys_addr_t len;
aliguori6d16c2f2009-01-22 16:59:11 +00003569} BounceBuffer;
3570
3571static BounceBuffer bounce;
3572
aliguoriba223c22009-01-22 16:59:16 +00003573typedef struct MapClient {
3574 void *opaque;
3575 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00003576 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00003577} MapClient;
3578
Blue Swirl72cf2d42009-09-12 07:36:22 +00003579static QLIST_HEAD(map_client_list, MapClient) map_client_list
3580 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003581
3582void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3583{
3584 MapClient *client = qemu_malloc(sizeof(*client));
3585
3586 client->opaque = opaque;
3587 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003588 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00003589 return client;
3590}
3591
3592void cpu_unregister_map_client(void *_client)
3593{
3594 MapClient *client = (MapClient *)_client;
3595
Blue Swirl72cf2d42009-09-12 07:36:22 +00003596 QLIST_REMOVE(client, link);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09003597 qemu_free(client);
aliguoriba223c22009-01-22 16:59:16 +00003598}
3599
3600static void cpu_notify_map_clients(void)
3601{
3602 MapClient *client;
3603
Blue Swirl72cf2d42009-09-12 07:36:22 +00003604 while (!QLIST_EMPTY(&map_client_list)) {
3605 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003606 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09003607 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00003608 }
3609}
3610
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;   /* bytes successfully mapped so far */
    int l;
    uint8_t *ret = NULL;           /* host address of the start of the mapping */
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    /* Walk the range one guest page at a time. */
    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;  /* bytes left in this page */
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            /* Not plain RAM: must go through the single global bounce
               buffer.  Only usable if nothing has been mapped yet in this
               call and no other bounce mapping is outstanding. */
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                /* Read mapping: pre-fill the bounce buffer from guest
                   memory/MMIO so the caller sees current data. */
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            /* Direct RAM case: translate to a host pointer. */
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            /* Host addresses are no longer contiguous: stop and return
               the contiguous prefix mapped so far. */
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}
3672
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        /* Direct RAM mapping: no copy-back needed, but written pages must
           be dirty-marked and any cached translations invalidated. */
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    /* Bounce-buffer mapping: flush writes back to the guest, then free
       the buffer and wake anyone waiting for it. */
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;   /* marks the bounce buffer as free again */
    cpu_notify_map_clients();
}
bellardd0ecd2a2006-04-23 17:14:48 +00003708
/* Load a 32-bit value from guest physical memory.
   warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    /* Look up the page descriptor; unmapped pages read as unassigned I/O. */
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    /* Region types above IO_MEM_ROM that are not ROMD devices go through
       the registered MMIO read handlers; RAM/ROM is read directly. */
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case: index [2] selects the 4-byte access handler. */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
3740
/* Load a 64-bit value from guest physical memory.
   warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    /* Look up the page descriptor; unmapped pages read as unassigned I/O. */
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case: no 64-bit MMIO handler exists, so the access is split
           into two 32-bit reads ordered by target endianness. */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
3778
bellardaab33092005-10-30 20:48:42 +00003779/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05003780uint32_t ldub_phys(target_phys_addr_t addr)
bellardaab33092005-10-30 20:48:42 +00003781{
3782 uint8_t val;
3783 cpu_physical_memory_read(addr, &val, 1);
3784 return val;
3785}
3786
3787/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05003788uint32_t lduw_phys(target_phys_addr_t addr)
bellardaab33092005-10-30 20:48:42 +00003789{
3790 uint16_t val;
3791 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3792 return tswap16(val);
3793}
3794
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    /* Look up the page descriptor; unmapped pages hit unassigned I/O. */
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* Not RAM: dispatch to the 4-byte MMIO write handler. */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        /* Even though this helper deliberately skips normal dirty
           tracking, migration still needs to see the page as dirty so it
           gets re-sent. */
        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
3833
/* Store a 64-bit value to guest physical memory without marking the RAM
   page dirty or invalidating translated code; see stl_phys_notdirty(). */
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    /* Look up the page descriptor; unmapped pages hit unassigned I/O. */
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* Not RAM: split into two 4-byte MMIO writes ordered by target
           endianness (no 64-bit handler slot exists). */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        /* RAM case: direct store, deliberately without dirty tracking. */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
3865
/* Store a 32-bit value to guest physical memory, with full dirty
   tracking and translated-code invalidation.
   warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    /* Look up the page descriptor; unmapped pages hit unassigned I/O. */
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* Not RAM: dispatch to the 4-byte MMIO write handler. */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}
3901
bellardaab33092005-10-30 20:48:42 +00003902/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05003903void stb_phys(target_phys_addr_t addr, uint32_t val)
bellardaab33092005-10-30 20:48:42 +00003904{
3905 uint8_t v = val;
3906 cpu_physical_memory_write(addr, &v, 1);
3907}
3908
3909/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05003910void stw_phys(target_phys_addr_t addr, uint32_t val)
bellardaab33092005-10-30 20:48:42 +00003911{
3912 uint16_t v = tswap16(val);
3913 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3914}
3915
3916/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05003917void stq_phys(target_phys_addr_t addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00003918{
3919 val = tswap64(val);
3920 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3921}
3922
aliguori5e2972f2009-03-28 17:51:36 +00003923/* virtual memory access for debug (includes writing to ROM) */
ths5fafdf22007-09-16 21:08:06 +00003924int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00003925 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003926{
3927 int l;
Anthony Liguoric227f092009-10-01 16:12:16 -05003928 target_phys_addr_t phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00003929 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00003930
3931 while (len > 0) {
3932 page = addr & TARGET_PAGE_MASK;
3933 phys_addr = cpu_get_phys_page_debug(env, page);
3934 /* if no physical page mapped, return an error */
3935 if (phys_addr == -1)
3936 return -1;
3937 l = (page + TARGET_PAGE_SIZE) - addr;
3938 if (l > len)
3939 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00003940 phys_addr += (addr & ~TARGET_PAGE_MASK);
aliguori5e2972f2009-03-28 17:51:36 +00003941 if (is_write)
3942 cpu_physical_memory_write_rom(phys_addr, buf, l);
3943 else
aliguori5e2972f2009-03-28 17:51:36 +00003944 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
bellard13eb76e2004-01-24 15:23:36 +00003945 len -= l;
3946 buf += l;
3947 addr += l;
3948 }
3949 return 0;
3950}
Paul Brooka68fe892010-03-01 00:08:59 +00003951#endif
bellard13eb76e2004-01-24 15:23:36 +00003952
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    /* Find the TB containing the host return address of the faulting
       access. */
    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    /* Instruction budget the TB started with. */
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred. */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn. */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB. If this is not
       the first instruction in a TB then re-execute the preceding
       branch. */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    /* CF_LAST_IO forces the new TB to treat its final insn as an I/O
       insn, so execution will stop exactly there. */
    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB. */
    cpu_resume_from_signal(env, NULL);
}
4011
Paul Brookb3755a92010-03-12 16:54:58 +00004012#if !defined(CONFIG_USER_ONLY)
4013
bellarde3db7222005-01-26 22:00:47 +00004014void dump_exec_info(FILE *f,
4015 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
4016{
4017 int i, target_code_size, max_target_code_size;
4018 int direct_jmp_count, direct_jmp2_count, cross_page;
4019 TranslationBlock *tb;
ths3b46e622007-09-17 08:09:54 +00004020
bellarde3db7222005-01-26 22:00:47 +00004021 target_code_size = 0;
4022 max_target_code_size = 0;
4023 cross_page = 0;
4024 direct_jmp_count = 0;
4025 direct_jmp2_count = 0;
4026 for(i = 0; i < nb_tbs; i++) {
4027 tb = &tbs[i];
4028 target_code_size += tb->size;
4029 if (tb->size > max_target_code_size)
4030 max_target_code_size = tb->size;
4031 if (tb->page_addr[1] != -1)
4032 cross_page++;
4033 if (tb->tb_next_offset[0] != 0xffff) {
4034 direct_jmp_count++;
4035 if (tb->tb_next_offset[1] != 0xffff) {
4036 direct_jmp2_count++;
4037 }
4038 }
4039 }
4040 /* XXX: avoid using doubles ? */
bellard57fec1f2008-02-01 10:50:11 +00004041 cpu_fprintf(f, "Translation buffer state:\n");
bellard26a5f132008-05-28 12:30:31 +00004042 cpu_fprintf(f, "gen code size %ld/%ld\n",
4043 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4044 cpu_fprintf(f, "TB count %d/%d\n",
4045 nb_tbs, code_gen_max_blocks);
ths5fafdf22007-09-16 21:08:06 +00004046 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
bellarde3db7222005-01-26 22:00:47 +00004047 nb_tbs ? target_code_size / nb_tbs : 0,
4048 max_target_code_size);
ths5fafdf22007-09-16 21:08:06 +00004049 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
bellarde3db7222005-01-26 22:00:47 +00004050 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4051 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
ths5fafdf22007-09-16 21:08:06 +00004052 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4053 cross_page,
bellarde3db7222005-01-26 22:00:47 +00004054 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4055 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
ths5fafdf22007-09-16 21:08:06 +00004056 direct_jmp_count,
bellarde3db7222005-01-26 22:00:47 +00004057 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4058 direct_jmp2_count,
4059 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
bellard57fec1f2008-02-01 10:50:11 +00004060 cpu_fprintf(f, "\nStatistics:\n");
bellarde3db7222005-01-26 22:00:47 +00004061 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4062 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4063 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
bellardb67d9a52008-05-23 09:57:34 +00004064 tcg_dump_info(f, cpu_fprintf);
bellarde3db7222005-01-26 22:00:47 +00004065}
4066
/* Instantiate the "_cmmu" (code-access) variants of the softmmu load
   helpers for 1, 2, 4 and 8 byte accesses (SHIFT = log2 of the access
   size).  These are used when the translator itself fetches guest code;
   GETPC() is NULL because there is no guest insn to restart, and `env`
   is aliased to the global cpu_single_env for the template's benefit. */
#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

/* Stop shadowing cpu_single_env beyond the template instantiations. */
#undef env
4085
4086#endif