/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "tcg.h"
#include "hw/hw.h"
#include "osdep.h"
#include "kvm.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation. */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

static TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section \
    __attribute__((__section__(".gen_code"))) \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section \
    __attribute__((aligned (16)))
#else
#define code_gen_section \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
uint8_t *phys_ram_dirty;
static int in_migration;

typedef struct RAMBlock {
    uint8_t *host;
    ram_addr_t offset;
    ram_addr_t length;
    struct RAMBlock *next;
} RAMBlock;

static RAMBlock *ram_blocks;
/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
   then we can no longer assume contiguous ram offsets, and external uses
   of this variable will break. */
ram_addr_t last_ram_offset;
#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting. */
int use_icount = 0;
/* Current instruction counter. While executing translated code this may
   include some instructions that have not yet been executed. */
int64_t qemu_icount;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
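
/* Example of the resulting split for a 32-bit address space, assuming
   TARGET_PAGE_BITS == 12 (the actual value is target-dependent, so the 12
   here is only an illustrative assumption):
       L1_BITS = 32 - 10 - 12 = 10, so L1_SIZE == L2_SIZE == 1024.
   A page index (addr >> TARGET_PAGE_BITS) is then decoded as
       l1 index: index >> L2_BITS         -- selects a PageDesc array
       l2 index: index & (L2_SIZE - 1)    -- selects the entry in that array
   which is exactly how page_find()/page_find_alloc() walk the table below. */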

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
static PhysPageDesc **l1_phys_map;

#if !defined(CONFIG_USER_ONLY)
static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc * const *mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc * const *mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
} subpage_t;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
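
/* The POSIX map_exec() rounds [addr, addr + size) out to host page
   boundaries because mprotect() only operates on whole pages; the Win32
   variant can pass the range through unchanged since VirtualProtect()
   already applies the new protection to every page the range touches. */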

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        mmap_lock();
        last_brk = (unsigned long)sbrk(0);
        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                  (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
        mmap_unlock();
    }
#endif
}

static inline PageDesc **page_l1_map(target_ulong index)
{
#if TARGET_LONG_BITS > 32
    /* Host memory outside guest VM.  For 32-bit targets we have already
       excluded high addresses.  */
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
        return NULL;
#endif
    return &l1_map[index >> L2_BITS];
}

static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p) {
        /* allocate if not found */
#if defined(CONFIG_USER_ONLY)
        size_t len = sizeof(PageDesc) * L2_SIZE;
        /* Don't use qemu_malloc because it may recurse.  */
        p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        *lp = p;
        if (h2g_valid(p)) {
            unsigned long addr = h2g(p);
            page_set_flags(addr & TARGET_PAGE_MASK,
                           TARGET_PAGE_ALIGN(addr + len),
                           PAGE_RESERVED);
        }
#else
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
        *lp = p;
#endif
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p) {
        return NULL;
    }
    return p + (index & (L2_SIZE - 1));
}
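
/* Callers pass these helpers a page number, not an address, e.g.
   page_find(addr >> TARGET_PAGE_BITS).  page_find_alloc() is the only
   variant that creates missing PageDesc arrays on demand; page_find() is
   a pure lookup and returns NULL for pages that were never touched. */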

static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. This will change when a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        /* Map the buffer below 2G, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
}
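
/* Typical startup sequence (a sketch only; the exact call site depends on
   the front end, e.g. vl.c or the linux-user main()):

       cpu_exec_init_all(0);          -- 0 selects the default buffer size
       env = cpu_init(cpu_model);     -- per-target init, reaches cpu_exec_init()

   No guest code can be translated before this has run, since tb_gen_code()
   allocates from the buffers set up here. */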

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static void cpu_common_pre_save(void *opaque)
{
    CPUState *env = opaque;

    cpu_synchronize_state(env);
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *env = opaque;

    cpu_synchronize_state(env);
    return 0;
}

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .pre_save = cpu_common_pre_save,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(cpu_index, &vmstate_cpu_common, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
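
/* The bitmap built above holds one bit per byte of the target page
   (TARGET_PAGE_SIZE / 8 bytes of storage), set for every byte covered by
   translated code.  tb_invalidate_phys_page_fast() consults it so that
   writes which miss all translated code can skip the much more expensive
   tb_invalidate_phys_page_range() path. */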

TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
    return tb;
}

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    target_ulong tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
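
/* The binary search above relies on tbs[] being sorted by tc_ptr.  That
   holds because translated code is handed out linearly from
   code_gen_buffer and tb_alloc() assigns tbs[] entries in the same order,
   while tb_free() can only drop the most recently generated block and
   tb_flush() resets both together, so the invariant is preserved. */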
bellard75012672003-06-21 13:11:07 +00001266
bellardea041c02003-06-25 16:16:50 +00001267static void tb_reset_jump_recursive(TranslationBlock *tb);
1268
1269static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1270{
1271 TranslationBlock *tb1, *tb_next, **ptb;
1272 unsigned int n1;
1273
1274 tb1 = tb->jmp_next[n];
1275 if (tb1 != NULL) {
1276 /* find head of list */
1277 for(;;) {
1278 n1 = (long)tb1 & 3;
1279 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1280 if (n1 == 2)
1281 break;
1282 tb1 = tb1->jmp_next[n1];
1283 }
1284 /* we are now sure now that tb jumps to tb1 */
1285 tb_next = tb1;
1286
1287 /* remove tb from the jmp_first list */
1288 ptb = &tb_next->jmp_first;
1289 for(;;) {
1290 tb1 = *ptb;
1291 n1 = (long)tb1 & 3;
1292 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1293 if (n1 == n && tb1 == tb)
1294 break;
1295 ptb = &tb1->jmp_next[n1];
1296 }
1297 *ptb = tb->jmp_next[n];
1298 tb->jmp_next[n] = NULL;
ths3b46e622007-09-17 08:09:54 +00001299
bellardea041c02003-06-25 16:16:50 +00001300 /* suppress the jump to next tb in generated code */
1301 tb_reset_jump(tb, n);
1302
bellard01243112004-01-04 15:48:17 +00001303 /* suppress jumps in the tb on which we could have jumped */
bellardea041c02003-06-25 16:16:50 +00001304 tb_reset_jump_recursive(tb_next);
1305 }
1306}
1307
1308static void tb_reset_jump_recursive(TranslationBlock *tb)
1309{
1310 tb_reset_jump_recursive2(tb, 0);
1311 tb_reset_jump_recursive2(tb, 1);
1312}
1313
bellard1fddef42005-04-17 19:16:13 +00001314#if defined(TARGET_HAS_ICE)
bellardd720b932004-04-25 17:57:43 +00001315static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1316{
Anthony Liguoric227f092009-10-01 16:12:16 -05001317 target_phys_addr_t addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00001318 target_ulong pd;
Anthony Liguoric227f092009-10-01 16:12:16 -05001319 ram_addr_t ram_addr;
pbrookc2f07f82006-04-08 17:14:56 +00001320 PhysPageDesc *p;
bellardd720b932004-04-25 17:57:43 +00001321
pbrookc2f07f82006-04-08 17:14:56 +00001322 addr = cpu_get_phys_page_debug(env, pc);
1323 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1324 if (!p) {
1325 pd = IO_MEM_UNASSIGNED;
1326 } else {
1327 pd = p->phys_offset;
1328 }
1329 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
pbrook706cd4b2006-04-08 17:36:21 +00001330 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
bellardd720b932004-04-25 17:57:43 +00001331}
bellardc27004e2005-01-03 23:35:10 +00001332#endif
bellardd720b932004-04-25 17:57:43 +00001333
pbrook6658ffb2007-03-16 23:58:11 +00001334/* Add a watchpoint. */
aliguoria1d1bb32008-11-18 20:07:32 +00001335int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1336 int flags, CPUWatchpoint **watchpoint)
pbrook6658ffb2007-03-16 23:58:11 +00001337{
aliguorib4051332008-11-18 20:14:20 +00001338 target_ulong len_mask = ~(len - 1);
aliguoric0ce9982008-11-25 22:13:57 +00001339 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001340
aliguorib4051332008-11-18 20:14:20 +00001341 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1342 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1343 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1344 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1345 return -EINVAL;
1346 }
aliguoria1d1bb32008-11-18 20:07:32 +00001347 wp = qemu_malloc(sizeof(*wp));
pbrook6658ffb2007-03-16 23:58:11 +00001348
aliguoria1d1bb32008-11-18 20:07:32 +00001349 wp->vaddr = addr;
aliguorib4051332008-11-18 20:14:20 +00001350 wp->len_mask = len_mask;
aliguoria1d1bb32008-11-18 20:07:32 +00001351 wp->flags = flags;
1352
aliguori2dc9f412008-11-18 20:56:59 +00001353 /* keep all GDB-injected watchpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001354 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001355 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001356 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001357 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001358
pbrook6658ffb2007-03-16 23:58:11 +00001359 tlb_flush_page(env, addr);
aliguoria1d1bb32008-11-18 20:07:32 +00001360
1361 if (watchpoint)
1362 *watchpoint = wp;
1363 return 0;
pbrook6658ffb2007-03-16 23:58:11 +00001364}
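/* A usage sketch, reusing the BP_GDB flag from above: a debug front end
   could register a 4-byte watchpoint and later drop it by reference:

       CPUWatchpoint *wp;
       if (cpu_watchpoint_insert(env, addr, 4, BP_GDB, &wp) == 0) {
           ... run the guest ...
           cpu_watchpoint_remove_by_ref(env, wp);
       }

   len must be 1, 2, 4 or 8 and addr must be aligned to it, otherwise
   -EINVAL is returned. */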
1365
aliguoria1d1bb32008-11-18 20:07:32 +00001366/* Remove a specific watchpoint. */
1367int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1368 int flags)
pbrook6658ffb2007-03-16 23:58:11 +00001369{
aliguorib4051332008-11-18 20:14:20 +00001370 target_ulong len_mask = ~(len - 1);
aliguoria1d1bb32008-11-18 20:07:32 +00001371 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001372
Blue Swirl72cf2d42009-09-12 07:36:22 +00001373 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001374 if (addr == wp->vaddr && len_mask == wp->len_mask
aliguori6e140f22008-11-18 20:37:55 +00001375 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
aliguoria1d1bb32008-11-18 20:07:32 +00001376 cpu_watchpoint_remove_by_ref(env, wp);
pbrook6658ffb2007-03-16 23:58:11 +00001377 return 0;
1378 }
1379 }
aliguoria1d1bb32008-11-18 20:07:32 +00001380 return -ENOENT;
pbrook6658ffb2007-03-16 23:58:11 +00001381}
1382
aliguoria1d1bb32008-11-18 20:07:32 +00001383/* Remove a specific watchpoint by reference. */
1384void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1385{
Blue Swirl72cf2d42009-09-12 07:36:22 +00001386 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
edgar_igl7d03f822008-05-17 18:58:29 +00001387
aliguoria1d1bb32008-11-18 20:07:32 +00001388 tlb_flush_page(env, watchpoint->vaddr);
1389
1390 qemu_free(watchpoint);
edgar_igl7d03f822008-05-17 18:58:29 +00001391}
1392
aliguoria1d1bb32008-11-18 20:07:32 +00001393/* Remove all matching watchpoints. */
1394void cpu_watchpoint_remove_all(CPUState *env, int mask)
1395{
aliguoric0ce9982008-11-25 22:13:57 +00001396 CPUWatchpoint *wp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001397
Blue Swirl72cf2d42009-09-12 07:36:22 +00001398 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001399 if (wp->flags & mask)
1400 cpu_watchpoint_remove_by_ref(env, wp);
aliguoric0ce9982008-11-25 22:13:57 +00001401 }
aliguoria1d1bb32008-11-18 20:07:32 +00001402}
1403
1404/* Add a breakpoint. */
1405int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1406 CPUBreakpoint **breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001407{
bellard1fddef42005-04-17 19:16:13 +00001408#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001409 CPUBreakpoint *bp;
ths3b46e622007-09-17 08:09:54 +00001410
aliguoria1d1bb32008-11-18 20:07:32 +00001411 bp = qemu_malloc(sizeof(*bp));
aliguoria1d1bb32008-11-18 20:07:32 +00001412
1413 bp->pc = pc;
1414 bp->flags = flags;
1415
aliguori2dc9f412008-11-18 20:56:59 +00001416 /* keep all GDB-injected breakpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001417 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001418 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001419 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001420 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001421
1422 breakpoint_invalidate(env, pc);
1423
1424 if (breakpoint)
1425 *breakpoint = bp;
1426 return 0;
1427#else
1428 return -ENOSYS;
1429#endif
1430}
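/* A usage sketch: a debugger stub would typically pair this with
   cpu_breakpoint_remove(), e.g.

       if (cpu_breakpoint_insert(env, pc, BP_GDB, NULL) == 0) {
           ... resume the guest ...
           cpu_breakpoint_remove(env, pc, BP_GDB);
       }

   On targets built without TARGET_HAS_ICE both calls return -ENOSYS. */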
1431
1432/* Remove a specific breakpoint. */
1433int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1434{
1435#if defined(TARGET_HAS_ICE)
1436 CPUBreakpoint *bp;
1437
Blue Swirl72cf2d42009-09-12 07:36:22 +00001438 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00001439 if (bp->pc == pc && bp->flags == flags) {
1440 cpu_breakpoint_remove_by_ref(env, bp);
bellard4c3a88a2003-07-26 12:06:08 +00001441 return 0;
aliguoria1d1bb32008-11-18 20:07:32 +00001442 }
bellard4c3a88a2003-07-26 12:06:08 +00001443 }
aliguoria1d1bb32008-11-18 20:07:32 +00001444 return -ENOENT;
bellard4c3a88a2003-07-26 12:06:08 +00001445#else
aliguoria1d1bb32008-11-18 20:07:32 +00001446 return -ENOSYS;
bellard4c3a88a2003-07-26 12:06:08 +00001447#endif
1448}
1449
aliguoria1d1bb32008-11-18 20:07:32 +00001450/* Remove a specific breakpoint by reference. */
1451void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001452{
bellard1fddef42005-04-17 19:16:13 +00001453#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001454 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
bellardd720b932004-04-25 17:57:43 +00001455
aliguoria1d1bb32008-11-18 20:07:32 +00001456 breakpoint_invalidate(env, breakpoint->pc);
1457
1458 qemu_free(breakpoint);
1459#endif
1460}
1461
1462/* Remove all matching breakpoints. */
1463void cpu_breakpoint_remove_all(CPUState *env, int mask)
1464{
1465#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001466 CPUBreakpoint *bp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001467
Blue Swirl72cf2d42009-09-12 07:36:22 +00001468 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001469 if (bp->flags & mask)
1470 cpu_breakpoint_remove_by_ref(env, bp);
aliguoric0ce9982008-11-25 22:13:57 +00001471 }
bellard4c3a88a2003-07-26 12:06:08 +00001472#endif
1473}
1474
bellardc33a3462003-07-29 20:50:33 +00001475/* enable or disable single step mode. EXCP_DEBUG is returned by the
1476 CPU loop after each instruction */
1477void cpu_single_step(CPUState *env, int enabled)
1478{
bellard1fddef42005-04-17 19:16:13 +00001479#if defined(TARGET_HAS_ICE)
bellardc33a3462003-07-29 20:50:33 +00001480 if (env->singlestep_enabled != enabled) {
1481 env->singlestep_enabled = enabled;
aliguorie22a25c2009-03-12 20:12:48 +00001482 if (kvm_enabled())
1483 kvm_update_guest_debug(env, 0);
1484 else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01001485 /* must flush all the translated code to avoid inconsistencies */
aliguorie22a25c2009-03-12 20:12:48 +00001486 /* XXX: only flush what is necessary */
1487 tb_flush(env);
1488 }
bellardc33a3462003-07-29 20:50:33 +00001489 }
1490#endif
1491}
1492
bellard34865132003-10-05 14:28:56 +00001493/* enable or disable low-level logging */
1494void cpu_set_log(int log_flags)
1495{
1496 loglevel = log_flags;
1497 if (loglevel && !logfile) {
pbrook11fcfab2007-07-01 18:21:11 +00001498 logfile = fopen(logfilename, log_append ? "a" : "w");
bellard34865132003-10-05 14:28:56 +00001499 if (!logfile) {
1500 perror(logfilename);
1501 _exit(1);
1502 }
bellard9fa3e852004-01-04 18:06:42 +00001503#if !defined(CONFIG_SOFTMMU)
 1504 /* must avoid glibc's use of mmap() by setting a buffer "by hand" */
1505 {
blueswir1b55266b2008-09-20 08:07:15 +00001506 static char logfile_buf[4096];
bellard9fa3e852004-01-04 18:06:42 +00001507 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1508 }
Filip Navarabf65f532009-07-27 10:02:04 -05001509#elif !defined(_WIN32)
1510 /* Win32 doesn't support line-buffering and requires size >= 2 */
bellard34865132003-10-05 14:28:56 +00001511 setvbuf(logfile, NULL, _IOLBF, 0);
bellard9fa3e852004-01-04 18:06:42 +00001512#endif
pbrooke735b912007-06-30 13:53:24 +00001513 log_append = 1;
1514 }
1515 if (!loglevel && logfile) {
1516 fclose(logfile);
1517 logfile = NULL;
bellard34865132003-10-05 14:28:56 +00001518 }
1519}
1520
1521void cpu_set_log_filename(const char *filename)
1522{
1523 logfilename = strdup(filename);
pbrooke735b912007-06-30 13:53:24 +00001524 if (logfile) {
1525 fclose(logfile);
1526 logfile = NULL;
1527 }
1528 cpu_set_log(loglevel);
bellard34865132003-10-05 14:28:56 +00001529}
bellardc33a3462003-07-29 20:50:33 +00001530
aurel323098dba2009-03-07 21:28:24 +00001531static void cpu_unlink_tb(CPUState *env)
bellardea041c02003-06-25 16:16:50 +00001532{
pbrookd5975362008-06-07 20:50:51 +00001533 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1534 problem and hope the cpu will stop of its own accord. For userspace
1535 emulation this often isn't actually as bad as it sounds. Often
1536 signals are used primarily to interrupt blocking syscalls. */
aurel323098dba2009-03-07 21:28:24 +00001537 TranslationBlock *tb;
Anthony Liguoric227f092009-10-01 16:12:16 -05001538 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
aurel323098dba2009-03-07 21:28:24 +00001539
1540 tb = env->current_tb;
1541 /* if the cpu is currently executing code, we must unlink it and
1542 all the potentially executing TB */
Riku Voipiof76cfe52009-12-04 15:16:30 +02001543 if (tb) {
1544 spin_lock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001545 env->current_tb = NULL;
1546 tb_reset_jump_recursive(tb);
Riku Voipiof76cfe52009-12-04 15:16:30 +02001547 spin_unlock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001548 }
aurel323098dba2009-03-07 21:28:24 +00001549}
1550
1551/* mask must never be zero, except for A20 change call */
1552void cpu_interrupt(CPUState *env, int mask)
1553{
1554 int old_mask;
1555
1556 old_mask = env->interrupt_request;
1557 env->interrupt_request |= mask;
1558
aliguori8edac962009-04-24 18:03:45 +00001559#ifndef CONFIG_USER_ONLY
1560 /*
1561 * If called from iothread context, wake the target cpu in
 1562 * case it's halted.
1563 */
1564 if (!qemu_cpu_self(env)) {
1565 qemu_cpu_kick(env);
1566 return;
1567 }
1568#endif
1569
pbrook2e70f6e2008-06-29 01:03:05 +00001570 if (use_icount) {
pbrook266910c2008-07-09 15:31:50 +00001571 env->icount_decr.u16.high = 0xffff;
pbrook2e70f6e2008-06-29 01:03:05 +00001572#ifndef CONFIG_USER_ONLY
pbrook2e70f6e2008-06-29 01:03:05 +00001573 if (!can_do_io(env)
aurel32be214e62009-03-06 21:48:00 +00001574 && (mask & ~old_mask) != 0) {
pbrook2e70f6e2008-06-29 01:03:05 +00001575 cpu_abort(env, "Raised interrupt while not in I/O function");
1576 }
1577#endif
1578 } else {
aurel323098dba2009-03-07 21:28:24 +00001579 cpu_unlink_tb(env);
bellardea041c02003-06-25 16:16:50 +00001580 }
1581}
1582
bellardb54ad042004-05-20 13:42:52 +00001583void cpu_reset_interrupt(CPUState *env, int mask)
1584{
1585 env->interrupt_request &= ~mask;
1586}
1587
aurel323098dba2009-03-07 21:28:24 +00001588void cpu_exit(CPUState *env)
1589{
1590 env->exit_request = 1;
1591 cpu_unlink_tb(env);
1592}
1593
blueswir1c7cd6a32008-10-02 18:27:46 +00001594const CPULogItem cpu_log_items[] = {
ths5fafdf22007-09-16 21:08:06 +00001595 { CPU_LOG_TB_OUT_ASM, "out_asm",
bellardf193c792004-03-21 17:06:25 +00001596 "show generated host assembly code for each compiled TB" },
1597 { CPU_LOG_TB_IN_ASM, "in_asm",
1598 "show target assembly code for each compiled TB" },
ths5fafdf22007-09-16 21:08:06 +00001599 { CPU_LOG_TB_OP, "op",
bellard57fec1f2008-02-01 10:50:11 +00001600 "show micro ops for each compiled TB" },
bellardf193c792004-03-21 17:06:25 +00001601 { CPU_LOG_TB_OP_OPT, "op_opt",
blueswir1e01a1152008-03-14 17:37:11 +00001602 "show micro ops "
1603#ifdef TARGET_I386
1604 "before eflags optimization and "
bellardf193c792004-03-21 17:06:25 +00001605#endif
blueswir1e01a1152008-03-14 17:37:11 +00001606 "after liveness analysis" },
bellardf193c792004-03-21 17:06:25 +00001607 { CPU_LOG_INT, "int",
1608 "show interrupts/exceptions in short format" },
1609 { CPU_LOG_EXEC, "exec",
1610 "show trace before each executed TB (lots of logs)" },
bellard9fddaa02004-05-21 12:59:32 +00001611 { CPU_LOG_TB_CPU, "cpu",
thse91c8a72007-06-03 13:35:16 +00001612 "show CPU state before block translation" },
bellardf193c792004-03-21 17:06:25 +00001613#ifdef TARGET_I386
1614 { CPU_LOG_PCALL, "pcall",
1615 "show protected mode far calls/returns/exceptions" },
aliguorieca1bdf2009-01-26 19:54:31 +00001616 { CPU_LOG_RESET, "cpu_reset",
1617 "show CPU state before CPU resets" },
bellardf193c792004-03-21 17:06:25 +00001618#endif
bellard8e3a9fd2004-10-09 17:32:58 +00001619#ifdef DEBUG_IOPORT
bellardfd872592004-05-12 19:11:15 +00001620 { CPU_LOG_IOPORT, "ioport",
1621 "show all i/o ports accesses" },
bellard8e3a9fd2004-10-09 17:32:58 +00001622#endif
bellardf193c792004-03-21 17:06:25 +00001623 { 0, NULL, NULL },
1624};
1625
1626static int cmp1(const char *s1, int n, const char *s2)
1627{
1628 if (strlen(s2) != n)
1629 return 0;
1630 return memcmp(s1, s2, n) == 0;
1631}
ths3b46e622007-09-17 08:09:54 +00001632
bellardf193c792004-03-21 17:06:25 +00001633/* takes a comma separated list of log masks. Return 0 if error. */
1634int cpu_str_to_log_mask(const char *str)
1635{
blueswir1c7cd6a32008-10-02 18:27:46 +00001636 const CPULogItem *item;
bellardf193c792004-03-21 17:06:25 +00001637 int mask;
1638 const char *p, *p1;
1639
1640 p = str;
1641 mask = 0;
1642 for(;;) {
1643 p1 = strchr(p, ',');
1644 if (!p1)
1645 p1 = p + strlen(p);
bellard8e3a9fd2004-10-09 17:32:58 +00001646 if(cmp1(p,p1-p,"all")) {
1647 for(item = cpu_log_items; item->mask != 0; item++) {
1648 mask |= item->mask;
1649 }
1650 } else {
bellardf193c792004-03-21 17:06:25 +00001651 for(item = cpu_log_items; item->mask != 0; item++) {
1652 if (cmp1(p, p1 - p, item->name))
1653 goto found;
1654 }
1655 return 0;
bellard8e3a9fd2004-10-09 17:32:58 +00001656 }
bellardf193c792004-03-21 17:06:25 +00001657 found:
1658 mask |= item->mask;
1659 if (*p1 != ',')
1660 break;
1661 p = p1 + 1;
1662 }
1663 return mask;
1664}
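/* Example: cpu_str_to_log_mask("in_asm,exec") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_EXEC, "all" selects every entry of
   cpu_log_items, and an unrecognized name makes the call return 0. */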
bellardea041c02003-06-25 16:16:50 +00001665
bellard75012672003-06-21 13:11:07 +00001666void cpu_abort(CPUState *env, const char *fmt, ...)
1667{
1668 va_list ap;
pbrook493ae1f2007-11-23 16:53:59 +00001669 va_list ap2;
bellard75012672003-06-21 13:11:07 +00001670
1671 va_start(ap, fmt);
pbrook493ae1f2007-11-23 16:53:59 +00001672 va_copy(ap2, ap);
bellard75012672003-06-21 13:11:07 +00001673 fprintf(stderr, "qemu: fatal: ");
1674 vfprintf(stderr, fmt, ap);
1675 fprintf(stderr, "\n");
1676#ifdef TARGET_I386
bellard7fe48482004-10-09 18:08:01 +00001677 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1678#else
1679 cpu_dump_state(env, stderr, fprintf, 0);
bellard75012672003-06-21 13:11:07 +00001680#endif
aliguori93fcfe32009-01-15 22:34:14 +00001681 if (qemu_log_enabled()) {
1682 qemu_log("qemu: fatal: ");
1683 qemu_log_vprintf(fmt, ap2);
1684 qemu_log("\n");
j_mayerf9373292007-09-29 12:18:20 +00001685#ifdef TARGET_I386
aliguori93fcfe32009-01-15 22:34:14 +00001686 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
j_mayerf9373292007-09-29 12:18:20 +00001687#else
aliguori93fcfe32009-01-15 22:34:14 +00001688 log_cpu_state(env, 0);
j_mayerf9373292007-09-29 12:18:20 +00001689#endif
aliguori31b1a7b2009-01-15 22:35:09 +00001690 qemu_log_flush();
aliguori93fcfe32009-01-15 22:34:14 +00001691 qemu_log_close();
balrog924edca2007-06-10 14:07:13 +00001692 }
pbrook493ae1f2007-11-23 16:53:59 +00001693 va_end(ap2);
j_mayerf9373292007-09-29 12:18:20 +00001694 va_end(ap);
bellard75012672003-06-21 13:11:07 +00001695 abort();
1696}
1697
thsc5be9f02007-02-28 20:20:53 +00001698CPUState *cpu_copy(CPUState *env)
1699{
ths01ba9812007-12-09 02:22:57 +00001700 CPUState *new_env = cpu_init(env->cpu_model_str);
thsc5be9f02007-02-28 20:20:53 +00001701 CPUState *next_cpu = new_env->next_cpu;
1702 int cpu_index = new_env->cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001703#if defined(TARGET_HAS_ICE)
1704 CPUBreakpoint *bp;
1705 CPUWatchpoint *wp;
1706#endif
1707
thsc5be9f02007-02-28 20:20:53 +00001708 memcpy(new_env, env, sizeof(CPUState));
aliguori5a38f082009-01-15 20:16:51 +00001709
1710 /* Preserve chaining and index. */
thsc5be9f02007-02-28 20:20:53 +00001711 new_env->next_cpu = next_cpu;
1712 new_env->cpu_index = cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001713
1714 /* Clone all break/watchpoints.
1715 Note: Once we support ptrace with hw-debug register access, make sure
1716 BP_CPU break/watchpoints are handled correctly on clone. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00001717 QTAILQ_INIT(&env->breakpoints);
1718 QTAILQ_INIT(&env->watchpoints);
aliguori5a38f082009-01-15 20:16:51 +00001719#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001720 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001721 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1722 }
Blue Swirl72cf2d42009-09-12 07:36:22 +00001723 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001724 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1725 wp->flags, NULL);
1726 }
1727#endif
1728
thsc5be9f02007-02-28 20:20:53 +00001729 return new_env;
1730}
1731
bellard01243112004-01-04 15:48:17 +00001732#if !defined(CONFIG_USER_ONLY)
1733
edgar_igl5c751e92008-05-06 08:44:21 +00001734static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1735{
1736 unsigned int i;
1737
1738 /* Discard jump cache entries for any tb which might potentially
1739 overlap the flushed page. */
1740 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1741 memset (&env->tb_jmp_cache[i], 0,
1742 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1743
1744 i = tb_jmp_cache_hash_page(addr);
1745 memset (&env->tb_jmp_cache[i], 0,
1746 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1747}
1748
Igor Kovalenko08738982009-07-12 02:15:40 +04001749static CPUTLBEntry s_cputlb_empty_entry = {
1750 .addr_read = -1,
1751 .addr_write = -1,
1752 .addr_code = -1,
1753 .addend = -1,
1754};
1755
bellardee8b7022004-02-03 23:35:10 +00001756/* NOTE: if flush_global is true, also flush global entries (not
1757 implemented yet) */
1758void tlb_flush(CPUState *env, int flush_global)
bellard33417e72003-08-10 21:47:01 +00001759{
bellard33417e72003-08-10 21:47:01 +00001760 int i;
bellard01243112004-01-04 15:48:17 +00001761
bellard9fa3e852004-01-04 18:06:42 +00001762#if defined(DEBUG_TLB)
1763 printf("tlb_flush:\n");
1764#endif
bellard01243112004-01-04 15:48:17 +00001765 /* must reset current TB so that interrupts cannot modify the
1766 links while we are modifying them */
1767 env->current_tb = NULL;
1768
bellard33417e72003-08-10 21:47:01 +00001769 for(i = 0; i < CPU_TLB_SIZE; i++) {
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001770 int mmu_idx;
1771 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
Igor Kovalenko08738982009-07-12 02:15:40 +04001772 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001773 }
bellard33417e72003-08-10 21:47:01 +00001774 }
bellard9fa3e852004-01-04 18:06:42 +00001775
bellard8a40a182005-11-20 10:35:40 +00001776 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
bellard9fa3e852004-01-04 18:06:42 +00001777
bellarde3db7222005-01-26 22:00:47 +00001778 tlb_flush_count++;
bellard33417e72003-08-10 21:47:01 +00001779}
1780
bellard274da6b2004-05-20 21:56:27 +00001781static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
bellard61382a52003-10-27 21:22:23 +00001782{
ths5fafdf22007-09-16 21:08:06 +00001783 if (addr == (tlb_entry->addr_read &
bellard84b7b8e2005-11-28 21:19:04 +00001784 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00001785 addr == (tlb_entry->addr_write &
bellard84b7b8e2005-11-28 21:19:04 +00001786 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00001787 addr == (tlb_entry->addr_code &
bellard84b7b8e2005-11-28 21:19:04 +00001788 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
Igor Kovalenko08738982009-07-12 02:15:40 +04001789 *tlb_entry = s_cputlb_empty_entry;
bellard84b7b8e2005-11-28 21:19:04 +00001790 }
bellard61382a52003-10-27 21:22:23 +00001791}
1792
bellard2e126692004-04-25 21:28:44 +00001793void tlb_flush_page(CPUState *env, target_ulong addr)
bellard33417e72003-08-10 21:47:01 +00001794{
bellard8a40a182005-11-20 10:35:40 +00001795 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001796 int mmu_idx;
bellard01243112004-01-04 15:48:17 +00001797
bellard9fa3e852004-01-04 18:06:42 +00001798#if defined(DEBUG_TLB)
bellard108c49b2005-07-24 12:55:09 +00001799 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
bellard9fa3e852004-01-04 18:06:42 +00001800#endif
bellard01243112004-01-04 15:48:17 +00001801 /* must reset current TB so that interrupts cannot modify the
1802 links while we are modifying them */
1803 env->current_tb = NULL;
bellard33417e72003-08-10 21:47:01 +00001804
bellard61382a52003-10-27 21:22:23 +00001805 addr &= TARGET_PAGE_MASK;
bellard33417e72003-08-10 21:47:01 +00001806 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001807 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1808 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
bellard01243112004-01-04 15:48:17 +00001809
edgar_igl5c751e92008-05-06 08:44:21 +00001810 tlb_flush_jmp_cache(env, addr);
bellard9fa3e852004-01-04 18:06:42 +00001811}
1812
bellard9fa3e852004-01-04 18:06:42 +00001813/* update the TLBs so that writes to code in the virtual page 'addr'
1814 can be detected */
Anthony Liguoric227f092009-10-01 16:12:16 -05001815static void tlb_protect_code(ram_addr_t ram_addr)
bellard61382a52003-10-27 21:22:23 +00001816{
ths5fafdf22007-09-16 21:08:06 +00001817 cpu_physical_memory_reset_dirty(ram_addr,
bellard6a00d602005-11-21 23:25:50 +00001818 ram_addr + TARGET_PAGE_SIZE,
1819 CODE_DIRTY_FLAG);
bellard9fa3e852004-01-04 18:06:42 +00001820}
1821
bellard9fa3e852004-01-04 18:06:42 +00001822/* update the TLB so that writes in physical page 'phys_addr' are no longer
bellard3a7d9292005-08-21 09:26:42 +00001823 tested for self modifying code */
Anthony Liguoric227f092009-10-01 16:12:16 -05001824static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
bellard3a7d9292005-08-21 09:26:42 +00001825 target_ulong vaddr)
bellard9fa3e852004-01-04 18:06:42 +00001826{
bellard3a7d9292005-08-21 09:26:42 +00001827 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
bellard1ccde1c2004-02-06 19:46:14 +00001828}
1829
ths5fafdf22007-09-16 21:08:06 +00001830static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
bellard1ccde1c2004-02-06 19:46:14 +00001831 unsigned long start, unsigned long length)
1832{
1833 unsigned long addr;
bellard84b7b8e2005-11-28 21:19:04 +00001834 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1835 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
bellard1ccde1c2004-02-06 19:46:14 +00001836 if ((addr - start) < length) {
pbrook0f459d12008-06-09 00:20:13 +00001837 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
bellard1ccde1c2004-02-06 19:46:14 +00001838 }
1839 }
1840}
1841
pbrook5579c7f2009-04-11 14:47:08 +00001842/* Note: start and end must be within the same ram block. */
Anthony Liguoric227f092009-10-01 16:12:16 -05001843void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
bellard0a962c02005-02-10 22:00:27 +00001844 int dirty_flags)
bellard1ccde1c2004-02-06 19:46:14 +00001845{
1846 CPUState *env;
bellard4f2ac232004-04-26 19:44:02 +00001847 unsigned long length, start1;
bellard0a962c02005-02-10 22:00:27 +00001848 int i, mask, len;
1849 uint8_t *p;
bellard1ccde1c2004-02-06 19:46:14 +00001850
1851 start &= TARGET_PAGE_MASK;
1852 end = TARGET_PAGE_ALIGN(end);
1853
1854 length = end - start;
1855 if (length == 0)
1856 return;
bellard0a962c02005-02-10 22:00:27 +00001857 len = length >> TARGET_PAGE_BITS;
bellardf23db162005-08-21 19:12:28 +00001858 mask = ~dirty_flags;
1859 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1860 for(i = 0; i < len; i++)
1861 p[i] &= mask;
1862
bellard1ccde1c2004-02-06 19:46:14 +00001863 /* we modify the TLB cache so that the dirty bit will be set again
1864 when accessing the range */
pbrook5579c7f2009-04-11 14:47:08 +00001865 start1 = (unsigned long)qemu_get_ram_ptr(start);
 1866 /* Check that we don't span multiple blocks - this breaks the
1867 address comparisons below. */
1868 if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
1869 != (end - 1) - start) {
1870 abort();
1871 }
1872
bellard6a00d602005-11-21 23:25:50 +00001873 for(env = first_cpu; env != NULL; env = env->next_cpu) {
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001874 int mmu_idx;
1875 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1876 for(i = 0; i < CPU_TLB_SIZE; i++)
1877 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
1878 start1, length);
1879 }
bellard6a00d602005-11-21 23:25:50 +00001880 }
bellard1ccde1c2004-02-06 19:46:14 +00001881}
1882
aliguori74576192008-10-06 14:02:03 +00001883int cpu_physical_memory_set_dirty_tracking(int enable)
1884{
1885 in_migration = enable;
Jan Kiszkab0a46a32009-05-02 00:22:51 +02001886 if (kvm_enabled()) {
1887 return kvm_set_migration_log(enable);
1888 }
aliguori74576192008-10-06 14:02:03 +00001889 return 0;
1890}
1891
1892int cpu_physical_memory_get_dirty_tracking(void)
1893{
1894 return in_migration;
1895}
1896
Anthony Liguoric227f092009-10-01 16:12:16 -05001897int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
1898 target_phys_addr_t end_addr)
aliguori2bec46d2008-11-24 20:21:41 +00001899{
Jan Kiszka151f7742009-05-01 20:52:47 +02001900 int ret = 0;
1901
aliguori2bec46d2008-11-24 20:21:41 +00001902 if (kvm_enabled())
Jan Kiszka151f7742009-05-01 20:52:47 +02001903 ret = kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1904 return ret;
aliguori2bec46d2008-11-24 20:21:41 +00001905}
1906
bellard3a7d9292005-08-21 09:26:42 +00001907static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1908{
Anthony Liguoric227f092009-10-01 16:12:16 -05001909 ram_addr_t ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00001910 void *p;
bellard3a7d9292005-08-21 09:26:42 +00001911
bellard84b7b8e2005-11-28 21:19:04 +00001912 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
pbrook5579c7f2009-04-11 14:47:08 +00001913 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
1914 + tlb_entry->addend);
1915 ram_addr = qemu_ram_addr_from_host(p);
bellard3a7d9292005-08-21 09:26:42 +00001916 if (!cpu_physical_memory_is_dirty(ram_addr)) {
pbrook0f459d12008-06-09 00:20:13 +00001917 tlb_entry->addr_write |= TLB_NOTDIRTY;
bellard3a7d9292005-08-21 09:26:42 +00001918 }
1919 }
1920}
1921
1922/* update the TLB according to the current state of the dirty bits */
1923void cpu_tlb_update_dirty(CPUState *env)
1924{
1925 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001926 int mmu_idx;
1927 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1928 for(i = 0; i < CPU_TLB_SIZE; i++)
1929 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
1930 }
bellard3a7d9292005-08-21 09:26:42 +00001931}
1932
pbrook0f459d12008-06-09 00:20:13 +00001933static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00001934{
pbrook0f459d12008-06-09 00:20:13 +00001935 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1936 tlb_entry->addr_write = vaddr;
bellard1ccde1c2004-02-06 19:46:14 +00001937}
1938
pbrook0f459d12008-06-09 00:20:13 +00001939/* update the TLB corresponding to virtual page vaddr
1940 so that it is no longer dirty */
1941static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00001942{
bellard1ccde1c2004-02-06 19:46:14 +00001943 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001944 int mmu_idx;
bellard1ccde1c2004-02-06 19:46:14 +00001945
pbrook0f459d12008-06-09 00:20:13 +00001946 vaddr &= TARGET_PAGE_MASK;
bellard1ccde1c2004-02-06 19:46:14 +00001947 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001948 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1949 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
bellard9fa3e852004-01-04 18:06:42 +00001950}
1951
bellard59817cc2004-02-16 22:01:13 +00001952/* add a new TLB entry. At most one entry for a given virtual address
1953 is permitted. Return 0 if OK or 2 if the page could not be mapped
1954 (can only happen in non SOFTMMU mode for I/O pages or pages
1955 conflicting with the host address space). */
ths5fafdf22007-09-16 21:08:06 +00001956int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
Anthony Liguoric227f092009-10-01 16:12:16 -05001957 target_phys_addr_t paddr, int prot,
j_mayer6ebbf392007-10-14 07:07:08 +00001958 int mmu_idx, int is_softmmu)
bellard9fa3e852004-01-04 18:06:42 +00001959{
bellard92e873b2004-05-21 14:52:29 +00001960 PhysPageDesc *p;
bellard4f2ac232004-04-26 19:44:02 +00001961 unsigned long pd;
bellard9fa3e852004-01-04 18:06:42 +00001962 unsigned int index;
bellard4f2ac232004-04-26 19:44:02 +00001963 target_ulong address;
pbrook0f459d12008-06-09 00:20:13 +00001964 target_ulong code_address;
Anthony Liguoric227f092009-10-01 16:12:16 -05001965 target_phys_addr_t addend;
bellard9fa3e852004-01-04 18:06:42 +00001966 int ret;
bellard84b7b8e2005-11-28 21:19:04 +00001967 CPUTLBEntry *te;
aliguoria1d1bb32008-11-18 20:07:32 +00001968 CPUWatchpoint *wp;
Anthony Liguoric227f092009-10-01 16:12:16 -05001969 target_phys_addr_t iotlb;
bellard9fa3e852004-01-04 18:06:42 +00001970
bellard92e873b2004-05-21 14:52:29 +00001971 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
bellard9fa3e852004-01-04 18:06:42 +00001972 if (!p) {
1973 pd = IO_MEM_UNASSIGNED;
bellard9fa3e852004-01-04 18:06:42 +00001974 } else {
1975 pd = p->phys_offset;
bellard9fa3e852004-01-04 18:06:42 +00001976 }
1977#if defined(DEBUG_TLB)
j_mayer6ebbf392007-10-14 07:07:08 +00001978 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1979 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
bellard9fa3e852004-01-04 18:06:42 +00001980#endif
1981
1982 ret = 0;
pbrook0f459d12008-06-09 00:20:13 +00001983 address = vaddr;
1984 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1985 /* IO memory case (romd handled later) */
1986 address |= TLB_MMIO;
1987 }
pbrook5579c7f2009-04-11 14:47:08 +00001988 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
pbrook0f459d12008-06-09 00:20:13 +00001989 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1990 /* Normal RAM. */
1991 iotlb = pd & TARGET_PAGE_MASK;
1992 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
1993 iotlb |= IO_MEM_NOTDIRTY;
1994 else
1995 iotlb |= IO_MEM_ROM;
1996 } else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01001997 /* IO handlers are currently passed a physical address.
pbrook0f459d12008-06-09 00:20:13 +00001998 It would be nice to pass an offset from the base address
1999 of that region. This would avoid having to special case RAM,
2000 and avoid full address decoding in every device.
2001 We can't use the high bits of pd for this because
2002 IO_MEM_ROMD uses these as a ram address. */
pbrook8da3ff12008-12-01 18:59:50 +00002003 iotlb = (pd & ~TARGET_PAGE_MASK);
2004 if (p) {
pbrook8da3ff12008-12-01 18:59:50 +00002005 iotlb += p->region_offset;
2006 } else {
2007 iotlb += paddr;
2008 }
pbrook0f459d12008-06-09 00:20:13 +00002009 }
pbrook6658ffb2007-03-16 23:58:11 +00002010
pbrook0f459d12008-06-09 00:20:13 +00002011 code_address = address;
2012 /* Make accesses to pages with watchpoints go via the
2013 watchpoint trap routines. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00002014 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00002015 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
pbrook0f459d12008-06-09 00:20:13 +00002016 iotlb = io_mem_watch + paddr;
2017 /* TODO: The memory case can be optimized by not trapping
2018 reads of pages with a write breakpoint. */
2019 address |= TLB_MMIO;
pbrook6658ffb2007-03-16 23:58:11 +00002020 }
pbrook0f459d12008-06-09 00:20:13 +00002021 }
balrogd79acba2007-06-26 20:01:13 +00002022
pbrook0f459d12008-06-09 00:20:13 +00002023 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2024 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2025 te = &env->tlb_table[mmu_idx][index];
2026 te->addend = addend - vaddr;
2027 if (prot & PAGE_READ) {
2028 te->addr_read = address;
2029 } else {
2030 te->addr_read = -1;
2031 }
edgar_igl5c751e92008-05-06 08:44:21 +00002032
pbrook0f459d12008-06-09 00:20:13 +00002033 if (prot & PAGE_EXEC) {
2034 te->addr_code = code_address;
2035 } else {
2036 te->addr_code = -1;
2037 }
2038 if (prot & PAGE_WRITE) {
2039 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2040 (pd & IO_MEM_ROMD)) {
2041 /* Write access calls the I/O callback. */
2042 te->addr_write = address | TLB_MMIO;
2043 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2044 !cpu_physical_memory_is_dirty(pd)) {
2045 te->addr_write = address | TLB_NOTDIRTY;
bellard84b7b8e2005-11-28 21:19:04 +00002046 } else {
pbrook0f459d12008-06-09 00:20:13 +00002047 te->addr_write = address;
bellard9fa3e852004-01-04 18:06:42 +00002048 }
pbrook0f459d12008-06-09 00:20:13 +00002049 } else {
2050 te->addr_write = -1;
bellard9fa3e852004-01-04 18:06:42 +00002051 }
bellard9fa3e852004-01-04 18:06:42 +00002052 return ret;
2053}
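/* Callers are the per-target MMU fault handlers (a sketch, assuming the
   usual tlb_fill()/handle_mmu_fault path): after walking the guest page
   tables they end up doing something like

       tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK,
                         paddr & TARGET_PAGE_MASK, prot,
                         mmu_idx, is_softmmu);

   with prot built from PAGE_READ, PAGE_WRITE and PAGE_EXEC bits. */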
2054
bellard01243112004-01-04 15:48:17 +00002055#else
2056
bellardee8b7022004-02-03 23:35:10 +00002057void tlb_flush(CPUState *env, int flush_global)
bellard01243112004-01-04 15:48:17 +00002058{
2059}
2060
bellard2e126692004-04-25 21:28:44 +00002061void tlb_flush_page(CPUState *env, target_ulong addr)
bellard01243112004-01-04 15:48:17 +00002062{
2063}
2064
ths5fafdf22007-09-16 21:08:06 +00002065int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
Anthony Liguoric227f092009-10-01 16:12:16 -05002066 target_phys_addr_t paddr, int prot,
j_mayer6ebbf392007-10-14 07:07:08 +00002067 int mmu_idx, int is_softmmu)
bellard33417e72003-08-10 21:47:01 +00002068{
bellard9fa3e852004-01-04 18:06:42 +00002069 return 0;
2070}
bellard33417e72003-08-10 21:47:01 +00002071
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002072/*
2073 * Walks guest process memory "regions" one by one
2074 * and calls callback function 'fn' for each region.
2075 */
2076int walk_memory_regions(void *priv,
2077 int (*fn)(void *, unsigned long, unsigned long, unsigned long))
bellard9fa3e852004-01-04 18:06:42 +00002078{
2079 unsigned long start, end;
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002080 PageDesc *p = NULL;
bellard9fa3e852004-01-04 18:06:42 +00002081 int i, j, prot, prot1;
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002082 int rc = 0;
bellard9fa3e852004-01-04 18:06:42 +00002083
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002084 start = end = -1;
bellard9fa3e852004-01-04 18:06:42 +00002085 prot = 0;
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002086
2087 for (i = 0; i <= L1_SIZE; i++) {
2088 p = (i < L1_SIZE) ? l1_map[i] : NULL;
2089 for (j = 0; j < L2_SIZE; j++) {
2090 prot1 = (p == NULL) ? 0 : p[j].flags;
2091 /*
2092 * "region" is one continuous chunk of memory
 2093 * that has the same protection flags set.
2094 */
bellard9fa3e852004-01-04 18:06:42 +00002095 if (prot1 != prot) {
2096 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2097 if (start != -1) {
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002098 rc = (*fn)(priv, start, end, prot);
2099 /* callback can stop iteration by returning != 0 */
2100 if (rc != 0)
2101 return (rc);
bellard9fa3e852004-01-04 18:06:42 +00002102 }
2103 if (prot1 != 0)
2104 start = end;
2105 else
2106 start = -1;
2107 prot = prot1;
2108 }
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002109 if (p == NULL)
bellard9fa3e852004-01-04 18:06:42 +00002110 break;
2111 }
bellard33417e72003-08-10 21:47:01 +00002112 }
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002113 return (rc);
2114}
2115
2116static int dump_region(void *priv, unsigned long start,
2117 unsigned long end, unsigned long prot)
2118{
2119 FILE *f = (FILE *)priv;
2120
2121 (void) fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2122 start, end, end - start,
2123 ((prot & PAGE_READ) ? 'r' : '-'),
2124 ((prot & PAGE_WRITE) ? 'w' : '-'),
2125 ((prot & PAGE_EXEC) ? 'x' : '-'));
2126
2127 return (0);
2128}
2129
2130/* dump memory mappings */
2131void page_dump(FILE *f)
2132{
2133 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2134 "start", "end", "size", "prot");
2135 walk_memory_regions(f, dump_region);
bellard33417e72003-08-10 21:47:01 +00002136}
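/* Example output (a sketch of the format above): one line per region, e.g.

       00010000-00030000 00020000 r-x

   i.e. start, end, size and the region's r/w/x protection. */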
2137
pbrook53a59602006-03-25 19:31:22 +00002138int page_get_flags(target_ulong address)
bellard33417e72003-08-10 21:47:01 +00002139{
bellard9fa3e852004-01-04 18:06:42 +00002140 PageDesc *p;
2141
2142 p = page_find(address >> TARGET_PAGE_BITS);
bellard33417e72003-08-10 21:47:01 +00002143 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00002144 return 0;
2145 return p->flags;
bellard33417e72003-08-10 21:47:01 +00002146}
2147
bellard9fa3e852004-01-04 18:06:42 +00002148/* modify the flags of a page and invalidate the code if
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002149 necessary. The flag PAGE_WRITE_ORG is set automatically
bellard9fa3e852004-01-04 18:06:42 +00002150 depending on PAGE_WRITE */
pbrook53a59602006-03-25 19:31:22 +00002151void page_set_flags(target_ulong start, target_ulong end, int flags)
bellard9fa3e852004-01-04 18:06:42 +00002152{
2153 PageDesc *p;
pbrook53a59602006-03-25 19:31:22 +00002154 target_ulong addr;
bellard9fa3e852004-01-04 18:06:42 +00002155
pbrookc8a706f2008-06-02 16:16:42 +00002156 /* mmap_lock should already be held. */
bellard9fa3e852004-01-04 18:06:42 +00002157 start = start & TARGET_PAGE_MASK;
2158 end = TARGET_PAGE_ALIGN(end);
2159 if (flags & PAGE_WRITE)
2160 flags |= PAGE_WRITE_ORG;
bellard9fa3e852004-01-04 18:06:42 +00002161 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2162 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
pbrook17e23772008-06-09 13:47:45 +00002163 /* We may be called for host regions that are outside guest
2164 address space. */
2165 if (!p)
2166 return;
bellard9fa3e852004-01-04 18:06:42 +00002167 /* if the write protection is set, then we invalidate the code
2168 inside */
ths5fafdf22007-09-16 21:08:06 +00002169 if (!(p->flags & PAGE_WRITE) &&
bellard9fa3e852004-01-04 18:06:42 +00002170 (flags & PAGE_WRITE) &&
2171 p->first_tb) {
bellardd720b932004-04-25 17:57:43 +00002172 tb_invalidate_phys_page(addr, 0, NULL);
bellard9fa3e852004-01-04 18:06:42 +00002173 }
2174 p->flags = flags;
2175 }
bellard9fa3e852004-01-04 18:06:42 +00002176}
2177
ths3d97b402007-11-02 19:02:07 +00002178int page_check_range(target_ulong start, target_ulong len, int flags)
2179{
2180 PageDesc *p;
2181 target_ulong end;
2182 target_ulong addr;
2183
balrog55f280c2008-10-28 10:24:11 +00002184 if (start + len < start)
2185 /* we've wrapped around */
2186 return -1;
2187
ths3d97b402007-11-02 19:02:07 +00002188 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2189 start = start & TARGET_PAGE_MASK;
2190
ths3d97b402007-11-02 19:02:07 +00002191 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2192 p = page_find(addr >> TARGET_PAGE_BITS);
2193 if( !p )
2194 return -1;
2195 if( !(p->flags & PAGE_VALID) )
2196 return -1;
2197
bellarddae32702007-11-14 10:51:00 +00002198 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
ths3d97b402007-11-02 19:02:07 +00002199 return -1;
bellarddae32702007-11-14 10:51:00 +00002200 if (flags & PAGE_WRITE) {
2201 if (!(p->flags & PAGE_WRITE_ORG))
2202 return -1;
2203 /* unprotect the page if it was put read-only because it
2204 contains translated code */
2205 if (!(p->flags & PAGE_WRITE)) {
2206 if (!page_unprotect(addr, 0, NULL))
2207 return -1;
2208 }
2209 return 0;
2210 }
ths3d97b402007-11-02 19:02:07 +00002211 }
2212 return 0;
2213}
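/* A usage sketch: user-mode emulation code can validate a guest buffer
   before touching it, e.g.

       if (page_check_range(guest_addr, len, PAGE_READ | PAGE_WRITE) < 0)
           return -EFAULT;

   guest_addr and len are hypothetical names; note that the PAGE_WRITE
   check may transparently unprotect pages that were made read-only to
   track translated code. */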
2214
bellard9fa3e852004-01-04 18:06:42 +00002215/* called from signal handler: invalidate the code and unprotect the
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002216 page. Return TRUE if the fault was successfully handled. */
pbrook53a59602006-03-25 19:31:22 +00002217int page_unprotect(target_ulong address, unsigned long pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00002218{
2219 unsigned int page_index, prot, pindex;
2220 PageDesc *p, *p1;
pbrook53a59602006-03-25 19:31:22 +00002221 target_ulong host_start, host_end, addr;
bellard9fa3e852004-01-04 18:06:42 +00002222
pbrookc8a706f2008-06-02 16:16:42 +00002223 /* Technically this isn't safe inside a signal handler. However we
2224 know this only ever happens in a synchronous SEGV handler, so in
2225 practice it seems to be ok. */
2226 mmap_lock();
2227
bellard83fb7ad2004-07-05 21:25:26 +00002228 host_start = address & qemu_host_page_mask;
bellard9fa3e852004-01-04 18:06:42 +00002229 page_index = host_start >> TARGET_PAGE_BITS;
2230 p1 = page_find(page_index);
pbrookc8a706f2008-06-02 16:16:42 +00002231 if (!p1) {
2232 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002233 return 0;
pbrookc8a706f2008-06-02 16:16:42 +00002234 }
bellard83fb7ad2004-07-05 21:25:26 +00002235 host_end = host_start + qemu_host_page_size;
bellard9fa3e852004-01-04 18:06:42 +00002236 p = p1;
2237 prot = 0;
2238 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2239 prot |= p->flags;
2240 p++;
2241 }
2242 /* if the page was really writable, then we change its
2243 protection back to writable */
2244 if (prot & PAGE_WRITE_ORG) {
2245 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2246 if (!(p1[pindex].flags & PAGE_WRITE)) {
ths5fafdf22007-09-16 21:08:06 +00002247 mprotect((void *)g2h(host_start), qemu_host_page_size,
bellard9fa3e852004-01-04 18:06:42 +00002248 (prot & PAGE_BITS) | PAGE_WRITE);
2249 p1[pindex].flags |= PAGE_WRITE;
2250 /* and since the content will be modified, we must invalidate
2251 the corresponding translated code. */
bellardd720b932004-04-25 17:57:43 +00002252 tb_invalidate_phys_page(address, pc, puc);
bellard9fa3e852004-01-04 18:06:42 +00002253#ifdef DEBUG_TB_CHECK
2254 tb_invalidate_check(address);
2255#endif
pbrookc8a706f2008-06-02 16:16:42 +00002256 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002257 return 1;
2258 }
2259 }
pbrookc8a706f2008-06-02 16:16:42 +00002260 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002261 return 0;
2262}
2263
bellard6a00d602005-11-21 23:25:50 +00002264static inline void tlb_set_dirty(CPUState *env,
2265 unsigned long addr, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002266{
2267}
bellard9fa3e852004-01-04 18:06:42 +00002268#endif /* defined(CONFIG_USER_ONLY) */
2269
pbrooke2eef172008-06-08 01:09:01 +00002270#if !defined(CONFIG_USER_ONLY)
pbrook8da3ff12008-12-01 18:59:50 +00002271
Anthony Liguoric227f092009-10-01 16:12:16 -05002272static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2273 ram_addr_t memory, ram_addr_t region_offset);
2274static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2275 ram_addr_t orig_memory, ram_addr_t region_offset);
blueswir1db7b5422007-05-26 17:36:03 +00002276#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2277 need_subpage) \
2278 do { \
2279 if (addr > start_addr) \
2280 start_addr2 = 0; \
2281 else { \
2282 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2283 if (start_addr2 > 0) \
2284 need_subpage = 1; \
2285 } \
2286 \
blueswir149e9fba2007-05-30 17:25:06 +00002287 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
blueswir1db7b5422007-05-26 17:36:03 +00002288 end_addr2 = TARGET_PAGE_SIZE - 1; \
2289 else { \
2290 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2291 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2292 need_subpage = 1; \
2293 } \
2294 } while (0)
2295
Michael S. Tsirkin8f2498f2009-09-29 18:53:16 +02002296/* register physical memory.
2297 For RAM, 'size' must be a multiple of the target page size.
2298 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
pbrook8da3ff12008-12-01 18:59:50 +00002299 io memory page. The address used when calling the IO function is
2300 the offset from the start of the region, plus region_offset. Both
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002301 start_addr and region_offset are rounded down to a page boundary
pbrook8da3ff12008-12-01 18:59:50 +00002302 before calculating this offset. This should not be a problem unless
2303 the low bits of start_addr and region_offset differ. */
Anthony Liguoric227f092009-10-01 16:12:16 -05002304void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2305 ram_addr_t size,
2306 ram_addr_t phys_offset,
2307 ram_addr_t region_offset)
bellard33417e72003-08-10 21:47:01 +00002308{
Anthony Liguoric227f092009-10-01 16:12:16 -05002309 target_phys_addr_t addr, end_addr;
bellard92e873b2004-05-21 14:52:29 +00002310 PhysPageDesc *p;
bellard9d420372006-06-25 22:25:22 +00002311 CPUState *env;
Anthony Liguoric227f092009-10-01 16:12:16 -05002312 ram_addr_t orig_size = size;
blueswir1db7b5422007-05-26 17:36:03 +00002313 void *subpage;
bellard33417e72003-08-10 21:47:01 +00002314
aliguori7ba1e612008-11-05 16:04:33 +00002315 if (kvm_enabled())
2316 kvm_set_phys_mem(start_addr, size, phys_offset);
2317
pbrook67c4d232009-02-23 13:16:07 +00002318 if (phys_offset == IO_MEM_UNASSIGNED) {
2319 region_offset = start_addr;
2320 }
pbrook8da3ff12008-12-01 18:59:50 +00002321 region_offset &= TARGET_PAGE_MASK;
bellard5fd386f2004-05-23 21:11:22 +00002322 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
Anthony Liguoric227f092009-10-01 16:12:16 -05002323 end_addr = start_addr + (target_phys_addr_t)size;
blueswir149e9fba2007-05-30 17:25:06 +00002324 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
blueswir1db7b5422007-05-26 17:36:03 +00002325 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2326 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
Anthony Liguoric227f092009-10-01 16:12:16 -05002327 ram_addr_t orig_memory = p->phys_offset;
2328 target_phys_addr_t start_addr2, end_addr2;
blueswir1db7b5422007-05-26 17:36:03 +00002329 int need_subpage = 0;
2330
2331 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2332 need_subpage);
blueswir14254fab2008-01-01 16:57:19 +00002333 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
blueswir1db7b5422007-05-26 17:36:03 +00002334 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2335 subpage = subpage_init((addr & TARGET_PAGE_MASK),
pbrook8da3ff12008-12-01 18:59:50 +00002336 &p->phys_offset, orig_memory,
2337 p->region_offset);
blueswir1db7b5422007-05-26 17:36:03 +00002338 } else {
2339 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2340 >> IO_MEM_SHIFT];
2341 }
pbrook8da3ff12008-12-01 18:59:50 +00002342 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2343 region_offset);
2344 p->region_offset = 0;
blueswir1db7b5422007-05-26 17:36:03 +00002345 } else {
2346 p->phys_offset = phys_offset;
2347 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2348 (phys_offset & IO_MEM_ROMD))
2349 phys_offset += TARGET_PAGE_SIZE;
2350 }
2351 } else {
2352 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2353 p->phys_offset = phys_offset;
pbrook8da3ff12008-12-01 18:59:50 +00002354 p->region_offset = region_offset;
blueswir1db7b5422007-05-26 17:36:03 +00002355 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
pbrook8da3ff12008-12-01 18:59:50 +00002356 (phys_offset & IO_MEM_ROMD)) {
blueswir1db7b5422007-05-26 17:36:03 +00002357 phys_offset += TARGET_PAGE_SIZE;
pbrook0e8f0962008-12-02 09:02:15 +00002358 } else {
Anthony Liguoric227f092009-10-01 16:12:16 -05002359 target_phys_addr_t start_addr2, end_addr2;
blueswir1db7b5422007-05-26 17:36:03 +00002360 int need_subpage = 0;
2361
2362 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2363 end_addr2, need_subpage);
2364
blueswir14254fab2008-01-01 16:57:19 +00002365 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
blueswir1db7b5422007-05-26 17:36:03 +00002366 subpage = subpage_init((addr & TARGET_PAGE_MASK),
pbrook8da3ff12008-12-01 18:59:50 +00002367 &p->phys_offset, IO_MEM_UNASSIGNED,
pbrook67c4d232009-02-23 13:16:07 +00002368 addr & TARGET_PAGE_MASK);
blueswir1db7b5422007-05-26 17:36:03 +00002369 subpage_register(subpage, start_addr2, end_addr2,
pbrook8da3ff12008-12-01 18:59:50 +00002370 phys_offset, region_offset);
2371 p->region_offset = 0;
blueswir1db7b5422007-05-26 17:36:03 +00002372 }
2373 }
2374 }
pbrook8da3ff12008-12-01 18:59:50 +00002375 region_offset += TARGET_PAGE_SIZE;
bellard33417e72003-08-10 21:47:01 +00002376 }
ths3b46e622007-09-17 08:09:54 +00002377
bellard9d420372006-06-25 22:25:22 +00002378 /* since each CPU stores ram addresses in its TLB cache, we must
2379 reset the modified entries */
2380 /* XXX: slow ! */
2381 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2382 tlb_flush(env, 1);
2383 }
bellard33417e72003-08-10 21:47:01 +00002384}
2385
bellardba863452006-09-24 18:41:10 +00002386/* XXX: temporary until new memory mapping API */
Anthony Liguoric227f092009-10-01 16:12:16 -05002387ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
bellardba863452006-09-24 18:41:10 +00002388{
2389 PhysPageDesc *p;
2390
2391 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2392 if (!p)
2393 return IO_MEM_UNASSIGNED;
2394 return p->phys_offset;
2395}
2396
Anthony Liguoric227f092009-10-01 16:12:16 -05002397void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002398{
2399 if (kvm_enabled())
2400 kvm_coalesce_mmio_region(addr, size);
2401}
2402
Anthony Liguoric227f092009-10-01 16:12:16 -05002403void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002404{
2405 if (kvm_enabled())
2406 kvm_uncoalesce_mmio_region(addr, size);
2407}
2408
Anthony Liguoric227f092009-10-01 16:12:16 -05002409ram_addr_t qemu_ram_alloc(ram_addr_t size)
pbrook94a6b542009-04-11 17:15:54 +00002410{
2411 RAMBlock *new_block;
2412
pbrook94a6b542009-04-11 17:15:54 +00002413 size = TARGET_PAGE_ALIGN(size);
2414 new_block = qemu_malloc(sizeof(*new_block));
2415
Alexander Graf6b024942009-12-05 12:44:25 +01002416#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2417 /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
2418 new_block->host = mmap((void*)0x1000000, size, PROT_EXEC|PROT_READ|PROT_WRITE,
2419 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
2420#else
pbrook94a6b542009-04-11 17:15:54 +00002421 new_block->host = qemu_vmalloc(size);
Alexander Graf6b024942009-12-05 12:44:25 +01002422#endif
Izik Eidusccb167e2009-10-08 16:39:39 +02002423#ifdef MADV_MERGEABLE
2424 madvise(new_block->host, size, MADV_MERGEABLE);
2425#endif
pbrook94a6b542009-04-11 17:15:54 +00002426 new_block->offset = last_ram_offset;
2427 new_block->length = size;
2428
2429 new_block->next = ram_blocks;
2430 ram_blocks = new_block;
2431
2432 phys_ram_dirty = qemu_realloc(phys_ram_dirty,
2433 (last_ram_offset + size) >> TARGET_PAGE_BITS);
2434 memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
2435 0xff, size >> TARGET_PAGE_BITS);
2436
2437 last_ram_offset += size;
2438
Jan Kiszka6f0437e2009-04-26 18:03:40 +02002439 if (kvm_enabled())
2440 kvm_setup_guest_memory(new_block->host, size);
2441
pbrook94a6b542009-04-11 17:15:54 +00002442 return new_block->offset;
2443}
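/* A usage sketch: a device or board model can back a region with guest
   RAM and get a host pointer to it:

       ram_addr_t offset = qemu_ram_alloc(region_size);
       uint8_t *host = qemu_get_ram_ptr(offset);

   region_size is a hypothetical size; the returned offset is what is then
   passed on as the RAM phys_offset when the region is registered with
   cpu_register_physical_memory_offset(). */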
bellarde9a1ab12007-02-08 23:08:38 +00002444
Anthony Liguoric227f092009-10-01 16:12:16 -05002445void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00002446{
pbrook94a6b542009-04-11 17:15:54 +00002447 /* TODO: implement this. */
bellarde9a1ab12007-02-08 23:08:38 +00002448}
2449
pbrookdc828ca2009-04-09 22:21:07 +00002450/* Return a host pointer to ram allocated with qemu_ram_alloc.
pbrook5579c7f2009-04-11 14:47:08 +00002451 With the exception of the softmmu code in this file, this should
2452 only be used for local memory (e.g. video ram) that the device owns,
2453 and knows it isn't going to access beyond the end of the block.
2454
2455 It should not be used for general purpose DMA.
2456 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2457 */
Anthony Liguoric227f092009-10-01 16:12:16 -05002458void *qemu_get_ram_ptr(ram_addr_t addr)
pbrookdc828ca2009-04-09 22:21:07 +00002459{
pbrook94a6b542009-04-11 17:15:54 +00002460 RAMBlock *prev;
2461 RAMBlock **prevp;
2462 RAMBlock *block;
2463
pbrook94a6b542009-04-11 17:15:54 +00002464 prev = NULL;
2465 prevp = &ram_blocks;
2466 block = ram_blocks;
2467 while (block && (block->offset > addr
2468 || block->offset + block->length <= addr)) {
2469 if (prev)
2470 prevp = &prev->next;
2471 prev = block;
2472 block = block->next;
2473 }
2474 if (!block) {
2475 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2476 abort();
2477 }
 2478 /* Move this entry to the start of the list. */
2479 if (prev) {
2480 prev->next = block->next;
2481 block->next = *prevp;
2482 *prevp = block;
2483 }
2484 return block->host + (addr - block->offset);
pbrookdc828ca2009-04-09 22:21:07 +00002485}
2486
pbrook5579c7f2009-04-11 14:47:08 +00002487/* Some of the softmmu routines need to translate from a host pointer
2488 (typically a TLB entry) back to a ram offset. */
Anthony Liguoric227f092009-10-01 16:12:16 -05002489ram_addr_t qemu_ram_addr_from_host(void *ptr)
pbrook5579c7f2009-04-11 14:47:08 +00002490{
pbrook94a6b542009-04-11 17:15:54 +00002491 RAMBlock *prev;
2492 RAMBlock **prevp;
2493 RAMBlock *block;
2494 uint8_t *host = ptr;
2495
pbrook94a6b542009-04-11 17:15:54 +00002496 prev = NULL;
2497 prevp = &ram_blocks;
2498 block = ram_blocks;
2499 while (block && (block->host > host
2500 || block->host + block->length <= host)) {
2501 if (prev)
2502 prevp = &prev->next;
2503 prev = block;
2504 block = block->next;
2505 }
2506 if (!block) {
2507 fprintf(stderr, "Bad ram pointer %p\n", ptr);
2508 abort();
2509 }
2510 return block->offset + (host - block->host);
pbrook5579c7f2009-04-11 14:47:08 +00002511}
2512
Anthony Liguoric227f092009-10-01 16:12:16 -05002513static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
bellard33417e72003-08-10 21:47:01 +00002514{
pbrook67d3b952006-12-18 05:03:52 +00002515#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00002516 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
pbrook67d3b952006-12-18 05:03:52 +00002517#endif
Edgar E. Iglesiasfaed1c22009-09-03 13:25:09 +02002518#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
blueswir1e18231a2008-10-06 18:46:28 +00002519 do_unassigned_access(addr, 0, 0, 0, 1);
2520#endif
2521 return 0;
2522}
2523
Anthony Liguoric227f092009-10-01 16:12:16 -05002524static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
blueswir1e18231a2008-10-06 18:46:28 +00002525{
2526#ifdef DEBUG_UNASSIGNED
2527 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2528#endif
Edgar E. Iglesiasfaed1c22009-09-03 13:25:09 +02002529#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
blueswir1e18231a2008-10-06 18:46:28 +00002530 do_unassigned_access(addr, 0, 0, 0, 2);
2531#endif
2532 return 0;
2533}
2534
Anthony Liguoric227f092009-10-01 16:12:16 -05002535static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
blueswir1e18231a2008-10-06 18:46:28 +00002536{
2537#ifdef DEBUG_UNASSIGNED
2538 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2539#endif
Edgar E. Iglesiasfaed1c22009-09-03 13:25:09 +02002540#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
blueswir1e18231a2008-10-06 18:46:28 +00002541 do_unassigned_access(addr, 0, 0, 0, 4);
blueswir1b4f0a312007-05-06 17:59:24 +00002542#endif
bellard33417e72003-08-10 21:47:01 +00002543 return 0;
2544}
2545
Anthony Liguoric227f092009-10-01 16:12:16 -05002546static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
bellard33417e72003-08-10 21:47:01 +00002547{
pbrook67d3b952006-12-18 05:03:52 +00002548#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00002549 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
pbrook67d3b952006-12-18 05:03:52 +00002550#endif
Edgar E. Iglesiasfaed1c22009-09-03 13:25:09 +02002551#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
blueswir1e18231a2008-10-06 18:46:28 +00002552 do_unassigned_access(addr, 1, 0, 0, 1);
2553#endif
2554}
2555
Anthony Liguoric227f092009-10-01 16:12:16 -05002556static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
blueswir1e18231a2008-10-06 18:46:28 +00002557{
2558#ifdef DEBUG_UNASSIGNED
2559 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2560#endif
Edgar E. Iglesiasfaed1c22009-09-03 13:25:09 +02002561#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
blueswir1e18231a2008-10-06 18:46:28 +00002562 do_unassigned_access(addr, 1, 0, 0, 2);
2563#endif
2564}
2565
Anthony Liguoric227f092009-10-01 16:12:16 -05002566static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
blueswir1e18231a2008-10-06 18:46:28 +00002567{
2568#ifdef DEBUG_UNASSIGNED
2569 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2570#endif
Edgar E. Iglesiasfaed1c22009-09-03 13:25:09 +02002571#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
blueswir1e18231a2008-10-06 18:46:28 +00002572 do_unassigned_access(addr, 1, 0, 0, 4);
blueswir1b4f0a312007-05-06 17:59:24 +00002573#endif
bellard33417e72003-08-10 21:47:01 +00002574}
2575
Blue Swirld60efc62009-08-25 18:29:31 +00002576static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
bellard33417e72003-08-10 21:47:01 +00002577 unassigned_mem_readb,
blueswir1e18231a2008-10-06 18:46:28 +00002578 unassigned_mem_readw,
2579 unassigned_mem_readl,
bellard33417e72003-08-10 21:47:01 +00002580};
2581
Blue Swirld60efc62009-08-25 18:29:31 +00002582static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
bellard33417e72003-08-10 21:47:01 +00002583 unassigned_mem_writeb,
blueswir1e18231a2008-10-06 18:46:28 +00002584 unassigned_mem_writew,
2585 unassigned_mem_writel,
bellard33417e72003-08-10 21:47:01 +00002586};
2587
Anthony Liguoric227f092009-10-01 16:12:16 -05002588static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
pbrook0f459d12008-06-09 00:20:13 +00002589 uint32_t val)
bellard1ccde1c2004-02-06 19:46:14 +00002590{
bellard3a7d9292005-08-21 09:26:42 +00002591 int dirty_flags;
bellard3a7d9292005-08-21 09:26:42 +00002592 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2593 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2594#if !defined(CONFIG_USER_ONLY)
2595 tb_invalidate_phys_page_fast(ram_addr, 1);
2596 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2597#endif
2598 }
pbrook5579c7f2009-04-11 14:47:08 +00002599 stb_p(qemu_get_ram_ptr(ram_addr), val);
bellardf23db162005-08-21 19:12:28 +00002600 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2601 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2602 /* we remove the notdirty callback only if the code has been
2603 flushed */
2604 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00002605 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00002606}
2607
Anthony Liguoric227f092009-10-01 16:12:16 -05002608static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
pbrook0f459d12008-06-09 00:20:13 +00002609 uint32_t val)
bellard1ccde1c2004-02-06 19:46:14 +00002610{
bellard3a7d9292005-08-21 09:26:42 +00002611 int dirty_flags;
bellard3a7d9292005-08-21 09:26:42 +00002612 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2613 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2614#if !defined(CONFIG_USER_ONLY)
2615 tb_invalidate_phys_page_fast(ram_addr, 2);
2616 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2617#endif
2618 }
pbrook5579c7f2009-04-11 14:47:08 +00002619 stw_p(qemu_get_ram_ptr(ram_addr), val);
bellardf23db162005-08-21 19:12:28 +00002620 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2621 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2622 /* we remove the notdirty callback only if the code has been
2623 flushed */
2624 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00002625 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00002626}
2627
Anthony Liguoric227f092009-10-01 16:12:16 -05002628static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
pbrook0f459d12008-06-09 00:20:13 +00002629 uint32_t val)
bellard1ccde1c2004-02-06 19:46:14 +00002630{
bellard3a7d9292005-08-21 09:26:42 +00002631 int dirty_flags;
bellard3a7d9292005-08-21 09:26:42 +00002632 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2633 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2634#if !defined(CONFIG_USER_ONLY)
2635 tb_invalidate_phys_page_fast(ram_addr, 4);
2636 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2637#endif
2638 }
pbrook5579c7f2009-04-11 14:47:08 +00002639 stl_p(qemu_get_ram_ptr(ram_addr), val);
bellardf23db162005-08-21 19:12:28 +00002640 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2641 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2642 /* we remove the notdirty callback only if the code has been
2643 flushed */
2644 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00002645 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00002646}
2647
Blue Swirld60efc62009-08-25 18:29:31 +00002648static CPUReadMemoryFunc * const error_mem_read[3] = {
bellard3a7d9292005-08-21 09:26:42 +00002649 NULL, /* never used */
2650 NULL, /* never used */
2651 NULL, /* never used */
2652};
2653
Blue Swirld60efc62009-08-25 18:29:31 +00002654static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
bellard1ccde1c2004-02-06 19:46:14 +00002655 notdirty_mem_writeb,
2656 notdirty_mem_writew,
2657 notdirty_mem_writel,
2658};
2659
pbrook0f459d12008-06-09 00:20:13 +00002660/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00002661static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00002662{
2663 CPUState *env = cpu_single_env;
aliguori06d55cc2008-11-18 20:24:06 +00002664 target_ulong pc, cs_base;
2665 TranslationBlock *tb;
pbrook0f459d12008-06-09 00:20:13 +00002666 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00002667 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00002668 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00002669
aliguori06d55cc2008-11-18 20:24:06 +00002670 if (env->watchpoint_hit) {
2671 /* We re-entered the check after replacing the TB. Now raise
2672 * the debug interrupt so that is will trigger after the
2673 * current instruction. */
2674 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2675 return;
2676 }
pbrook2e70f6e2008-06-29 01:03:05 +00002677 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002678 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00002679 if ((vaddr == (wp->vaddr & len_mask) ||
2680 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00002681 wp->flags |= BP_WATCHPOINT_HIT;
2682 if (!env->watchpoint_hit) {
2683 env->watchpoint_hit = wp;
2684 tb = tb_find_pc(env->mem_io_pc);
2685 if (!tb) {
2686 cpu_abort(env, "check_watchpoint: could not find TB for "
2687 "pc=%p", (void *)env->mem_io_pc);
2688 }
2689 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2690 tb_phys_invalidate(tb, -1);
2691 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2692 env->exception_index = EXCP_DEBUG;
2693 } else {
2694 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2695 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2696 }
2697 cpu_resume_from_signal(env, NULL);
aliguori06d55cc2008-11-18 20:24:06 +00002698 }
aliguori6e140f22008-11-18 20:37:55 +00002699 } else {
2700 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00002701 }
2702 }
2703}
2704
pbrook6658ffb2007-03-16 23:58:11 +00002705/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2706 so these check for a hit then pass through to the normal out-of-line
2707 phys routines. */
Anthony Liguoric227f092009-10-01 16:12:16 -05002708static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
pbrook6658ffb2007-03-16 23:58:11 +00002709{
aliguorib4051332008-11-18 20:14:20 +00002710 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
pbrook6658ffb2007-03-16 23:58:11 +00002711 return ldub_phys(addr);
2712}
2713
Anthony Liguoric227f092009-10-01 16:12:16 -05002714static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
pbrook6658ffb2007-03-16 23:58:11 +00002715{
aliguorib4051332008-11-18 20:14:20 +00002716 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
pbrook6658ffb2007-03-16 23:58:11 +00002717 return lduw_phys(addr);
2718}
2719
Anthony Liguoric227f092009-10-01 16:12:16 -05002720static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
pbrook6658ffb2007-03-16 23:58:11 +00002721{
aliguorib4051332008-11-18 20:14:20 +00002722 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
pbrook6658ffb2007-03-16 23:58:11 +00002723 return ldl_phys(addr);
2724}
2725
Anthony Liguoric227f092009-10-01 16:12:16 -05002726static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
pbrook6658ffb2007-03-16 23:58:11 +00002727 uint32_t val)
2728{
aliguorib4051332008-11-18 20:14:20 +00002729 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
pbrook6658ffb2007-03-16 23:58:11 +00002730 stb_phys(addr, val);
2731}
2732
Anthony Liguoric227f092009-10-01 16:12:16 -05002733static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
pbrook6658ffb2007-03-16 23:58:11 +00002734 uint32_t val)
2735{
aliguorib4051332008-11-18 20:14:20 +00002736 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
pbrook6658ffb2007-03-16 23:58:11 +00002737 stw_phys(addr, val);
2738}
2739
Anthony Liguoric227f092009-10-01 16:12:16 -05002740static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
pbrook6658ffb2007-03-16 23:58:11 +00002741 uint32_t val)
2742{
aliguorib4051332008-11-18 20:14:20 +00002743 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
pbrook6658ffb2007-03-16 23:58:11 +00002744 stl_phys(addr, val);
2745}
2746
Blue Swirld60efc62009-08-25 18:29:31 +00002747static CPUReadMemoryFunc * const watch_mem_read[3] = {
pbrook6658ffb2007-03-16 23:58:11 +00002748 watch_mem_readb,
2749 watch_mem_readw,
2750 watch_mem_readl,
2751};
2752
Blue Swirld60efc62009-08-25 18:29:31 +00002753static CPUWriteMemoryFunc * const watch_mem_write[3] = {
pbrook6658ffb2007-03-16 23:58:11 +00002754 watch_mem_writeb,
2755 watch_mem_writew,
2756 watch_mem_writel,
2757};
pbrook6658ffb2007-03-16 23:58:11 +00002758
Anthony Liguoric227f092009-10-01 16:12:16 -05002759static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
blueswir1db7b5422007-05-26 17:36:03 +00002760 unsigned int len)
2761{
blueswir1db7b5422007-05-26 17:36:03 +00002762 uint32_t ret;
2763 unsigned int idx;
2764
pbrook8da3ff12008-12-01 18:59:50 +00002765 idx = SUBPAGE_IDX(addr);
blueswir1db7b5422007-05-26 17:36:03 +00002766#if defined(DEBUG_SUBPAGE)
2767 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2768 mmio, len, addr, idx);
2769#endif
pbrook8da3ff12008-12-01 18:59:50 +00002770 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2771 addr + mmio->region_offset[idx][0][len]);
blueswir1db7b5422007-05-26 17:36:03 +00002772
2773 return ret;
2774}
2775
Anthony Liguoric227f092009-10-01 16:12:16 -05002776static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
blueswir1db7b5422007-05-26 17:36:03 +00002777 uint32_t value, unsigned int len)
2778{
blueswir1db7b5422007-05-26 17:36:03 +00002779 unsigned int idx;
2780
pbrook8da3ff12008-12-01 18:59:50 +00002781 idx = SUBPAGE_IDX(addr);
blueswir1db7b5422007-05-26 17:36:03 +00002782#if defined(DEBUG_SUBPAGE)
2783 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2784 mmio, len, addr, idx, value);
2785#endif
pbrook8da3ff12008-12-01 18:59:50 +00002786 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2787 addr + mmio->region_offset[idx][1][len],
2788 value);
blueswir1db7b5422007-05-26 17:36:03 +00002789}
2790
Anthony Liguoric227f092009-10-01 16:12:16 -05002791static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
blueswir1db7b5422007-05-26 17:36:03 +00002792{
2793#if defined(DEBUG_SUBPAGE)
2794 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2795#endif
2796
2797 return subpage_readlen(opaque, addr, 0);
2798}
2799
Anthony Liguoric227f092009-10-01 16:12:16 -05002800static void subpage_writeb (void *opaque, target_phys_addr_t addr,
blueswir1db7b5422007-05-26 17:36:03 +00002801 uint32_t value)
2802{
2803#if defined(DEBUG_SUBPAGE)
2804 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2805#endif
2806 subpage_writelen(opaque, addr, value, 0);
2807}
2808
Anthony Liguoric227f092009-10-01 16:12:16 -05002809static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
blueswir1db7b5422007-05-26 17:36:03 +00002810{
2811#if defined(DEBUG_SUBPAGE)
2812 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2813#endif
2814
2815 return subpage_readlen(opaque, addr, 1);
2816}
2817
Anthony Liguoric227f092009-10-01 16:12:16 -05002818static void subpage_writew (void *opaque, target_phys_addr_t addr,
blueswir1db7b5422007-05-26 17:36:03 +00002819 uint32_t value)
2820{
2821#if defined(DEBUG_SUBPAGE)
2822 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2823#endif
2824 subpage_writelen(opaque, addr, value, 1);
2825}
2826
Anthony Liguoric227f092009-10-01 16:12:16 -05002827static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
blueswir1db7b5422007-05-26 17:36:03 +00002828{
2829#if defined(DEBUG_SUBPAGE)
2830 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2831#endif
2832
2833 return subpage_readlen(opaque, addr, 2);
2834}
2835
2836static void subpage_writel (void *opaque,
Anthony Liguoric227f092009-10-01 16:12:16 -05002837 target_phys_addr_t addr, uint32_t value)
blueswir1db7b5422007-05-26 17:36:03 +00002838{
2839#if defined(DEBUG_SUBPAGE)
2840 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2841#endif
2842 subpage_writelen(opaque, addr, value, 2);
2843}
2844
Blue Swirld60efc62009-08-25 18:29:31 +00002845static CPUReadMemoryFunc * const subpage_read[] = {
blueswir1db7b5422007-05-26 17:36:03 +00002846 &subpage_readb,
2847 &subpage_readw,
2848 &subpage_readl,
2849};
2850
Blue Swirld60efc62009-08-25 18:29:31 +00002851static CPUWriteMemoryFunc * const subpage_write[] = {
blueswir1db7b5422007-05-26 17:36:03 +00002852 &subpage_writeb,
2853 &subpage_writew,
2854 &subpage_writel,
2855};
2856
Anthony Liguoric227f092009-10-01 16:12:16 -05002857static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2858 ram_addr_t memory, ram_addr_t region_offset)
blueswir1db7b5422007-05-26 17:36:03 +00002859{
2860 int idx, eidx;
blueswir14254fab2008-01-01 16:57:19 +00002861 unsigned int i;
blueswir1db7b5422007-05-26 17:36:03 +00002862
2863 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2864 return -1;
2865 idx = SUBPAGE_IDX(start);
2866 eidx = SUBPAGE_IDX(end);
2867#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00002868 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
blueswir1db7b5422007-05-26 17:36:03 +00002869 mmio, start, end, idx, eidx, memory);
2870#endif
2871 memory >>= IO_MEM_SHIFT;
2872 for (; idx <= eidx; idx++) {
blueswir14254fab2008-01-01 16:57:19 +00002873 for (i = 0; i < 4; i++) {
blueswir13ee89922008-01-02 19:45:26 +00002874 if (io_mem_read[memory][i]) {
2875 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2876 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
pbrook8da3ff12008-12-01 18:59:50 +00002877 mmio->region_offset[idx][0][i] = region_offset;
blueswir13ee89922008-01-02 19:45:26 +00002878 }
2879 if (io_mem_write[memory][i]) {
2880 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2881 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
pbrook8da3ff12008-12-01 18:59:50 +00002882 mmio->region_offset[idx][1][i] = region_offset;
blueswir13ee89922008-01-02 19:45:26 +00002883 }
blueswir14254fab2008-01-01 16:57:19 +00002884 }
blueswir1db7b5422007-05-26 17:36:03 +00002885 }
2886
2887 return 0;
2888}
2889
Anthony Liguoric227f092009-10-01 16:12:16 -05002890static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2891 ram_addr_t orig_memory, ram_addr_t region_offset)
blueswir1db7b5422007-05-26 17:36:03 +00002892{
Anthony Liguoric227f092009-10-01 16:12:16 -05002893 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00002894 int subpage_memory;
2895
Anthony Liguoric227f092009-10-01 16:12:16 -05002896 mmio = qemu_mallocz(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00002897
2898 mmio->base = base;
Avi Kivity1eed09c2009-06-14 11:38:51 +03002899 subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
blueswir1db7b5422007-05-26 17:36:03 +00002900#if defined(DEBUG_SUBPAGE)
aliguori1eec6142009-02-05 22:06:18 +00002901 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2902 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
blueswir1db7b5422007-05-26 17:36:03 +00002903#endif
aliguori1eec6142009-02-05 22:06:18 +00002904 *phys = subpage_memory | IO_MEM_SUBPAGE;
2905 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
pbrook8da3ff12008-12-01 18:59:50 +00002906 region_offset);
blueswir1db7b5422007-05-26 17:36:03 +00002907
2908 return mmio;
2909}
2910
aliguori88715652009-02-11 15:20:58 +00002911static int get_free_io_mem_idx(void)
2912{
2913 int i;
2914
2915 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
2916 if (!io_mem_used[i]) {
2917 io_mem_used[i] = 1;
2918 return i;
2919 }
Riku Voipioc6703b42009-12-03 15:56:05 +02002920 fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
aliguori88715652009-02-11 15:20:58 +00002921 return -1;
2922}
2923
bellard33417e72003-08-10 21:47:01 +00002924/* mem_read and mem_write are arrays of functions containing the
2925 function to access byte (index 0), word (index 1) and dword (index
Paul Brook0b4e6e32009-04-30 18:37:55 +01002926 2). Functions can be omitted with a NULL function pointer.
blueswir13ee89922008-01-02 19:45:26 +00002927 If io_index is non-zero, the corresponding io zone is
blueswir14254fab2008-01-01 16:57:19 +00002928 modified. If it is zero, a new io zone is allocated. The return
2929 value can be used with cpu_register_physical_memory(). (-1) is
2930 returned on error. */
Avi Kivity1eed09c2009-06-14 11:38:51 +03002931static int cpu_register_io_memory_fixed(int io_index,
Blue Swirld60efc62009-08-25 18:29:31 +00002932 CPUReadMemoryFunc * const *mem_read,
2933 CPUWriteMemoryFunc * const *mem_write,
Avi Kivity1eed09c2009-06-14 11:38:51 +03002934 void *opaque)
bellard33417e72003-08-10 21:47:01 +00002935{
blueswir14254fab2008-01-01 16:57:19 +00002936 int i, subwidth = 0;
bellard33417e72003-08-10 21:47:01 +00002937
2938 if (io_index <= 0) {
aliguori88715652009-02-11 15:20:58 +00002939 io_index = get_free_io_mem_idx();
2940 if (io_index == -1)
2941 return io_index;
bellard33417e72003-08-10 21:47:01 +00002942 } else {
Avi Kivity1eed09c2009-06-14 11:38:51 +03002943 io_index >>= IO_MEM_SHIFT;
bellard33417e72003-08-10 21:47:01 +00002944 if (io_index >= IO_MEM_NB_ENTRIES)
2945 return -1;
2946 }
bellardb5ff1b32005-11-26 10:38:39 +00002947
bellard33417e72003-08-10 21:47:01 +00002948 for(i = 0;i < 3; i++) {
blueswir14254fab2008-01-01 16:57:19 +00002949 if (!mem_read[i] || !mem_write[i])
2950 subwidth = IO_MEM_SUBWIDTH;
bellard33417e72003-08-10 21:47:01 +00002951 io_mem_read[io_index][i] = mem_read[i];
2952 io_mem_write[io_index][i] = mem_write[i];
2953 }
bellarda4193c82004-06-03 14:01:43 +00002954 io_mem_opaque[io_index] = opaque;
blueswir14254fab2008-01-01 16:57:19 +00002955 return (io_index << IO_MEM_SHIFT) | subwidth;
bellard33417e72003-08-10 21:47:01 +00002956}
bellard61382a52003-10-27 21:22:23 +00002957
Blue Swirld60efc62009-08-25 18:29:31 +00002958int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
2959 CPUWriteMemoryFunc * const *mem_write,
Avi Kivity1eed09c2009-06-14 11:38:51 +03002960 void *opaque)
2961{
2962 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
2963}
2964
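/* Usage sketch (hypothetical, compiled out): the typical registration dance
   for a device exposing a 32-bit-only MMIO window.  The mydev_* names and the
   0x1000 size are made up for illustration; the NULL byte/word entries make
   cpu_register_io_memory() flag the region with IO_MEM_SUBWIDTH, and the
   returned value is passed to cpu_register_physical_memory() as the comment
   above describes. */
#if 0
static uint32_t mydev_readl(void *opaque, target_phys_addr_t addr)
{
    return 0;                   /* read back whatever the device maintains */
}

static void mydev_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* latch val into the device state pointed to by opaque */
}

static CPUReadMemoryFunc * const mydev_read[3] = { NULL, NULL, mydev_readl };
static CPUWriteMemoryFunc * const mydev_write[3] = { NULL, NULL, mydev_writel };

static void mydev_map(target_phys_addr_t base, void *opaque)
{
    int iomemtype = cpu_register_io_memory(mydev_read, mydev_write, opaque);

    cpu_register_physical_memory(base, 0x1000, iomemtype);
}
#endif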
aliguori88715652009-02-11 15:20:58 +00002965void cpu_unregister_io_memory(int io_table_address)
2966{
2967 int i;
2968 int io_index = io_table_address >> IO_MEM_SHIFT;
2969
2970 for (i=0;i < 3; i++) {
2971 io_mem_read[io_index][i] = unassigned_mem_read[i];
2972 io_mem_write[io_index][i] = unassigned_mem_write[i];
2973 }
2974 io_mem_opaque[io_index] = NULL;
2975 io_mem_used[io_index] = 0;
2976}
2977
Avi Kivitye9179ce2009-06-14 11:38:52 +03002978static void io_mem_init(void)
2979{
2980 int i;
2981
2982 cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
2983 cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
2984 cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
2985 for (i=0; i<5; i++)
2986 io_mem_used[i] = 1;
2987
2988 io_mem_watch = cpu_register_io_memory(watch_mem_read,
2989 watch_mem_write, NULL);
Avi Kivitye9179ce2009-06-14 11:38:52 +03002990}
2991
pbrooke2eef172008-06-08 01:09:01 +00002992#endif /* !defined(CONFIG_USER_ONLY) */
2993
bellard13eb76e2004-01-24 15:23:36 +00002994/* physical memory access (slow version, mainly for debug) */
2995#if defined(CONFIG_USER_ONLY)
Anthony Liguoric227f092009-10-01 16:12:16 -05002996void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
bellard13eb76e2004-01-24 15:23:36 +00002997 int len, int is_write)
2998{
2999 int l, flags;
3000 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00003001 void * p;
bellard13eb76e2004-01-24 15:23:36 +00003002
3003 while (len > 0) {
3004 page = addr & TARGET_PAGE_MASK;
3005 l = (page + TARGET_PAGE_SIZE) - addr;
3006 if (l > len)
3007 l = len;
3008 flags = page_get_flags(page);
3009 if (!(flags & PAGE_VALID))
3010 return;
3011 if (is_write) {
3012 if (!(flags & PAGE_WRITE))
3013 return;
bellard579a97f2007-11-11 14:26:47 +00003014 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003015 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
bellard579a97f2007-11-11 14:26:47 +00003016 /* FIXME - should this return an error rather than just fail? */
3017 return;
aurel3272fb7da2008-04-27 23:53:45 +00003018 memcpy(p, buf, l);
3019 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00003020 } else {
3021 if (!(flags & PAGE_READ))
3022 return;
bellard579a97f2007-11-11 14:26:47 +00003023 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003024 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
bellard579a97f2007-11-11 14:26:47 +00003025 /* FIXME - should this return an error rather than just fail? */
3026 return;
aurel3272fb7da2008-04-27 23:53:45 +00003027 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00003028 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00003029 }
3030 len -= l;
3031 buf += l;
3032 addr += l;
3033 }
3034}
bellard8df1cd02005-01-28 22:37:22 +00003035
bellard13eb76e2004-01-24 15:23:36 +00003036#else
Anthony Liguoric227f092009-10-01 16:12:16 -05003037void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
bellard13eb76e2004-01-24 15:23:36 +00003038 int len, int is_write)
3039{
3040 int l, io_index;
3041 uint8_t *ptr;
3042 uint32_t val;
Anthony Liguoric227f092009-10-01 16:12:16 -05003043 target_phys_addr_t page;
bellard2e126692004-04-25 21:28:44 +00003044 unsigned long pd;
bellard92e873b2004-05-21 14:52:29 +00003045 PhysPageDesc *p;
ths3b46e622007-09-17 08:09:54 +00003046
bellard13eb76e2004-01-24 15:23:36 +00003047 while (len > 0) {
3048 page = addr & TARGET_PAGE_MASK;
3049 l = (page + TARGET_PAGE_SIZE) - addr;
3050 if (l > len)
3051 l = len;
bellard92e873b2004-05-21 14:52:29 +00003052 p = phys_page_find(page >> TARGET_PAGE_BITS);
bellard13eb76e2004-01-24 15:23:36 +00003053 if (!p) {
3054 pd = IO_MEM_UNASSIGNED;
3055 } else {
3056 pd = p->phys_offset;
3057 }
ths3b46e622007-09-17 08:09:54 +00003058
bellard13eb76e2004-01-24 15:23:36 +00003059 if (is_write) {
bellard3a7d9292005-08-21 09:26:42 +00003060 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
Anthony Liguoric227f092009-10-01 16:12:16 -05003061 target_phys_addr_t addr1 = addr;
bellard13eb76e2004-01-24 15:23:36 +00003062 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00003063 if (p)
aurel326c2934d2009-02-18 21:37:17 +00003064 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bellard6a00d602005-11-21 23:25:50 +00003065 /* XXX: could force cpu_single_env to NULL to avoid
3066 potential bugs */
aurel326c2934d2009-02-18 21:37:17 +00003067 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003068 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003069 val = ldl_p(buf);
aurel326c2934d2009-02-18 21:37:17 +00003070 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
bellard13eb76e2004-01-24 15:23:36 +00003071 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003072 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003073 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003074 val = lduw_p(buf);
aurel326c2934d2009-02-18 21:37:17 +00003075 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
bellard13eb76e2004-01-24 15:23:36 +00003076 l = 2;
3077 } else {
bellard1c213d12005-09-03 10:49:04 +00003078 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003079 val = ldub_p(buf);
aurel326c2934d2009-02-18 21:37:17 +00003080 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
bellard13eb76e2004-01-24 15:23:36 +00003081 l = 1;
3082 }
3083 } else {
bellardb448f2f2004-02-25 23:24:04 +00003084 unsigned long addr1;
3085 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
bellard13eb76e2004-01-24 15:23:36 +00003086 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003087 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00003088 memcpy(ptr, buf, l);
bellard3a7d9292005-08-21 09:26:42 +00003089 if (!cpu_physical_memory_is_dirty(addr1)) {
3090 /* invalidate code */
3091 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3092 /* set dirty bit */
ths5fafdf22007-09-16 21:08:06 +00003093 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
bellardf23db162005-08-21 19:12:28 +00003094 (0xff & ~CODE_DIRTY_FLAG);
bellard3a7d9292005-08-21 09:26:42 +00003095 }
bellard13eb76e2004-01-24 15:23:36 +00003096 }
3097 } else {
ths5fafdf22007-09-16 21:08:06 +00003098 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
bellard2a4188a2006-06-25 21:54:59 +00003099 !(pd & IO_MEM_ROMD)) {
Anthony Liguoric227f092009-10-01 16:12:16 -05003100 target_phys_addr_t addr1 = addr;
bellard13eb76e2004-01-24 15:23:36 +00003101 /* I/O case */
3102 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00003103 if (p)
aurel326c2934d2009-02-18 21:37:17 +00003104 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3105 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003106 /* 32 bit read access */
aurel326c2934d2009-02-18 21:37:17 +00003107 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
bellardc27004e2005-01-03 23:35:10 +00003108 stl_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003109 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003110 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003111 /* 16 bit read access */
aurel326c2934d2009-02-18 21:37:17 +00003112 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
bellardc27004e2005-01-03 23:35:10 +00003113 stw_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003114 l = 2;
3115 } else {
bellard1c213d12005-09-03 10:49:04 +00003116 /* 8 bit read access */
aurel326c2934d2009-02-18 21:37:17 +00003117 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
bellardc27004e2005-01-03 23:35:10 +00003118 stb_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003119 l = 1;
3120 }
3121 } else {
3122 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003123 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bellard13eb76e2004-01-24 15:23:36 +00003124 (addr & ~TARGET_PAGE_MASK);
3125 memcpy(buf, ptr, l);
3126 }
3127 }
3128 len -= l;
3129 buf += l;
3130 addr += l;
3131 }
3132}
bellard8df1cd02005-01-28 22:37:22 +00003133
bellardd0ecd2a2006-04-23 17:14:48 +00003134/* used for ROM loading : can write in RAM and ROM */
Anthony Liguoric227f092009-10-01 16:12:16 -05003135void cpu_physical_memory_write_rom(target_phys_addr_t addr,
bellardd0ecd2a2006-04-23 17:14:48 +00003136 const uint8_t *buf, int len)
3137{
3138 int l;
3139 uint8_t *ptr;
Anthony Liguoric227f092009-10-01 16:12:16 -05003140 target_phys_addr_t page;
bellardd0ecd2a2006-04-23 17:14:48 +00003141 unsigned long pd;
3142 PhysPageDesc *p;
ths3b46e622007-09-17 08:09:54 +00003143
bellardd0ecd2a2006-04-23 17:14:48 +00003144 while (len > 0) {
3145 page = addr & TARGET_PAGE_MASK;
3146 l = (page + TARGET_PAGE_SIZE) - addr;
3147 if (l > len)
3148 l = len;
3149 p = phys_page_find(page >> TARGET_PAGE_BITS);
3150 if (!p) {
3151 pd = IO_MEM_UNASSIGNED;
3152 } else {
3153 pd = p->phys_offset;
3154 }
ths3b46e622007-09-17 08:09:54 +00003155
bellardd0ecd2a2006-04-23 17:14:48 +00003156 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
bellard2a4188a2006-06-25 21:54:59 +00003157 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3158 !(pd & IO_MEM_ROMD)) {
bellardd0ecd2a2006-04-23 17:14:48 +00003159 /* do nothing */
3160 } else {
3161 unsigned long addr1;
3162 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3163 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003164 ptr = qemu_get_ram_ptr(addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00003165 memcpy(ptr, buf, l);
3166 }
3167 len -= l;
3168 buf += l;
3169 addr += l;
3170 }
3171}
3172
aliguori6d16c2f2009-01-22 16:59:11 +00003173typedef struct {
3174 void *buffer;
Anthony Liguoric227f092009-10-01 16:12:16 -05003175 target_phys_addr_t addr;
3176 target_phys_addr_t len;
aliguori6d16c2f2009-01-22 16:59:11 +00003177} BounceBuffer;
3178
3179static BounceBuffer bounce;
3180
aliguoriba223c22009-01-22 16:59:16 +00003181typedef struct MapClient {
3182 void *opaque;
3183 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00003184 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00003185} MapClient;
3186
Blue Swirl72cf2d42009-09-12 07:36:22 +00003187static QLIST_HEAD(map_client_list, MapClient) map_client_list
3188 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003189
3190void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3191{
3192 MapClient *client = qemu_malloc(sizeof(*client));
3193
3194 client->opaque = opaque;
3195 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003196 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00003197 return client;
3198}
3199
3200void cpu_unregister_map_client(void *_client)
3201{
3202 MapClient *client = (MapClient *)_client;
3203
Blue Swirl72cf2d42009-09-12 07:36:22 +00003204 QLIST_REMOVE(client, link);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09003205 qemu_free(client);
aliguoriba223c22009-01-22 16:59:16 +00003206}
3207
3208static void cpu_notify_map_clients(void)
3209{
3210 MapClient *client;
3211
Blue Swirl72cf2d42009-09-12 07:36:22 +00003212 while (!QLIST_EMPTY(&map_client_list)) {
3213 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003214 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09003215 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00003216 }
3217}
3218
aliguori6d16c2f2009-01-22 16:59:11 +00003219/* Map a physical memory region into a host virtual address.
3220 * May map a subset of the requested range, given by and returned in *plen.
3221 * May return NULL if resources needed to perform the mapping are exhausted.
3222 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00003223 * Use cpu_register_map_client() to know when retrying the map operation is
3224 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00003225 */
Anthony Liguoric227f092009-10-01 16:12:16 -05003226void *cpu_physical_memory_map(target_phys_addr_t addr,
3227 target_phys_addr_t *plen,
aliguori6d16c2f2009-01-22 16:59:11 +00003228 int is_write)
3229{
Anthony Liguoric227f092009-10-01 16:12:16 -05003230 target_phys_addr_t len = *plen;
3231 target_phys_addr_t done = 0;
aliguori6d16c2f2009-01-22 16:59:11 +00003232 int l;
3233 uint8_t *ret = NULL;
3234 uint8_t *ptr;
Anthony Liguoric227f092009-10-01 16:12:16 -05003235 target_phys_addr_t page;
aliguori6d16c2f2009-01-22 16:59:11 +00003236 unsigned long pd;
3237 PhysPageDesc *p;
3238 unsigned long addr1;
3239
3240 while (len > 0) {
3241 page = addr & TARGET_PAGE_MASK;
3242 l = (page + TARGET_PAGE_SIZE) - addr;
3243 if (l > len)
3244 l = len;
3245 p = phys_page_find(page >> TARGET_PAGE_BITS);
3246 if (!p) {
3247 pd = IO_MEM_UNASSIGNED;
3248 } else {
3249 pd = p->phys_offset;
3250 }
3251
3252 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3253 if (done || bounce.buffer) {
3254 break;
3255 }
3256 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3257 bounce.addr = addr;
3258 bounce.len = l;
3259 if (!is_write) {
3260 cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3261 }
3262 ptr = bounce.buffer;
3263 } else {
3264 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
pbrook5579c7f2009-04-11 14:47:08 +00003265 ptr = qemu_get_ram_ptr(addr1);
aliguori6d16c2f2009-01-22 16:59:11 +00003266 }
3267 if (!done) {
3268 ret = ptr;
3269 } else if (ret + done != ptr) {
3270 break;
3271 }
3272
3273 len -= l;
3274 addr += l;
3275 done += l;
3276 }
3277 *plen = done;
3278 return ret;
3279}
3280
3281/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3282 * Will also mark the memory as dirty if is_write == 1. access_len gives
3283 * the amount of memory that was actually read or written by the caller.
3284 */
Anthony Liguoric227f092009-10-01 16:12:16 -05003285void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3286 int is_write, target_phys_addr_t access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00003287{
3288 if (buffer != bounce.buffer) {
3289 if (is_write) {
Anthony Liguoric227f092009-10-01 16:12:16 -05003290 ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00003291 while (access_len) {
3292 unsigned l;
3293 l = TARGET_PAGE_SIZE;
3294 if (l > access_len)
3295 l = access_len;
3296 if (!cpu_physical_memory_is_dirty(addr1)) {
3297 /* invalidate code */
3298 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3299 /* set dirty bit */
3300 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3301 (0xff & ~CODE_DIRTY_FLAG);
3302 }
3303 addr1 += l;
3304 access_len -= l;
3305 }
3306 }
3307 return;
3308 }
3309 if (is_write) {
3310 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3311 }
3312 qemu_free(bounce.buffer);
3313 bounce.buffer = NULL;
aliguoriba223c22009-01-22 16:59:16 +00003314 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00003315}
bellardd0ecd2a2006-04-23 17:14:48 +00003316
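/* Usage sketch (hypothetical, compiled out): the zero-copy DMA pattern the
   two functions above are meant for.  A real device would remember the
   pending transfer and use cpu_register_map_client() to retry when NULL is
   returned because the bounce buffer is already in use. */
#if 0
static void example_dma_to_guest(target_phys_addr_t dma_addr,
                                 target_phys_addr_t dma_len)
{
    target_phys_addr_t plen = dma_len;
    void *buf = cpu_physical_memory_map(dma_addr, &plen, 1 /* is_write */);

    if (!buf) {
        return;                 /* resources exhausted, retry via map client */
    }
    /* produce at most plen bytes into buf (may be less than dma_len) ... */
    memset(buf, 0, plen);
    cpu_physical_memory_unmap(buf, plen, 1, plen /* bytes actually written */);
}
#endif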
bellard8df1cd02005-01-28 22:37:22 +00003317/* warning: addr must be aligned */
Anthony Liguoric227f092009-10-01 16:12:16 -05003318uint32_t ldl_phys(target_phys_addr_t addr)
bellard8df1cd02005-01-28 22:37:22 +00003319{
3320 int io_index;
3321 uint8_t *ptr;
3322 uint32_t val;
3323 unsigned long pd;
3324 PhysPageDesc *p;
3325
3326 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3327 if (!p) {
3328 pd = IO_MEM_UNASSIGNED;
3329 } else {
3330 pd = p->phys_offset;
3331 }
ths3b46e622007-09-17 08:09:54 +00003332
ths5fafdf22007-09-16 21:08:06 +00003333 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
bellard2a4188a2006-06-25 21:54:59 +00003334 !(pd & IO_MEM_ROMD)) {
bellard8df1cd02005-01-28 22:37:22 +00003335 /* I/O case */
3336 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00003337 if (p)
3338 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bellard8df1cd02005-01-28 22:37:22 +00003339 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3340 } else {
3341 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003342 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bellard8df1cd02005-01-28 22:37:22 +00003343 (addr & ~TARGET_PAGE_MASK);
3344 val = ldl_p(ptr);
3345 }
3346 return val;
3347}
3348
bellard84b7b8e2005-11-28 21:19:04 +00003349/* warning: addr must be aligned */
Anthony Liguoric227f092009-10-01 16:12:16 -05003350uint64_t ldq_phys(target_phys_addr_t addr)
bellard84b7b8e2005-11-28 21:19:04 +00003351{
3352 int io_index;
3353 uint8_t *ptr;
3354 uint64_t val;
3355 unsigned long pd;
3356 PhysPageDesc *p;
3357
3358 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3359 if (!p) {
3360 pd = IO_MEM_UNASSIGNED;
3361 } else {
3362 pd = p->phys_offset;
3363 }
ths3b46e622007-09-17 08:09:54 +00003364
bellard2a4188a2006-06-25 21:54:59 +00003365 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3366 !(pd & IO_MEM_ROMD)) {
bellard84b7b8e2005-11-28 21:19:04 +00003367 /* I/O case */
3368 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00003369 if (p)
3370 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bellard84b7b8e2005-11-28 21:19:04 +00003371#ifdef TARGET_WORDS_BIGENDIAN
3372 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3373 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3374#else
3375 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3376 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3377#endif
3378 } else {
3379 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003380 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bellard84b7b8e2005-11-28 21:19:04 +00003381 (addr & ~TARGET_PAGE_MASK);
3382 val = ldq_p(ptr);
3383 }
3384 return val;
3385}
3386
bellardaab33092005-10-30 20:48:42 +00003387/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05003388uint32_t ldub_phys(target_phys_addr_t addr)
bellardaab33092005-10-30 20:48:42 +00003389{
3390 uint8_t val;
3391 cpu_physical_memory_read(addr, &val, 1);
3392 return val;
3393}
3394
3395/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05003396uint32_t lduw_phys(target_phys_addr_t addr)
bellardaab33092005-10-30 20:48:42 +00003397{
3398 uint16_t val;
3399 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3400 return tswap16(val);
3401}
3402
bellard8df1cd02005-01-28 22:37:22 +00003403/* warning: addr must be aligned. The ram page is not marked as dirty
3404 and the code inside is not invalidated. It is useful if the dirty
3405 bits are used to track modified PTEs */
Anthony Liguoric227f092009-10-01 16:12:16 -05003406void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
bellard8df1cd02005-01-28 22:37:22 +00003407{
3408 int io_index;
3409 uint8_t *ptr;
3410 unsigned long pd;
3411 PhysPageDesc *p;
3412
3413 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3414 if (!p) {
3415 pd = IO_MEM_UNASSIGNED;
3416 } else {
3417 pd = p->phys_offset;
3418 }
ths3b46e622007-09-17 08:09:54 +00003419
bellard3a7d9292005-08-21 09:26:42 +00003420 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
bellard8df1cd02005-01-28 22:37:22 +00003421 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00003422 if (p)
3423 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bellard8df1cd02005-01-28 22:37:22 +00003424 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3425 } else {
aliguori74576192008-10-06 14:02:03 +00003426 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
pbrook5579c7f2009-04-11 14:47:08 +00003427 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00003428 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00003429
3430 if (unlikely(in_migration)) {
3431 if (!cpu_physical_memory_is_dirty(addr1)) {
3432 /* invalidate code */
3433 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3434 /* set dirty bit */
3435 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3436 (0xff & ~CODE_DIRTY_FLAG);
3437 }
3438 }
bellard8df1cd02005-01-28 22:37:22 +00003439 }
3440}
3441
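/* Usage sketch (hypothetical, compiled out): the PTE-tracking case mentioned
   in the comment above.  A target MMU helper sets accessed/dirty bits in a
   guest page table entry without the write itself flipping the page's dirty
   bits or invalidating translated code. */
#if 0
static void example_set_pte_flags(target_phys_addr_t pte_addr, uint32_t flags)
{
    uint32_t pte = ldl_phys(pte_addr);

    if ((pte & flags) != flags) {
        stl_phys_notdirty(pte_addr, pte | flags);
    }
}
#endif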
Anthony Liguoric227f092009-10-01 16:12:16 -05003442void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
j_mayerbc98a7e2007-04-04 07:55:12 +00003443{
3444 int io_index;
3445 uint8_t *ptr;
3446 unsigned long pd;
3447 PhysPageDesc *p;
3448
3449 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3450 if (!p) {
3451 pd = IO_MEM_UNASSIGNED;
3452 } else {
3453 pd = p->phys_offset;
3454 }
ths3b46e622007-09-17 08:09:54 +00003455
j_mayerbc98a7e2007-04-04 07:55:12 +00003456 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3457 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00003458 if (p)
3459 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
j_mayerbc98a7e2007-04-04 07:55:12 +00003460#ifdef TARGET_WORDS_BIGENDIAN
3461 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3462 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3463#else
3464 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3465 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3466#endif
3467 } else {
pbrook5579c7f2009-04-11 14:47:08 +00003468 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
j_mayerbc98a7e2007-04-04 07:55:12 +00003469 (addr & ~TARGET_PAGE_MASK);
3470 stq_p(ptr, val);
3471 }
3472}
3473
bellard8df1cd02005-01-28 22:37:22 +00003474/* warning: addr must be aligned */
Anthony Liguoric227f092009-10-01 16:12:16 -05003475void stl_phys(target_phys_addr_t addr, uint32_t val)
bellard8df1cd02005-01-28 22:37:22 +00003476{
3477 int io_index;
3478 uint8_t *ptr;
3479 unsigned long pd;
3480 PhysPageDesc *p;
3481
3482 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3483 if (!p) {
3484 pd = IO_MEM_UNASSIGNED;
3485 } else {
3486 pd = p->phys_offset;
3487 }
ths3b46e622007-09-17 08:09:54 +00003488
bellard3a7d9292005-08-21 09:26:42 +00003489 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
bellard8df1cd02005-01-28 22:37:22 +00003490 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00003491 if (p)
3492 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bellard8df1cd02005-01-28 22:37:22 +00003493 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3494 } else {
3495 unsigned long addr1;
3496 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3497 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003498 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00003499 stl_p(ptr, val);
bellard3a7d9292005-08-21 09:26:42 +00003500 if (!cpu_physical_memory_is_dirty(addr1)) {
3501 /* invalidate code */
3502 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3503 /* set dirty bit */
bellardf23db162005-08-21 19:12:28 +00003504 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3505 (0xff & ~CODE_DIRTY_FLAG);
bellard3a7d9292005-08-21 09:26:42 +00003506 }
bellard8df1cd02005-01-28 22:37:22 +00003507 }
3508}
3509
bellardaab33092005-10-30 20:48:42 +00003510/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05003511void stb_phys(target_phys_addr_t addr, uint32_t val)
bellardaab33092005-10-30 20:48:42 +00003512{
3513 uint8_t v = val;
3514 cpu_physical_memory_write(addr, &v, 1);
3515}
3516
3517/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05003518void stw_phys(target_phys_addr_t addr, uint32_t val)
bellardaab33092005-10-30 20:48:42 +00003519{
3520 uint16_t v = tswap16(val);
3521 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3522}
3523
3524/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05003525void stq_phys(target_phys_addr_t addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00003526{
3527 val = tswap64(val);
3528 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3529}
3530
bellard13eb76e2004-01-24 15:23:36 +00003531#endif
3532
aliguori5e2972f2009-03-28 17:51:36 +00003533/* virtual memory access for debug (includes writing to ROM) */
ths5fafdf22007-09-16 21:08:06 +00003534int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00003535 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003536{
3537 int l;
Anthony Liguoric227f092009-10-01 16:12:16 -05003538 target_phys_addr_t phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00003539 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00003540
3541 while (len > 0) {
3542 page = addr & TARGET_PAGE_MASK;
3543 phys_addr = cpu_get_phys_page_debug(env, page);
3544 /* if no physical page mapped, return an error */
3545 if (phys_addr == -1)
3546 return -1;
3547 l = (page + TARGET_PAGE_SIZE) - addr;
3548 if (l > len)
3549 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00003550 phys_addr += (addr & ~TARGET_PAGE_MASK);
3551#if !defined(CONFIG_USER_ONLY)
3552 if (is_write)
3553 cpu_physical_memory_write_rom(phys_addr, buf, l);
3554 else
3555#endif
3556 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
bellard13eb76e2004-01-24 15:23:36 +00003557 len -= l;
3558 buf += l;
3559 addr += l;
3560 }
3561 return 0;
3562}
3563
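/* Usage sketch (hypothetical, compiled out): a debugger front end (such as
   the gdb stub) reading guest memory through the virtual-address path. */
#if 0
static int example_debug_peek(CPUState *env, target_ulong vaddr,
                              uint8_t *buf, int len)
{
    /* returns -1 as soon as an unmapped page is hit */
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0);
}
#endif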
pbrook2e70f6e2008-06-29 01:03:05 +00003564/* in deterministic execution mode, instructions doing device I/Os
3565 must be at the end of the TB */
3566void cpu_io_recompile(CPUState *env, void *retaddr)
3567{
3568 TranslationBlock *tb;
3569 uint32_t n, cflags;
3570 target_ulong pc, cs_base;
3571 uint64_t flags;
3572
3573 tb = tb_find_pc((unsigned long)retaddr);
3574 if (!tb) {
3575 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3576 retaddr);
3577 }
3578 n = env->icount_decr.u16.low + tb->icount;
3579 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3580 /* Calculate how many instructions had been executed before the fault
thsbf20dc02008-06-30 17:22:19 +00003581 occurred. */
pbrook2e70f6e2008-06-29 01:03:05 +00003582 n = n - env->icount_decr.u16.low;
3583 /* Generate a new TB ending on the I/O insn. */
3584 n++;
3585 /* On MIPS and SH, delay slot instructions can only be restarted if
3586 they were already the first instruction in the TB. If this is not
thsbf20dc02008-06-30 17:22:19 +00003587 the first instruction in a TB then re-execute the preceding
pbrook2e70f6e2008-06-29 01:03:05 +00003588 branch. */
3589#if defined(TARGET_MIPS)
3590 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3591 env->active_tc.PC -= 4;
3592 env->icount_decr.u16.low++;
3593 env->hflags &= ~MIPS_HFLAG_BMASK;
3594 }
3595#elif defined(TARGET_SH4)
3596 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3597 && n > 1) {
3598 env->pc -= 2;
3599 env->icount_decr.u16.low++;
3600 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3601 }
3602#endif
3603 /* This should never happen. */
3604 if (n > CF_COUNT_MASK)
3605 cpu_abort(env, "TB too big during recompile");
3606
3607 cflags = n | CF_LAST_IO;
3608 pc = tb->pc;
3609 cs_base = tb->cs_base;
3610 flags = tb->flags;
3611 tb_phys_invalidate(tb, -1);
3612 /* FIXME: In theory this could raise an exception. In practice
3613 we have already translated the block once so it's probably ok. */
3614 tb_gen_code(env, pc, cs_base, flags, cflags);
thsbf20dc02008-06-30 17:22:19 +00003615 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
pbrook2e70f6e2008-06-29 01:03:05 +00003616 the first in the TB) then we end up generating a whole new TB and
3617 repeating the fault, which is horribly inefficient.
3618 Better would be to execute just this insn uncached, or generate a
3619 second new TB. */
3620 cpu_resume_from_signal(env, NULL);
3621}
3622
bellarde3db7222005-01-26 22:00:47 +00003623void dump_exec_info(FILE *f,
3624 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3625{
3626 int i, target_code_size, max_target_code_size;
3627 int direct_jmp_count, direct_jmp2_count, cross_page;
3628 TranslationBlock *tb;
ths3b46e622007-09-17 08:09:54 +00003629
bellarde3db7222005-01-26 22:00:47 +00003630 target_code_size = 0;
3631 max_target_code_size = 0;
3632 cross_page = 0;
3633 direct_jmp_count = 0;
3634 direct_jmp2_count = 0;
3635 for(i = 0; i < nb_tbs; i++) {
3636 tb = &tbs[i];
3637 target_code_size += tb->size;
3638 if (tb->size > max_target_code_size)
3639 max_target_code_size = tb->size;
3640 if (tb->page_addr[1] != -1)
3641 cross_page++;
3642 if (tb->tb_next_offset[0] != 0xffff) {
3643 direct_jmp_count++;
3644 if (tb->tb_next_offset[1] != 0xffff) {
3645 direct_jmp2_count++;
3646 }
3647 }
3648 }
3649 /* XXX: avoid using doubles ? */
bellard57fec1f2008-02-01 10:50:11 +00003650 cpu_fprintf(f, "Translation buffer state:\n");
bellard26a5f132008-05-28 12:30:31 +00003651 cpu_fprintf(f, "gen code size %ld/%ld\n",
3652 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3653 cpu_fprintf(f, "TB count %d/%d\n",
3654 nb_tbs, code_gen_max_blocks);
ths5fafdf22007-09-16 21:08:06 +00003655 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
bellarde3db7222005-01-26 22:00:47 +00003656 nb_tbs ? target_code_size / nb_tbs : 0,
3657 max_target_code_size);
ths5fafdf22007-09-16 21:08:06 +00003658 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
bellarde3db7222005-01-26 22:00:47 +00003659 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3660 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
ths5fafdf22007-09-16 21:08:06 +00003661 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3662 cross_page,
bellarde3db7222005-01-26 22:00:47 +00003663 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3664 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
ths5fafdf22007-09-16 21:08:06 +00003665 direct_jmp_count,
bellarde3db7222005-01-26 22:00:47 +00003666 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3667 direct_jmp2_count,
3668 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
bellard57fec1f2008-02-01 10:50:11 +00003669 cpu_fprintf(f, "\nStatistics:\n");
bellarde3db7222005-01-26 22:00:47 +00003670 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3671 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3672 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
bellardb67d9a52008-05-23 09:57:34 +00003673 tcg_dump_info(f, cpu_fprintf);
bellarde3db7222005-01-26 22:00:47 +00003674}
3675
ths5fafdf22007-09-16 21:08:06 +00003676#if !defined(CONFIG_USER_ONLY)
bellard61382a52003-10-27 21:22:23 +00003677
3678#define MMUSUFFIX _cmmu
3679#define GETPC() NULL
3680#define env cpu_single_env
bellardb769d8f2004-10-03 15:07:13 +00003681#define SOFTMMU_CODE_ACCESS
bellard61382a52003-10-27 21:22:23 +00003682
3683#define SHIFT 0
3684#include "softmmu_template.h"
3685
3686#define SHIFT 1
3687#include "softmmu_template.h"
3688
3689#define SHIFT 2
3690#include "softmmu_template.h"
3691
3692#define SHIFT 3
3693#include "softmmu_template.h"
3694
3695#undef env
3696
3697#endif