/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "tcg.h"
#include "hw/hw.h"
#include "osdep.h"
#include "kvm.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

static TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
uint8_t *phys_ram_dirty;
static int in_migration;

typedef struct RAMBlock {
    uint8_t *host;
    ram_addr_t offset;
    ram_addr_t length;
    struct RAMBlock *next;
} RAMBlock;

static RAMBlock *ram_blocks;
/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
   then we can no longer assume contiguous ram offsets, and external uses
   of this variable will break.  */
ram_addr_t last_ram_offset;
#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed. */
int64_t qemu_icount;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
static PhysPageDesc **l1_phys_map;

#if !defined(CONFIG_USER_ONLY)
static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc * const *mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc * const *mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
} subpage_t;

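/* Mark the host memory range [addr, addr + size) as executable; the
   non-Windows variant first rounds the range out to host page
   boundaries before calling mprotect(). */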
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

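/* Detect the host and target page sizes, allocate the physical page
   table, and (in user mode) mark the pages already mapped by the host
   process as reserved. */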
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        mmap_lock();
        last_brk = (unsigned long)sbrk(0);
        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                  (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
        mmap_unlock();
    }
#endif
}

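/* Return the level-1 map slot covering the given target page index,
   or NULL when the index is outside the addressable range. */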
static inline PageDesc **page_l1_map(target_ulong index)
{
#if TARGET_LONG_BITS > 32
    /* Host memory outside guest VM.  For 32-bit targets we have already
       excluded high addresses.  */
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
        return NULL;
#endif
    return &l1_map[index >> L2_BITS];
}

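/* Look up the PageDesc for a target page index, allocating the
   level-2 table on demand. */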
static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p) {
        /* allocate if not found */
#if defined(CONFIG_USER_ONLY)
        size_t len = sizeof(PageDesc) * L2_SIZE;
        /* Don't use qemu_malloc because it may recurse.  */
        p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        *lp = p;
        if (h2g_valid(p)) {
            unsigned long addr = h2g(p);
            page_set_flags(addr & TARGET_PAGE_MASK,
                           TARGET_PAGE_ALIGN(addr + len),
                           PAGE_RESERVED);
        }
#else
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
        *lp = p;
#endif
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p) {
        return NULL;
    }
    return p + (index & (L2_SIZE - 1));
}

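/* Look up the PhysPageDesc for a guest physical page index; when
   'alloc' is non-zero, missing table levels are allocated and every
   new entry starts out as IO_MEM_UNASSIGNED. */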
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
#endif

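/* Allocate the translated-code buffer and the TB array.  On hosts with
   limited branch ranges the buffer is mapped at a low address so that
   direct calls and branches into it remain possible. */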
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static void cpu_common_pre_save(void *opaque)
{
    CPUState *env = opaque;

    cpu_synchronize_state(env);
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *env = opaque;

    cpu_synchronize_state(env);
    return 0;
}

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .pre_save = cpu_common_pre_save,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

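/* Return the CPUState whose cpu_index matches 'cpu', or NULL if there
   is no such CPU. */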
CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

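/* Register a new virtual CPU: assign the next free cpu_index, append
   the CPU to the global list and, for system emulation, register it
   for save/restore. */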
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(cpu_index, &vmstate_cpu_common, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

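/* Drop the self-modifying-code bitmap of a page and reset its write
   counter. */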
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

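/* Remove 'tb' from the tagged singly-linked page_next list that starts
   at *ptb (the list links carry the page index in their low bits). */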
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

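/* Unlink jump 'n' of 'tb' from the circular list of incoming jumps of
   its destination TB. */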
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

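/* Remove 'tb' from every structure that references it: the physical
   hash table, the per-page TB lists, the per-CPU jump caches and the
   jump chains of other TBs. */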
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the per-CPU TB jump cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

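/* Set 'len' consecutive bits starting at bit 'start' in the bitmap
   'tab'. */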
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

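/* Build the bitmap of the bytes of this page that are covered by
   translated code; it lets tb_invalidate_phys_page_fast() skip writes
   that do not touch any code. */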
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

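/* Translate a new TB starting at 'pc' and register it in the physical
   page tables; the whole translation cache is flushed first if no TB
   slot or buffer space is left. */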
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
    return tb;
}

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    target_ulong tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);

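/* Break the direct jump 'n' of 'tb' and recursively unchain the
   destination TB as well. */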
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
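/* Invalidate the translated code that contains guest address 'pc' so
   that a newly inserted or removed breakpoint takes effect. */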
bellardd720b932004-04-25 17:57:43 +00001315static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1316{
Anthony Liguoric227f092009-10-01 16:12:16 -05001317 target_phys_addr_t addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00001318 target_ulong pd;
Anthony Liguoric227f092009-10-01 16:12:16 -05001319 ram_addr_t ram_addr;
pbrookc2f07f82006-04-08 17:14:56 +00001320 PhysPageDesc *p;
bellardd720b932004-04-25 17:57:43 +00001321
pbrookc2f07f82006-04-08 17:14:56 +00001322 addr = cpu_get_phys_page_debug(env, pc);
1323 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1324 if (!p) {
1325 pd = IO_MEM_UNASSIGNED;
1326 } else {
1327 pd = p->phys_offset;
1328 }
1329 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
pbrook706cd4b2006-04-08 17:36:21 +00001330 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
bellardd720b932004-04-25 17:57:43 +00001331}
bellardc27004e2005-01-03 23:35:10 +00001332#endif
bellardd720b932004-04-25 17:57:43 +00001333
pbrook6658ffb2007-03-16 23:58:11 +00001334/* Add a watchpoint. */
aliguoria1d1bb32008-11-18 20:07:32 +00001335int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1336 int flags, CPUWatchpoint **watchpoint)
pbrook6658ffb2007-03-16 23:58:11 +00001337{
aliguorib4051332008-11-18 20:14:20 +00001338 target_ulong len_mask = ~(len - 1);
aliguoric0ce9982008-11-25 22:13:57 +00001339 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001340
aliguorib4051332008-11-18 20:14:20 +00001341 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1342 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1343 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1344 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1345 return -EINVAL;
1346 }
aliguoria1d1bb32008-11-18 20:07:32 +00001347 wp = qemu_malloc(sizeof(*wp));
pbrook6658ffb2007-03-16 23:58:11 +00001348
aliguoria1d1bb32008-11-18 20:07:32 +00001349 wp->vaddr = addr;
aliguorib4051332008-11-18 20:14:20 +00001350 wp->len_mask = len_mask;
aliguoria1d1bb32008-11-18 20:07:32 +00001351 wp->flags = flags;
1352
aliguori2dc9f412008-11-18 20:56:59 +00001353 /* keep all GDB-injected watchpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001354 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001355 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001356 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001357 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001358
pbrook6658ffb2007-03-16 23:58:11 +00001359 tlb_flush_page(env, addr);
aliguoria1d1bb32008-11-18 20:07:32 +00001360
1361 if (watchpoint)
1362 *watchpoint = wp;
1363 return 0;
pbrook6658ffb2007-03-16 23:58:11 +00001364}
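/* Example (illustrative sketch, not part of the original file): the length
   passed to cpu_watchpoint_insert() must be 1, 2, 4 or 8 bytes and the
   address must be aligned to it.  A debugger stub might install and later
   drop a 4-byte write watchpoint roughly like this (the address is
   hypothetical):

       CPUWatchpoint *wp;
       if (cpu_watchpoint_insert(env, 0x1000, 4,
                                 BP_GDB | BP_MEM_WRITE, &wp) == 0) {
           ...
           cpu_watchpoint_remove_by_ref(env, wp);
       }
 */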
1365
aliguoria1d1bb32008-11-18 20:07:32 +00001366/* Remove a specific watchpoint. */
1367int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1368 int flags)
pbrook6658ffb2007-03-16 23:58:11 +00001369{
aliguorib4051332008-11-18 20:14:20 +00001370 target_ulong len_mask = ~(len - 1);
aliguoria1d1bb32008-11-18 20:07:32 +00001371 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001372
Blue Swirl72cf2d42009-09-12 07:36:22 +00001373 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001374 if (addr == wp->vaddr && len_mask == wp->len_mask
aliguori6e140f22008-11-18 20:37:55 +00001375 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
aliguoria1d1bb32008-11-18 20:07:32 +00001376 cpu_watchpoint_remove_by_ref(env, wp);
pbrook6658ffb2007-03-16 23:58:11 +00001377 return 0;
1378 }
1379 }
aliguoria1d1bb32008-11-18 20:07:32 +00001380 return -ENOENT;
pbrook6658ffb2007-03-16 23:58:11 +00001381}
1382
aliguoria1d1bb32008-11-18 20:07:32 +00001383/* Remove a specific watchpoint by reference. */
1384void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1385{
Blue Swirl72cf2d42009-09-12 07:36:22 +00001386 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
edgar_igl7d03f822008-05-17 18:58:29 +00001387
aliguoria1d1bb32008-11-18 20:07:32 +00001388 tlb_flush_page(env, watchpoint->vaddr);
1389
1390 qemu_free(watchpoint);
edgar_igl7d03f822008-05-17 18:58:29 +00001391}
1392
aliguoria1d1bb32008-11-18 20:07:32 +00001393/* Remove all matching watchpoints. */
1394void cpu_watchpoint_remove_all(CPUState *env, int mask)
1395{
aliguoric0ce9982008-11-25 22:13:57 +00001396 CPUWatchpoint *wp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001397
Blue Swirl72cf2d42009-09-12 07:36:22 +00001398 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001399 if (wp->flags & mask)
1400 cpu_watchpoint_remove_by_ref(env, wp);
aliguoric0ce9982008-11-25 22:13:57 +00001401 }
aliguoria1d1bb32008-11-18 20:07:32 +00001402}
1403
1404/* Add a breakpoint. */
1405int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1406 CPUBreakpoint **breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001407{
bellard1fddef42005-04-17 19:16:13 +00001408#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001409 CPUBreakpoint *bp;
ths3b46e622007-09-17 08:09:54 +00001410
aliguoria1d1bb32008-11-18 20:07:32 +00001411 bp = qemu_malloc(sizeof(*bp));
aliguoria1d1bb32008-11-18 20:07:32 +00001412
1413 bp->pc = pc;
1414 bp->flags = flags;
1415
aliguori2dc9f412008-11-18 20:56:59 +00001416 /* keep all GDB-injected breakpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001417 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001418 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001419 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001420 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001421
1422 breakpoint_invalidate(env, pc);
1423
1424 if (breakpoint)
1425 *breakpoint = bp;
1426 return 0;
1427#else
1428 return -ENOSYS;
1429#endif
1430}
1431
1432/* Remove a specific breakpoint. */
1433int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1434{
1435#if defined(TARGET_HAS_ICE)
1436 CPUBreakpoint *bp;
1437
Blue Swirl72cf2d42009-09-12 07:36:22 +00001438 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00001439 if (bp->pc == pc && bp->flags == flags) {
1440 cpu_breakpoint_remove_by_ref(env, bp);
bellard4c3a88a2003-07-26 12:06:08 +00001441 return 0;
aliguoria1d1bb32008-11-18 20:07:32 +00001442 }
bellard4c3a88a2003-07-26 12:06:08 +00001443 }
aliguoria1d1bb32008-11-18 20:07:32 +00001444 return -ENOENT;
bellard4c3a88a2003-07-26 12:06:08 +00001445#else
aliguoria1d1bb32008-11-18 20:07:32 +00001446 return -ENOSYS;
bellard4c3a88a2003-07-26 12:06:08 +00001447#endif
1448}
1449
aliguoria1d1bb32008-11-18 20:07:32 +00001450/* Remove a specific breakpoint by reference. */
1451void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001452{
bellard1fddef42005-04-17 19:16:13 +00001453#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001454 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
bellardd720b932004-04-25 17:57:43 +00001455
aliguoria1d1bb32008-11-18 20:07:32 +00001456 breakpoint_invalidate(env, breakpoint->pc);
1457
1458 qemu_free(breakpoint);
1459#endif
1460}
1461
1462/* Remove all matching breakpoints. */
1463void cpu_breakpoint_remove_all(CPUState *env, int mask)
1464{
1465#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001466 CPUBreakpoint *bp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001467
Blue Swirl72cf2d42009-09-12 07:36:22 +00001468 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001469 if (bp->flags & mask)
1470 cpu_breakpoint_remove_by_ref(env, bp);
aliguoric0ce9982008-11-25 22:13:57 +00001471 }
bellard4c3a88a2003-07-26 12:06:08 +00001472#endif
1473}
1474
bellardc33a3462003-07-29 20:50:33 +00001475/* enable or disable single step mode. EXCP_DEBUG is returned by the
1476 CPU loop after each instruction */
1477void cpu_single_step(CPUState *env, int enabled)
1478{
bellard1fddef42005-04-17 19:16:13 +00001479#if defined(TARGET_HAS_ICE)
bellardc33a3462003-07-29 20:50:33 +00001480 if (env->singlestep_enabled != enabled) {
1481 env->singlestep_enabled = enabled;
aliguorie22a25c2009-03-12 20:12:48 +00001482 if (kvm_enabled())
1483 kvm_update_guest_debug(env, 0);
1484 else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01001485 /* must flush all the translated code to avoid inconsistencies */
aliguorie22a25c2009-03-12 20:12:48 +00001486 /* XXX: only flush what is necessary */
1487 tb_flush(env);
1488 }
bellardc33a3462003-07-29 20:50:33 +00001489 }
1490#endif
1491}
1492
bellard34865132003-10-05 14:28:56 +00001493/* enable or disable low-level logging */
1494void cpu_set_log(int log_flags)
1495{
1496 loglevel = log_flags;
1497 if (loglevel && !logfile) {
pbrook11fcfab2007-07-01 18:21:11 +00001498 logfile = fopen(logfilename, log_append ? "a" : "w");
bellard34865132003-10-05 14:28:56 +00001499 if (!logfile) {
1500 perror(logfilename);
1501 _exit(1);
1502 }
bellard9fa3e852004-01-04 18:06:42 +00001503#if !defined(CONFIG_SOFTMMU)
1504 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1505 {
blueswir1b55266b2008-09-20 08:07:15 +00001506 static char logfile_buf[4096];
bellard9fa3e852004-01-04 18:06:42 +00001507 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1508 }
Filip Navarabf65f532009-07-27 10:02:04 -05001509#elif !defined(_WIN32)
1510 /* Win32 doesn't support line-buffering and requires size >= 2 */
bellard34865132003-10-05 14:28:56 +00001511 setvbuf(logfile, NULL, _IOLBF, 0);
bellard9fa3e852004-01-04 18:06:42 +00001512#endif
pbrooke735b912007-06-30 13:53:24 +00001513 log_append = 1;
1514 }
1515 if (!loglevel && logfile) {
1516 fclose(logfile);
1517 logfile = NULL;
bellard34865132003-10-05 14:28:56 +00001518 }
1519}
1520
1521void cpu_set_log_filename(const char *filename)
1522{
1523 logfilename = strdup(filename);
pbrooke735b912007-06-30 13:53:24 +00001524 if (logfile) {
1525 fclose(logfile);
1526 logfile = NULL;
1527 }
1528 cpu_set_log(loglevel);
bellard34865132003-10-05 14:28:56 +00001529}
bellardc33a3462003-07-29 20:50:33 +00001530
aurel323098dba2009-03-07 21:28:24 +00001531static void cpu_unlink_tb(CPUState *env)
bellardea041c02003-06-25 16:16:50 +00001532{
Juan Quintela2f7bb872009-07-27 16:13:24 +02001533#if defined(CONFIG_USE_NPTL)
pbrookd5975362008-06-07 20:50:51 +00001534 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1535 problem and hope the cpu will stop of its own accord. For userspace
1536 emulation this often isn't actually as bad as it sounds. Often
1537 signals are used primarily to interrupt blocking syscalls. */
1538#else
aurel323098dba2009-03-07 21:28:24 +00001539 TranslationBlock *tb;
Anthony Liguoric227f092009-10-01 16:12:16 -05001540 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
aurel323098dba2009-03-07 21:28:24 +00001541
1542 tb = env->current_tb;
1543 /* if the cpu is currently executing code, we must unlink it and
1544 all the potentially executing TBs */
1545 if (tb && !testandset(&interrupt_lock)) {
1546 env->current_tb = NULL;
1547 tb_reset_jump_recursive(tb);
1548 resetlock(&interrupt_lock);
1549 }
1550#endif
1551}
1552
1553/* mask must never be zero, except for A20 change call */
1554void cpu_interrupt(CPUState *env, int mask)
1555{
1556 int old_mask;
1557
1558 old_mask = env->interrupt_request;
1559 env->interrupt_request |= mask;
1560
aliguori8edac962009-04-24 18:03:45 +00001561#ifndef CONFIG_USER_ONLY
1562 /*
1563 * If called from iothread context, wake the target cpu in
1564 * case it's halted.
1565 */
1566 if (!qemu_cpu_self(env)) {
1567 qemu_cpu_kick(env);
1568 return;
1569 }
1570#endif
1571
pbrook2e70f6e2008-06-29 01:03:05 +00001572 if (use_icount) {
pbrook266910c2008-07-09 15:31:50 +00001573 env->icount_decr.u16.high = 0xffff;
pbrook2e70f6e2008-06-29 01:03:05 +00001574#ifndef CONFIG_USER_ONLY
pbrook2e70f6e2008-06-29 01:03:05 +00001575 if (!can_do_io(env)
aurel32be214e62009-03-06 21:48:00 +00001576 && (mask & ~old_mask) != 0) {
pbrook2e70f6e2008-06-29 01:03:05 +00001577 cpu_abort(env, "Raised interrupt while not in I/O function");
1578 }
1579#endif
1580 } else {
aurel323098dba2009-03-07 21:28:24 +00001581 cpu_unlink_tb(env);
bellardea041c02003-06-25 16:16:50 +00001582 }
1583}
1584
bellardb54ad042004-05-20 13:42:52 +00001585void cpu_reset_interrupt(CPUState *env, int mask)
1586{
1587 env->interrupt_request &= ~mask;
1588}
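/* Example (illustrative sketch, not part of the original file): device and
   interrupt-controller models typically raise and later clear a hardware
   interrupt line with this pair of calls:

       cpu_interrupt(env, CPU_INTERRUPT_HARD);
       ...
       cpu_reset_interrupt(env, CPU_INTERRUPT_HARD);
 */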
1589
aurel323098dba2009-03-07 21:28:24 +00001590void cpu_exit(CPUState *env)
1591{
1592 env->exit_request = 1;
1593 cpu_unlink_tb(env);
1594}
1595
blueswir1c7cd6a32008-10-02 18:27:46 +00001596const CPULogItem cpu_log_items[] = {
ths5fafdf22007-09-16 21:08:06 +00001597 { CPU_LOG_TB_OUT_ASM, "out_asm",
bellardf193c792004-03-21 17:06:25 +00001598 "show generated host assembly code for each compiled TB" },
1599 { CPU_LOG_TB_IN_ASM, "in_asm",
1600 "show target assembly code for each compiled TB" },
ths5fafdf22007-09-16 21:08:06 +00001601 { CPU_LOG_TB_OP, "op",
bellard57fec1f2008-02-01 10:50:11 +00001602 "show micro ops for each compiled TB" },
bellardf193c792004-03-21 17:06:25 +00001603 { CPU_LOG_TB_OP_OPT, "op_opt",
blueswir1e01a1152008-03-14 17:37:11 +00001604 "show micro ops "
1605#ifdef TARGET_I386
1606 "before eflags optimization and "
bellardf193c792004-03-21 17:06:25 +00001607#endif
blueswir1e01a1152008-03-14 17:37:11 +00001608 "after liveness analysis" },
bellardf193c792004-03-21 17:06:25 +00001609 { CPU_LOG_INT, "int",
1610 "show interrupts/exceptions in short format" },
1611 { CPU_LOG_EXEC, "exec",
1612 "show trace before each executed TB (lots of logs)" },
bellard9fddaa02004-05-21 12:59:32 +00001613 { CPU_LOG_TB_CPU, "cpu",
thse91c8a72007-06-03 13:35:16 +00001614 "show CPU state before block translation" },
bellardf193c792004-03-21 17:06:25 +00001615#ifdef TARGET_I386
1616 { CPU_LOG_PCALL, "pcall",
1617 "show protected mode far calls/returns/exceptions" },
aliguorieca1bdf2009-01-26 19:54:31 +00001618 { CPU_LOG_RESET, "cpu_reset",
1619 "show CPU state before CPU resets" },
bellardf193c792004-03-21 17:06:25 +00001620#endif
bellard8e3a9fd2004-10-09 17:32:58 +00001621#ifdef DEBUG_IOPORT
bellardfd872592004-05-12 19:11:15 +00001622 { CPU_LOG_IOPORT, "ioport",
1623 "show all i/o ports accesses" },
bellard8e3a9fd2004-10-09 17:32:58 +00001624#endif
bellardf193c792004-03-21 17:06:25 +00001625 { 0, NULL, NULL },
1626};
1627
1628static int cmp1(const char *s1, int n, const char *s2)
1629{
1630 if (strlen(s2) != n)
1631 return 0;
1632 return memcmp(s1, s2, n) == 0;
1633}
ths3b46e622007-09-17 08:09:54 +00001634
bellardf193c792004-03-21 17:06:25 +00001635/* takes a comma separated list of log masks. Return 0 if error. */
1636int cpu_str_to_log_mask(const char *str)
1637{
blueswir1c7cd6a32008-10-02 18:27:46 +00001638 const CPULogItem *item;
bellardf193c792004-03-21 17:06:25 +00001639 int mask;
1640 const char *p, *p1;
1641
1642 p = str;
1643 mask = 0;
1644 for(;;) {
1645 p1 = strchr(p, ',');
1646 if (!p1)
1647 p1 = p + strlen(p);
bellard8e3a9fd2004-10-09 17:32:58 +00001648 if(cmp1(p,p1-p,"all")) {
1649 for(item = cpu_log_items; item->mask != 0; item++) {
1650 mask |= item->mask;
1651 }
1652 } else {
bellardf193c792004-03-21 17:06:25 +00001653 for(item = cpu_log_items; item->mask != 0; item++) {
1654 if (cmp1(p, p1 - p, item->name))
1655 goto found;
1656 }
1657 return 0;
bellard8e3a9fd2004-10-09 17:32:58 +00001658 }
bellardf193c792004-03-21 17:06:25 +00001659 found:
1660 mask |= item->mask;
1661 if (*p1 != ',')
1662 break;
1663 p = p1 + 1;
1664 }
1665 return mask;
1666}
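/* Example (illustrative sketch, not part of the original file): the mask
   computed from a "-d"-style option string is passed straight to
   cpu_set_log():

       int mask = cpu_str_to_log_mask("in_asm,exec");
       if (mask)
           cpu_set_log(mask);
 */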
bellardea041c02003-06-25 16:16:50 +00001667
bellard75012672003-06-21 13:11:07 +00001668void cpu_abort(CPUState *env, const char *fmt, ...)
1669{
1670 va_list ap;
pbrook493ae1f2007-11-23 16:53:59 +00001671 va_list ap2;
bellard75012672003-06-21 13:11:07 +00001672
1673 va_start(ap, fmt);
pbrook493ae1f2007-11-23 16:53:59 +00001674 va_copy(ap2, ap);
bellard75012672003-06-21 13:11:07 +00001675 fprintf(stderr, "qemu: fatal: ");
1676 vfprintf(stderr, fmt, ap);
1677 fprintf(stderr, "\n");
1678#ifdef TARGET_I386
bellard7fe48482004-10-09 18:08:01 +00001679 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1680#else
1681 cpu_dump_state(env, stderr, fprintf, 0);
bellard75012672003-06-21 13:11:07 +00001682#endif
aliguori93fcfe32009-01-15 22:34:14 +00001683 if (qemu_log_enabled()) {
1684 qemu_log("qemu: fatal: ");
1685 qemu_log_vprintf(fmt, ap2);
1686 qemu_log("\n");
j_mayerf9373292007-09-29 12:18:20 +00001687#ifdef TARGET_I386
aliguori93fcfe32009-01-15 22:34:14 +00001688 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
j_mayerf9373292007-09-29 12:18:20 +00001689#else
aliguori93fcfe32009-01-15 22:34:14 +00001690 log_cpu_state(env, 0);
j_mayerf9373292007-09-29 12:18:20 +00001691#endif
aliguori31b1a7b2009-01-15 22:35:09 +00001692 qemu_log_flush();
aliguori93fcfe32009-01-15 22:34:14 +00001693 qemu_log_close();
balrog924edca2007-06-10 14:07:13 +00001694 }
pbrook493ae1f2007-11-23 16:53:59 +00001695 va_end(ap2);
j_mayerf9373292007-09-29 12:18:20 +00001696 va_end(ap);
bellard75012672003-06-21 13:11:07 +00001697 abort();
1698}
1699
thsc5be9f02007-02-28 20:20:53 +00001700CPUState *cpu_copy(CPUState *env)
1701{
ths01ba9812007-12-09 02:22:57 +00001702 CPUState *new_env = cpu_init(env->cpu_model_str);
thsc5be9f02007-02-28 20:20:53 +00001703 CPUState *next_cpu = new_env->next_cpu;
1704 int cpu_index = new_env->cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001705#if defined(TARGET_HAS_ICE)
1706 CPUBreakpoint *bp;
1707 CPUWatchpoint *wp;
1708#endif
1709
thsc5be9f02007-02-28 20:20:53 +00001710 memcpy(new_env, env, sizeof(CPUState));
aliguori5a38f082009-01-15 20:16:51 +00001711
1712 /* Preserve chaining and index. */
thsc5be9f02007-02-28 20:20:53 +00001713 new_env->next_cpu = next_cpu;
1714 new_env->cpu_index = cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001715
1716 /* Clone all break/watchpoints.
1717 Note: Once we support ptrace with hw-debug register access, make sure
1718 BP_CPU break/watchpoints are handled correctly on clone. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00001719 QTAILQ_INIT(&env->breakpoints);
1720 QTAILQ_INIT(&env->watchpoints);
aliguori5a38f082009-01-15 20:16:51 +00001721#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001722 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001723 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1724 }
Blue Swirl72cf2d42009-09-12 07:36:22 +00001725 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001726 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1727 wp->flags, NULL);
1728 }
1729#endif
1730
thsc5be9f02007-02-28 20:20:53 +00001731 return new_env;
1732}
1733
bellard01243112004-01-04 15:48:17 +00001734#if !defined(CONFIG_USER_ONLY)
1735
edgar_igl5c751e92008-05-06 08:44:21 +00001736static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1737{
1738 unsigned int i;
1739
1740 /* Discard jump cache entries for any tb which might potentially
1741 overlap the flushed page. */
1742 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1743 memset (&env->tb_jmp_cache[i], 0,
1744 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1745
1746 i = tb_jmp_cache_hash_page(addr);
1747 memset (&env->tb_jmp_cache[i], 0,
1748 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1749}
1750
Igor Kovalenko08738982009-07-12 02:15:40 +04001751static CPUTLBEntry s_cputlb_empty_entry = {
1752 .addr_read = -1,
1753 .addr_write = -1,
1754 .addr_code = -1,
1755 .addend = -1,
1756};
1757
bellardee8b7022004-02-03 23:35:10 +00001758/* NOTE: if flush_global is true, also flush global entries (not
1759 implemented yet) */
1760void tlb_flush(CPUState *env, int flush_global)
bellard33417e72003-08-10 21:47:01 +00001761{
bellard33417e72003-08-10 21:47:01 +00001762 int i;
bellard01243112004-01-04 15:48:17 +00001763
bellard9fa3e852004-01-04 18:06:42 +00001764#if defined(DEBUG_TLB)
1765 printf("tlb_flush:\n");
1766#endif
bellard01243112004-01-04 15:48:17 +00001767 /* must reset current TB so that interrupts cannot modify the
1768 links while we are modifying them */
1769 env->current_tb = NULL;
1770
bellard33417e72003-08-10 21:47:01 +00001771 for(i = 0; i < CPU_TLB_SIZE; i++) {
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001772 int mmu_idx;
1773 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
Igor Kovalenko08738982009-07-12 02:15:40 +04001774 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001775 }
bellard33417e72003-08-10 21:47:01 +00001776 }
bellard9fa3e852004-01-04 18:06:42 +00001777
bellard8a40a182005-11-20 10:35:40 +00001778 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
bellard9fa3e852004-01-04 18:06:42 +00001779
bellarde3db7222005-01-26 22:00:47 +00001780 tlb_flush_count++;
bellard33417e72003-08-10 21:47:01 +00001781}
1782
bellard274da6b2004-05-20 21:56:27 +00001783static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
bellard61382a52003-10-27 21:22:23 +00001784{
ths5fafdf22007-09-16 21:08:06 +00001785 if (addr == (tlb_entry->addr_read &
bellard84b7b8e2005-11-28 21:19:04 +00001786 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00001787 addr == (tlb_entry->addr_write &
bellard84b7b8e2005-11-28 21:19:04 +00001788 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00001789 addr == (tlb_entry->addr_code &
bellard84b7b8e2005-11-28 21:19:04 +00001790 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
Igor Kovalenko08738982009-07-12 02:15:40 +04001791 *tlb_entry = s_cputlb_empty_entry;
bellard84b7b8e2005-11-28 21:19:04 +00001792 }
bellard61382a52003-10-27 21:22:23 +00001793}
1794
bellard2e126692004-04-25 21:28:44 +00001795void tlb_flush_page(CPUState *env, target_ulong addr)
bellard33417e72003-08-10 21:47:01 +00001796{
bellard8a40a182005-11-20 10:35:40 +00001797 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001798 int mmu_idx;
bellard01243112004-01-04 15:48:17 +00001799
bellard9fa3e852004-01-04 18:06:42 +00001800#if defined(DEBUG_TLB)
bellard108c49b2005-07-24 12:55:09 +00001801 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
bellard9fa3e852004-01-04 18:06:42 +00001802#endif
bellard01243112004-01-04 15:48:17 +00001803 /* must reset current TB so that interrupts cannot modify the
1804 links while we are modifying them */
1805 env->current_tb = NULL;
bellard33417e72003-08-10 21:47:01 +00001806
bellard61382a52003-10-27 21:22:23 +00001807 addr &= TARGET_PAGE_MASK;
bellard33417e72003-08-10 21:47:01 +00001808 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001809 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1810 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
bellard01243112004-01-04 15:48:17 +00001811
edgar_igl5c751e92008-05-06 08:44:21 +00001812 tlb_flush_jmp_cache(env, addr);
bellard9fa3e852004-01-04 18:06:42 +00001813}
1814
bellard9fa3e852004-01-04 18:06:42 +00001815/* update the TLBs so that writes to code in the virtual page 'addr'
1816 can be detected */
Anthony Liguoric227f092009-10-01 16:12:16 -05001817static void tlb_protect_code(ram_addr_t ram_addr)
bellard61382a52003-10-27 21:22:23 +00001818{
ths5fafdf22007-09-16 21:08:06 +00001819 cpu_physical_memory_reset_dirty(ram_addr,
bellard6a00d602005-11-21 23:25:50 +00001820 ram_addr + TARGET_PAGE_SIZE,
1821 CODE_DIRTY_FLAG);
bellard9fa3e852004-01-04 18:06:42 +00001822}
1823
bellard9fa3e852004-01-04 18:06:42 +00001824/* update the TLB so that writes in physical page 'phys_addr' are no longer
bellard3a7d9292005-08-21 09:26:42 +00001825 tested for self modifying code */
Anthony Liguoric227f092009-10-01 16:12:16 -05001826static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
bellard3a7d9292005-08-21 09:26:42 +00001827 target_ulong vaddr)
bellard9fa3e852004-01-04 18:06:42 +00001828{
bellard3a7d9292005-08-21 09:26:42 +00001829 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
bellard1ccde1c2004-02-06 19:46:14 +00001830}
1831
ths5fafdf22007-09-16 21:08:06 +00001832static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
bellard1ccde1c2004-02-06 19:46:14 +00001833 unsigned long start, unsigned long length)
1834{
1835 unsigned long addr;
bellard84b7b8e2005-11-28 21:19:04 +00001836 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1837 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
bellard1ccde1c2004-02-06 19:46:14 +00001838 if ((addr - start) < length) {
pbrook0f459d12008-06-09 00:20:13 +00001839 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
bellard1ccde1c2004-02-06 19:46:14 +00001840 }
1841 }
1842}
1843
pbrook5579c7f2009-04-11 14:47:08 +00001844/* Note: start and end must be within the same ram block. */
Anthony Liguoric227f092009-10-01 16:12:16 -05001845void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
bellard0a962c02005-02-10 22:00:27 +00001846 int dirty_flags)
bellard1ccde1c2004-02-06 19:46:14 +00001847{
1848 CPUState *env;
bellard4f2ac232004-04-26 19:44:02 +00001849 unsigned long length, start1;
bellard0a962c02005-02-10 22:00:27 +00001850 int i, mask, len;
1851 uint8_t *p;
bellard1ccde1c2004-02-06 19:46:14 +00001852
1853 start &= TARGET_PAGE_MASK;
1854 end = TARGET_PAGE_ALIGN(end);
1855
1856 length = end - start;
1857 if (length == 0)
1858 return;
bellard0a962c02005-02-10 22:00:27 +00001859 len = length >> TARGET_PAGE_BITS;
bellardf23db162005-08-21 19:12:28 +00001860 mask = ~dirty_flags;
1861 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1862 for(i = 0; i < len; i++)
1863 p[i] &= mask;
1864
bellard1ccde1c2004-02-06 19:46:14 +00001865 /* we modify the TLB cache so that the dirty bit will be set again
1866 when accessing the range */
pbrook5579c7f2009-04-11 14:47:08 +00001867 start1 = (unsigned long)qemu_get_ram_ptr(start);
1868 /* Check that we don't span multiple blocks - this breaks the
1869 address comparisons below. */
1870 if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
1871 != (end - 1) - start) {
1872 abort();
1873 }
1874
bellard6a00d602005-11-21 23:25:50 +00001875 for(env = first_cpu; env != NULL; env = env->next_cpu) {
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001876 int mmu_idx;
1877 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1878 for(i = 0; i < CPU_TLB_SIZE; i++)
1879 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
1880 start1, length);
1881 }
bellard6a00d602005-11-21 23:25:50 +00001882 }
bellard1ccde1c2004-02-06 19:46:14 +00001883}
1884
aliguori74576192008-10-06 14:02:03 +00001885int cpu_physical_memory_set_dirty_tracking(int enable)
1886{
1887 in_migration = enable;
Jan Kiszkab0a46a32009-05-02 00:22:51 +02001888 if (kvm_enabled()) {
1889 return kvm_set_migration_log(enable);
1890 }
aliguori74576192008-10-06 14:02:03 +00001891 return 0;
1892}
1893
1894int cpu_physical_memory_get_dirty_tracking(void)
1895{
1896 return in_migration;
1897}
1898
Anthony Liguoric227f092009-10-01 16:12:16 -05001899int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
1900 target_phys_addr_t end_addr)
aliguori2bec46d2008-11-24 20:21:41 +00001901{
Jan Kiszka151f7742009-05-01 20:52:47 +02001902 int ret = 0;
1903
aliguori2bec46d2008-11-24 20:21:41 +00001904 if (kvm_enabled())
Jan Kiszka151f7742009-05-01 20:52:47 +02001905 ret = kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1906 return ret;
aliguori2bec46d2008-11-24 20:21:41 +00001907}
1908
bellard3a7d9292005-08-21 09:26:42 +00001909static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1910{
Anthony Liguoric227f092009-10-01 16:12:16 -05001911 ram_addr_t ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00001912 void *p;
bellard3a7d9292005-08-21 09:26:42 +00001913
bellard84b7b8e2005-11-28 21:19:04 +00001914 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
pbrook5579c7f2009-04-11 14:47:08 +00001915 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
1916 + tlb_entry->addend);
1917 ram_addr = qemu_ram_addr_from_host(p);
bellard3a7d9292005-08-21 09:26:42 +00001918 if (!cpu_physical_memory_is_dirty(ram_addr)) {
pbrook0f459d12008-06-09 00:20:13 +00001919 tlb_entry->addr_write |= TLB_NOTDIRTY;
bellard3a7d9292005-08-21 09:26:42 +00001920 }
1921 }
1922}
1923
1924/* update the TLB according to the current state of the dirty bits */
1925void cpu_tlb_update_dirty(CPUState *env)
1926{
1927 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001928 int mmu_idx;
1929 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1930 for(i = 0; i < CPU_TLB_SIZE; i++)
1931 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
1932 }
bellard3a7d9292005-08-21 09:26:42 +00001933}
1934
pbrook0f459d12008-06-09 00:20:13 +00001935static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00001936{
pbrook0f459d12008-06-09 00:20:13 +00001937 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1938 tlb_entry->addr_write = vaddr;
bellard1ccde1c2004-02-06 19:46:14 +00001939}
1940
pbrook0f459d12008-06-09 00:20:13 +00001941/* update the TLB corresponding to virtual page vaddr
1942 so that it is no longer dirty */
1943static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00001944{
bellard1ccde1c2004-02-06 19:46:14 +00001945 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001946 int mmu_idx;
bellard1ccde1c2004-02-06 19:46:14 +00001947
pbrook0f459d12008-06-09 00:20:13 +00001948 vaddr &= TARGET_PAGE_MASK;
bellard1ccde1c2004-02-06 19:46:14 +00001949 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001950 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1951 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
bellard9fa3e852004-01-04 18:06:42 +00001952}
1953
bellard59817cc2004-02-16 22:01:13 +00001954/* add a new TLB entry. At most one entry for a given virtual address
1955 is permitted. Return 0 if OK or 2 if the page could not be mapped
1956 (can only happen in non SOFTMMU mode for I/O pages or pages
1957 conflicting with the host address space). */
ths5fafdf22007-09-16 21:08:06 +00001958int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
Anthony Liguoric227f092009-10-01 16:12:16 -05001959 target_phys_addr_t paddr, int prot,
j_mayer6ebbf392007-10-14 07:07:08 +00001960 int mmu_idx, int is_softmmu)
bellard9fa3e852004-01-04 18:06:42 +00001961{
bellard92e873b2004-05-21 14:52:29 +00001962 PhysPageDesc *p;
bellard4f2ac232004-04-26 19:44:02 +00001963 unsigned long pd;
bellard9fa3e852004-01-04 18:06:42 +00001964 unsigned int index;
bellard4f2ac232004-04-26 19:44:02 +00001965 target_ulong address;
pbrook0f459d12008-06-09 00:20:13 +00001966 target_ulong code_address;
Anthony Liguoric227f092009-10-01 16:12:16 -05001967 target_phys_addr_t addend;
bellard9fa3e852004-01-04 18:06:42 +00001968 int ret;
bellard84b7b8e2005-11-28 21:19:04 +00001969 CPUTLBEntry *te;
aliguoria1d1bb32008-11-18 20:07:32 +00001970 CPUWatchpoint *wp;
Anthony Liguoric227f092009-10-01 16:12:16 -05001971 target_phys_addr_t iotlb;
bellard9fa3e852004-01-04 18:06:42 +00001972
bellard92e873b2004-05-21 14:52:29 +00001973 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
bellard9fa3e852004-01-04 18:06:42 +00001974 if (!p) {
1975 pd = IO_MEM_UNASSIGNED;
bellard9fa3e852004-01-04 18:06:42 +00001976 } else {
1977 pd = p->phys_offset;
bellard9fa3e852004-01-04 18:06:42 +00001978 }
1979#if defined(DEBUG_TLB)
j_mayer6ebbf392007-10-14 07:07:08 +00001980 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1981 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
bellard9fa3e852004-01-04 18:06:42 +00001982#endif
1983
1984 ret = 0;
pbrook0f459d12008-06-09 00:20:13 +00001985 address = vaddr;
1986 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1987 /* IO memory case (romd handled later) */
1988 address |= TLB_MMIO;
1989 }
pbrook5579c7f2009-04-11 14:47:08 +00001990 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
pbrook0f459d12008-06-09 00:20:13 +00001991 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1992 /* Normal RAM. */
1993 iotlb = pd & TARGET_PAGE_MASK;
1994 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
1995 iotlb |= IO_MEM_NOTDIRTY;
1996 else
1997 iotlb |= IO_MEM_ROM;
1998 } else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01001999 /* IO handlers are currently passed a physical address.
pbrook0f459d12008-06-09 00:20:13 +00002000 It would be nice to pass an offset from the base address
2001 of that region. This would avoid having to special case RAM,
2002 and avoid full address decoding in every device.
2003 We can't use the high bits of pd for this because
2004 IO_MEM_ROMD uses these as a ram address. */
pbrook8da3ff12008-12-01 18:59:50 +00002005 iotlb = (pd & ~TARGET_PAGE_MASK);
2006 if (p) {
pbrook8da3ff12008-12-01 18:59:50 +00002007 iotlb += p->region_offset;
2008 } else {
2009 iotlb += paddr;
2010 }
pbrook0f459d12008-06-09 00:20:13 +00002011 }
pbrook6658ffb2007-03-16 23:58:11 +00002012
pbrook0f459d12008-06-09 00:20:13 +00002013 code_address = address;
2014 /* Make accesses to pages with watchpoints go via the
2015 watchpoint trap routines. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00002016 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00002017 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
pbrook0f459d12008-06-09 00:20:13 +00002018 iotlb = io_mem_watch + paddr;
2019 /* TODO: The memory case can be optimized by not trapping
2020 reads of pages with a write breakpoint. */
2021 address |= TLB_MMIO;
pbrook6658ffb2007-03-16 23:58:11 +00002022 }
pbrook0f459d12008-06-09 00:20:13 +00002023 }
balrogd79acba2007-06-26 20:01:13 +00002024
pbrook0f459d12008-06-09 00:20:13 +00002025 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2026 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2027 te = &env->tlb_table[mmu_idx][index];
2028 te->addend = addend - vaddr;
2029 if (prot & PAGE_READ) {
2030 te->addr_read = address;
2031 } else {
2032 te->addr_read = -1;
2033 }
edgar_igl5c751e92008-05-06 08:44:21 +00002034
pbrook0f459d12008-06-09 00:20:13 +00002035 if (prot & PAGE_EXEC) {
2036 te->addr_code = code_address;
2037 } else {
2038 te->addr_code = -1;
2039 }
2040 if (prot & PAGE_WRITE) {
2041 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2042 (pd & IO_MEM_ROMD)) {
2043 /* Write access calls the I/O callback. */
2044 te->addr_write = address | TLB_MMIO;
2045 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2046 !cpu_physical_memory_is_dirty(pd)) {
2047 te->addr_write = address | TLB_NOTDIRTY;
bellard84b7b8e2005-11-28 21:19:04 +00002048 } else {
pbrook0f459d12008-06-09 00:20:13 +00002049 te->addr_write = address;
bellard9fa3e852004-01-04 18:06:42 +00002050 }
pbrook0f459d12008-06-09 00:20:13 +00002051 } else {
2052 te->addr_write = -1;
bellard9fa3e852004-01-04 18:06:42 +00002053 }
bellard9fa3e852004-01-04 18:06:42 +00002054 return ret;
2055}
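/* Example (illustrative sketch, not part of the original file): a target's
   tlb_fill()/MMU translation code typically installs an entry for the
   faulting page along these lines; vaddr, paddr, prot and mmu_idx come from
   the target-specific page-table walk.

       tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK,
                         paddr & TARGET_PAGE_MASK,
                         prot, mmu_idx, 1);
 */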
2056
bellard01243112004-01-04 15:48:17 +00002057#else
2058
bellardee8b7022004-02-03 23:35:10 +00002059void tlb_flush(CPUState *env, int flush_global)
bellard01243112004-01-04 15:48:17 +00002060{
2061}
2062
bellard2e126692004-04-25 21:28:44 +00002063void tlb_flush_page(CPUState *env, target_ulong addr)
bellard01243112004-01-04 15:48:17 +00002064{
2065}
2066
ths5fafdf22007-09-16 21:08:06 +00002067int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
Anthony Liguoric227f092009-10-01 16:12:16 -05002068 target_phys_addr_t paddr, int prot,
j_mayer6ebbf392007-10-14 07:07:08 +00002069 int mmu_idx, int is_softmmu)
bellard33417e72003-08-10 21:47:01 +00002070{
bellard9fa3e852004-01-04 18:06:42 +00002071 return 0;
2072}
bellard33417e72003-08-10 21:47:01 +00002073
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002074/*
2075 * Walks guest process memory "regions" one by one
2076 * and calls callback function 'fn' for each region.
2077 */
2078int walk_memory_regions(void *priv,
2079 int (*fn)(void *, unsigned long, unsigned long, unsigned long))
bellard9fa3e852004-01-04 18:06:42 +00002080{
2081 unsigned long start, end;
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002082 PageDesc *p = NULL;
bellard9fa3e852004-01-04 18:06:42 +00002083 int i, j, prot, prot1;
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002084 int rc = 0;
bellard9fa3e852004-01-04 18:06:42 +00002085
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002086 start = end = -1;
bellard9fa3e852004-01-04 18:06:42 +00002087 prot = 0;
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002088
2089 for (i = 0; i <= L1_SIZE; i++) {
2090 p = (i < L1_SIZE) ? l1_map[i] : NULL;
2091 for (j = 0; j < L2_SIZE; j++) {
2092 prot1 = (p == NULL) ? 0 : p[j].flags;
2093 /*
2094 * "region" is one continuous chunk of memory
2095 * that has same protection flags set.
2096 */
bellard9fa3e852004-01-04 18:06:42 +00002097 if (prot1 != prot) {
2098 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2099 if (start != -1) {
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002100 rc = (*fn)(priv, start, end, prot);
2101 /* callback can stop iteration by returning != 0 */
2102 if (rc != 0)
2103 return (rc);
bellard9fa3e852004-01-04 18:06:42 +00002104 }
2105 if (prot1 != 0)
2106 start = end;
2107 else
2108 start = -1;
2109 prot = prot1;
2110 }
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002111 if (p == NULL)
bellard9fa3e852004-01-04 18:06:42 +00002112 break;
2113 }
bellard33417e72003-08-10 21:47:01 +00002114 }
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002115 return (rc);
2116}
2117
2118static int dump_region(void *priv, unsigned long start,
2119 unsigned long end, unsigned long prot)
2120{
2121 FILE *f = (FILE *)priv;
2122
2123 (void) fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2124 start, end, end - start,
2125 ((prot & PAGE_READ) ? 'r' : '-'),
2126 ((prot & PAGE_WRITE) ? 'w' : '-'),
2127 ((prot & PAGE_EXEC) ? 'x' : '-'));
2128
2129 return (0);
2130}
2131
2132/* dump memory mappings */
2133void page_dump(FILE *f)
2134{
2135 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2136 "start", "end", "size", "prot");
2137 walk_memory_regions(f, dump_region);
bellard33417e72003-08-10 21:47:01 +00002138}
2139
pbrook53a59602006-03-25 19:31:22 +00002140int page_get_flags(target_ulong address)
bellard33417e72003-08-10 21:47:01 +00002141{
bellard9fa3e852004-01-04 18:06:42 +00002142 PageDesc *p;
2143
2144 p = page_find(address >> TARGET_PAGE_BITS);
bellard33417e72003-08-10 21:47:01 +00002145 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00002146 return 0;
2147 return p->flags;
bellard33417e72003-08-10 21:47:01 +00002148}
2149
bellard9fa3e852004-01-04 18:06:42 +00002150/* modify the flags of a page and invalidate the code if
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002151 necessary. The flag PAGE_WRITE_ORG is set automatically
bellard9fa3e852004-01-04 18:06:42 +00002152 depending on PAGE_WRITE */
pbrook53a59602006-03-25 19:31:22 +00002153void page_set_flags(target_ulong start, target_ulong end, int flags)
bellard9fa3e852004-01-04 18:06:42 +00002154{
2155 PageDesc *p;
pbrook53a59602006-03-25 19:31:22 +00002156 target_ulong addr;
bellard9fa3e852004-01-04 18:06:42 +00002157
pbrookc8a706f2008-06-02 16:16:42 +00002158 /* mmap_lock should already be held. */
bellard9fa3e852004-01-04 18:06:42 +00002159 start = start & TARGET_PAGE_MASK;
2160 end = TARGET_PAGE_ALIGN(end);
2161 if (flags & PAGE_WRITE)
2162 flags |= PAGE_WRITE_ORG;
bellard9fa3e852004-01-04 18:06:42 +00002163 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2164 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
pbrook17e23772008-06-09 13:47:45 +00002165 /* We may be called for host regions that are outside guest
2166 address space. */
2167 if (!p)
2168 return;
bellard9fa3e852004-01-04 18:06:42 +00002169 /* if the write protection is set, then we invalidate the code
2170 inside */
ths5fafdf22007-09-16 21:08:06 +00002171 if (!(p->flags & PAGE_WRITE) &&
bellard9fa3e852004-01-04 18:06:42 +00002172 (flags & PAGE_WRITE) &&
2173 p->first_tb) {
bellardd720b932004-04-25 17:57:43 +00002174 tb_invalidate_phys_page(addr, 0, NULL);
bellard9fa3e852004-01-04 18:06:42 +00002175 }
2176 p->flags = flags;
2177 }
bellard9fa3e852004-01-04 18:06:42 +00002178}
2179
ths3d97b402007-11-02 19:02:07 +00002180int page_check_range(target_ulong start, target_ulong len, int flags)
2181{
2182 PageDesc *p;
2183 target_ulong end;
2184 target_ulong addr;
2185
balrog55f280c2008-10-28 10:24:11 +00002186 if (start + len < start)
2187 /* we've wrapped around */
2188 return -1;
2189
ths3d97b402007-11-02 19:02:07 +00002190 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2191 start = start & TARGET_PAGE_MASK;
2192
ths3d97b402007-11-02 19:02:07 +00002193 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2194 p = page_find(addr >> TARGET_PAGE_BITS);
2195 if (!p)
2196 return -1;
2197 if (!(p->flags & PAGE_VALID))
2198 return -1;
2199
bellarddae32702007-11-14 10:51:00 +00002200 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
ths3d97b402007-11-02 19:02:07 +00002201 return -1;
bellarddae32702007-11-14 10:51:00 +00002202 if (flags & PAGE_WRITE) {
2203 if (!(p->flags & PAGE_WRITE_ORG))
2204 return -1;
2205 /* unprotect the page if it was put read-only because it
2206 contains translated code */
2207 if (!(p->flags & PAGE_WRITE)) {
2208 if (!page_unprotect(addr, 0, NULL))
2209 return -1;
2210 }
2211 return 0;
2212 }
ths3d97b402007-11-02 19:02:07 +00002213 }
2214 return 0;
2215}
2216
bellard9fa3e852004-01-04 18:06:42 +00002217/* called from signal handler: invalidate the code and unprotect the
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002218 page. Return TRUE if the fault was successfully handled. */
pbrook53a59602006-03-25 19:31:22 +00002219int page_unprotect(target_ulong address, unsigned long pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00002220{
2221 unsigned int page_index, prot, pindex;
2222 PageDesc *p, *p1;
pbrook53a59602006-03-25 19:31:22 +00002223 target_ulong host_start, host_end, addr;
bellard9fa3e852004-01-04 18:06:42 +00002224
pbrookc8a706f2008-06-02 16:16:42 +00002225 /* Technically this isn't safe inside a signal handler. However we
2226 know this only ever happens in a synchronous SEGV handler, so in
2227 practice it seems to be ok. */
2228 mmap_lock();
2229
bellard83fb7ad2004-07-05 21:25:26 +00002230 host_start = address & qemu_host_page_mask;
bellard9fa3e852004-01-04 18:06:42 +00002231 page_index = host_start >> TARGET_PAGE_BITS;
2232 p1 = page_find(page_index);
pbrookc8a706f2008-06-02 16:16:42 +00002233 if (!p1) {
2234 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002235 return 0;
pbrookc8a706f2008-06-02 16:16:42 +00002236 }
bellard83fb7ad2004-07-05 21:25:26 +00002237 host_end = host_start + qemu_host_page_size;
bellard9fa3e852004-01-04 18:06:42 +00002238 p = p1;
2239 prot = 0;
2240 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2241 prot |= p->flags;
2242 p++;
2243 }
2244 /* if the page was really writable, then we change its
2245 protection back to writable */
2246 if (prot & PAGE_WRITE_ORG) {
2247 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2248 if (!(p1[pindex].flags & PAGE_WRITE)) {
ths5fafdf22007-09-16 21:08:06 +00002249 mprotect((void *)g2h(host_start), qemu_host_page_size,
bellard9fa3e852004-01-04 18:06:42 +00002250 (prot & PAGE_BITS) | PAGE_WRITE);
2251 p1[pindex].flags |= PAGE_WRITE;
2252 /* and since the content will be modified, we must invalidate
2253 the corresponding translated code. */
bellardd720b932004-04-25 17:57:43 +00002254 tb_invalidate_phys_page(address, pc, puc);
bellard9fa3e852004-01-04 18:06:42 +00002255#ifdef DEBUG_TB_CHECK
2256 tb_invalidate_check(address);
2257#endif
pbrookc8a706f2008-06-02 16:16:42 +00002258 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002259 return 1;
2260 }
2261 }
pbrookc8a706f2008-06-02 16:16:42 +00002262 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002263 return 0;
2264}
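/* Example (illustrative sketch, not part of the original file): the host
   SEGV handler in user-mode emulation retries the faulting access when
   page_unprotect() reports that the write protection was only there to catch
   self-modifying code; the names below are hypothetical.

       if (page_unprotect(h2g(host_addr), pc, puc))
           return 1;   /* the access can simply be retried */
 */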
2265
bellard6a00d602005-11-21 23:25:50 +00002266static inline void tlb_set_dirty(CPUState *env,
2267 unsigned long addr, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002268{
2269}
bellard9fa3e852004-01-04 18:06:42 +00002270#endif /* defined(CONFIG_USER_ONLY) */
2271
pbrooke2eef172008-06-08 01:09:01 +00002272#if !defined(CONFIG_USER_ONLY)
pbrook8da3ff12008-12-01 18:59:50 +00002273
Anthony Liguoric227f092009-10-01 16:12:16 -05002274static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2275 ram_addr_t memory, ram_addr_t region_offset);
2276static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2277 ram_addr_t orig_memory, ram_addr_t region_offset);
blueswir1db7b5422007-05-26 17:36:03 +00002278#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2279 need_subpage) \
2280 do { \
2281 if (addr > start_addr) \
2282 start_addr2 = 0; \
2283 else { \
2284 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2285 if (start_addr2 > 0) \
2286 need_subpage = 1; \
2287 } \
2288 \
blueswir149e9fba2007-05-30 17:25:06 +00002289 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
blueswir1db7b5422007-05-26 17:36:03 +00002290 end_addr2 = TARGET_PAGE_SIZE - 1; \
2291 else { \
2292 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2293 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2294 need_subpage = 1; \
2295 } \
2296 } while (0)
2297
Michael S. Tsirkin8f2498f2009-09-29 18:53:16 +02002298/* register physical memory.
2299 For RAM, 'size' must be a multiple of the target page size.
2300 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
pbrook8da3ff12008-12-01 18:59:50 +00002301 io memory page. The address used when calling the IO function is
2302 the offset from the start of the region, plus region_offset. Both
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002303 start_addr and region_offset are rounded down to a page boundary
pbrook8da3ff12008-12-01 18:59:50 +00002304 before calculating this offset. This should not be a problem unless
2305 the low bits of start_addr and region_offset differ. */
Anthony Liguoric227f092009-10-01 16:12:16 -05002306void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2307 ram_addr_t size,
2308 ram_addr_t phys_offset,
2309 ram_addr_t region_offset)
bellard33417e72003-08-10 21:47:01 +00002310{
Anthony Liguoric227f092009-10-01 16:12:16 -05002311 target_phys_addr_t addr, end_addr;
bellard92e873b2004-05-21 14:52:29 +00002312 PhysPageDesc *p;
bellard9d420372006-06-25 22:25:22 +00002313 CPUState *env;
Anthony Liguoric227f092009-10-01 16:12:16 -05002314 ram_addr_t orig_size = size;
blueswir1db7b5422007-05-26 17:36:03 +00002315 void *subpage;
bellard33417e72003-08-10 21:47:01 +00002316
aliguori7ba1e612008-11-05 16:04:33 +00002317 if (kvm_enabled())
2318 kvm_set_phys_mem(start_addr, size, phys_offset);
2319
pbrook67c4d232009-02-23 13:16:07 +00002320 if (phys_offset == IO_MEM_UNASSIGNED) {
2321 region_offset = start_addr;
2322 }
pbrook8da3ff12008-12-01 18:59:50 +00002323 region_offset &= TARGET_PAGE_MASK;
bellard5fd386f2004-05-23 21:11:22 +00002324 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
Anthony Liguoric227f092009-10-01 16:12:16 -05002325 end_addr = start_addr + (target_phys_addr_t)size;
blueswir149e9fba2007-05-30 17:25:06 +00002326 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
blueswir1db7b5422007-05-26 17:36:03 +00002327 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2328 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
Anthony Liguoric227f092009-10-01 16:12:16 -05002329 ram_addr_t orig_memory = p->phys_offset;
2330 target_phys_addr_t start_addr2, end_addr2;
blueswir1db7b5422007-05-26 17:36:03 +00002331 int need_subpage = 0;
2332
2333 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2334 need_subpage);
blueswir14254fab2008-01-01 16:57:19 +00002335 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
blueswir1db7b5422007-05-26 17:36:03 +00002336 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2337 subpage = subpage_init((addr & TARGET_PAGE_MASK),
pbrook8da3ff12008-12-01 18:59:50 +00002338 &p->phys_offset, orig_memory,
2339 p->region_offset);
blueswir1db7b5422007-05-26 17:36:03 +00002340 } else {
2341 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2342 >> IO_MEM_SHIFT];
2343 }
pbrook8da3ff12008-12-01 18:59:50 +00002344 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2345 region_offset);
2346 p->region_offset = 0;
blueswir1db7b5422007-05-26 17:36:03 +00002347 } else {
2348 p->phys_offset = phys_offset;
2349 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2350 (phys_offset & IO_MEM_ROMD))
2351 phys_offset += TARGET_PAGE_SIZE;
2352 }
2353 } else {
2354 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2355 p->phys_offset = phys_offset;
pbrook8da3ff12008-12-01 18:59:50 +00002356 p->region_offset = region_offset;
blueswir1db7b5422007-05-26 17:36:03 +00002357 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
pbrook8da3ff12008-12-01 18:59:50 +00002358 (phys_offset & IO_MEM_ROMD)) {
blueswir1db7b5422007-05-26 17:36:03 +00002359 phys_offset += TARGET_PAGE_SIZE;
pbrook0e8f0962008-12-02 09:02:15 +00002360 } else {
Anthony Liguoric227f092009-10-01 16:12:16 -05002361 target_phys_addr_t start_addr2, end_addr2;
blueswir1db7b5422007-05-26 17:36:03 +00002362 int need_subpage = 0;
2363
2364 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2365 end_addr2, need_subpage);
2366
blueswir14254fab2008-01-01 16:57:19 +00002367 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
blueswir1db7b5422007-05-26 17:36:03 +00002368 subpage = subpage_init((addr & TARGET_PAGE_MASK),
pbrook8da3ff12008-12-01 18:59:50 +00002369 &p->phys_offset, IO_MEM_UNASSIGNED,
pbrook67c4d232009-02-23 13:16:07 +00002370 addr & TARGET_PAGE_MASK);
blueswir1db7b5422007-05-26 17:36:03 +00002371 subpage_register(subpage, start_addr2, end_addr2,
pbrook8da3ff12008-12-01 18:59:50 +00002372 phys_offset, region_offset);
2373 p->region_offset = 0;
blueswir1db7b5422007-05-26 17:36:03 +00002374 }
2375 }
2376 }
pbrook8da3ff12008-12-01 18:59:50 +00002377 region_offset += TARGET_PAGE_SIZE;
bellard33417e72003-08-10 21:47:01 +00002378 }
ths3b46e622007-09-17 08:09:54 +00002379
bellard9d420372006-06-25 22:25:22 +00002380 /* since each CPU stores ram addresses in its TLB cache, we must
2381 reset the modified entries */
2382 /* XXX: slow ! */
2383 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2384 tlb_flush(env, 1);
2385 }
bellard33417e72003-08-10 21:47:01 +00002386}
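/* Example (illustrative sketch, not part of the original file): board code
   usually registers RAM obtained from qemu_ram_alloc() and MMIO regions
   obtained from cpu_register_io_memory(); the addresses, sizes and handler
   tables below are hypothetical.

       ram_addr_t ram_offset = qemu_ram_alloc(0x00100000);
       cpu_register_physical_memory_offset(0x00000000, 0x00100000,
                                           ram_offset | IO_MEM_RAM, 0);

       int io_index = cpu_register_io_memory(my_read_fns, my_write_fns, s);
       cpu_register_physical_memory_offset(0x10000000, 0x1000, io_index, 0);
 */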
2387
bellardba863452006-09-24 18:41:10 +00002388/* XXX: temporary until new memory mapping API */
Anthony Liguoric227f092009-10-01 16:12:16 -05002389ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
bellardba863452006-09-24 18:41:10 +00002390{
2391 PhysPageDesc *p;
2392
2393 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2394 if (!p)
2395 return IO_MEM_UNASSIGNED;
2396 return p->phys_offset;
2397}
2398
Anthony Liguoric227f092009-10-01 16:12:16 -05002399void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002400{
2401 if (kvm_enabled())
2402 kvm_coalesce_mmio_region(addr, size);
2403}
2404
Anthony Liguoric227f092009-10-01 16:12:16 -05002405void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002406{
2407 if (kvm_enabled())
2408 kvm_uncoalesce_mmio_region(addr, size);
2409}
2410
Anthony Liguoric227f092009-10-01 16:12:16 -05002411ram_addr_t qemu_ram_alloc(ram_addr_t size)
pbrook94a6b542009-04-11 17:15:54 +00002412{
2413 RAMBlock *new_block;
2414
pbrook94a6b542009-04-11 17:15:54 +00002415 size = TARGET_PAGE_ALIGN(size);
2416 new_block = qemu_malloc(sizeof(*new_block));
2417
Alexander Graf6b024942009-12-05 12:44:25 +01002418#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2419 /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
2420 new_block->host = mmap((void*)0x1000000, size, PROT_EXEC|PROT_READ|PROT_WRITE,
2421 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
2422#else
pbrook94a6b542009-04-11 17:15:54 +00002423 new_block->host = qemu_vmalloc(size);
Alexander Graf6b024942009-12-05 12:44:25 +01002424#endif
Izik Eidusccb167e2009-10-08 16:39:39 +02002425#ifdef MADV_MERGEABLE
2426 madvise(new_block->host, size, MADV_MERGEABLE);
2427#endif
pbrook94a6b542009-04-11 17:15:54 +00002428 new_block->offset = last_ram_offset;
2429 new_block->length = size;
2430
2431 new_block->next = ram_blocks;
2432 ram_blocks = new_block;
2433
2434 phys_ram_dirty = qemu_realloc(phys_ram_dirty,
2435 (last_ram_offset + size) >> TARGET_PAGE_BITS);
2436 memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
2437 0xff, size >> TARGET_PAGE_BITS);
2438
2439 last_ram_offset += size;
2440
Jan Kiszka6f0437e2009-04-26 18:03:40 +02002441 if (kvm_enabled())
2442 kvm_setup_guest_memory(new_block->host, size);
2443
pbrook94a6b542009-04-11 17:15:54 +00002444 return new_block->offset;
2445}
bellarde9a1ab12007-02-08 23:08:38 +00002446
Anthony Liguoric227f092009-10-01 16:12:16 -05002447void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00002448{
pbrook94a6b542009-04-11 17:15:54 +00002449 /* TODO: implement this. */
bellarde9a1ab12007-02-08 23:08:38 +00002450}
2451
pbrookdc828ca2009-04-09 22:21:07 +00002452/* Return a host pointer to ram allocated with qemu_ram_alloc.
pbrook5579c7f2009-04-11 14:47:08 +00002453 With the exception of the softmmu code in this file, this should
2454 only be used for local memory (e.g. video ram) that the device owns,
2455 and knows it isn't going to access beyond the end of the block.
2456
2457 It should not be used for general purpose DMA.
2458 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2459 */
Anthony Liguoric227f092009-10-01 16:12:16 -05002460void *qemu_get_ram_ptr(ram_addr_t addr)
pbrookdc828ca2009-04-09 22:21:07 +00002461{
pbrook94a6b542009-04-11 17:15:54 +00002462 RAMBlock *prev;
2463 RAMBlock **prevp;
2464 RAMBlock *block;
2465
pbrook94a6b542009-04-11 17:15:54 +00002466 prev = NULL;
2467 prevp = &ram_blocks;
2468 block = ram_blocks;
2469 while (block && (block->offset > addr
2470 || block->offset + block->length <= addr)) {
2471 if (prev)
2472 prevp = &prev->next;
2473 prev = block;
2474 block = block->next;
2475 }
2476 if (!block) {
2477 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2478 abort();
2479 }
2480 /* Move this entry to the start of the list. */
2481 if (prev) {
2482 prev->next = block->next;
2483 block->next = *prevp;
2484 *prevp = block;
2485 }
2486 return block->host + (addr - block->offset);
pbrookdc828ca2009-04-09 22:21:07 +00002487}
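/* Example (illustrative sketch, not part of the original file): a display
   device that owns its video RAM may keep a host pointer to it, within the
   restrictions described above; the names are hypothetical.

       ram_addr_t vram_offset = qemu_ram_alloc(vram_size);
       uint8_t *vram_ptr = qemu_get_ram_ptr(vram_offset);
 */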
2488
pbrook5579c7f2009-04-11 14:47:08 +00002489/* Some of the softmmu routines need to translate from a host pointer
2490 (typically a TLB entry) back to a ram offset. */
Anthony Liguoric227f092009-10-01 16:12:16 -05002491ram_addr_t qemu_ram_addr_from_host(void *ptr)
pbrook5579c7f2009-04-11 14:47:08 +00002492{
pbrook94a6b542009-04-11 17:15:54 +00002493 RAMBlock *prev;
2494 RAMBlock **prevp;
2495 RAMBlock *block;
2496 uint8_t *host = ptr;
2497
pbrook94a6b542009-04-11 17:15:54 +00002498 prev = NULL;
2499 prevp = &ram_blocks;
2500 block = ram_blocks;
2501 while (block && (block->host > host
2502 || block->host + block->length <= host)) {
2503 if (prev)
2504 prevp = &prev->next;
2505 prev = block;
2506 block = block->next;
2507 }
2508 if (!block) {
2509 fprintf(stderr, "Bad ram pointer %p\n", ptr);
2510 abort();
2511 }
2512 return block->offset + (host - block->host);
pbrook5579c7f2009-04-11 14:47:08 +00002513}
2514
Anthony Liguoric227f092009-10-01 16:12:16 -05002515static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
bellard33417e72003-08-10 21:47:01 +00002516{
pbrook67d3b952006-12-18 05:03:52 +00002517#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00002518 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
pbrook67d3b952006-12-18 05:03:52 +00002519#endif
Edgar E. Iglesiasfaed1c22009-09-03 13:25:09 +02002520#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
blueswir1e18231a2008-10-06 18:46:28 +00002521 do_unassigned_access(addr, 0, 0, 0, 1);
2522#endif
2523 return 0;
2524}
2525
Anthony Liguoric227f092009-10-01 16:12:16 -05002526static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
blueswir1e18231a2008-10-06 18:46:28 +00002527{
2528#ifdef DEBUG_UNASSIGNED
2529 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2530#endif
Edgar E. Iglesiasfaed1c22009-09-03 13:25:09 +02002531#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
blueswir1e18231a2008-10-06 18:46:28 +00002532 do_unassigned_access(addr, 0, 0, 0, 2);
2533#endif
2534 return 0;
2535}
2536
Anthony Liguoric227f092009-10-01 16:12:16 -05002537static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
blueswir1e18231a2008-10-06 18:46:28 +00002538{
2539#ifdef DEBUG_UNASSIGNED
2540 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2541#endif
Edgar E. Iglesiasfaed1c22009-09-03 13:25:09 +02002542#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
blueswir1e18231a2008-10-06 18:46:28 +00002543 do_unassigned_access(addr, 0, 0, 0, 4);
blueswir1b4f0a312007-05-06 17:59:24 +00002544#endif
bellard33417e72003-08-10 21:47:01 +00002545 return 0;
2546}
2547
Anthony Liguoric227f092009-10-01 16:12:16 -05002548static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
bellard33417e72003-08-10 21:47:01 +00002549{
pbrook67d3b952006-12-18 05:03:52 +00002550#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00002551 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
pbrook67d3b952006-12-18 05:03:52 +00002552#endif
Edgar E. Iglesiasfaed1c22009-09-03 13:25:09 +02002553#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
blueswir1e18231a2008-10-06 18:46:28 +00002554 do_unassigned_access(addr, 1, 0, 0, 1);
2555#endif
2556}
2557
Anthony Liguoric227f092009-10-01 16:12:16 -05002558static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
blueswir1e18231a2008-10-06 18:46:28 +00002559{
2560#ifdef DEBUG_UNASSIGNED
2561 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2562#endif
Edgar E. Iglesiasfaed1c22009-09-03 13:25:09 +02002563#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
blueswir1e18231a2008-10-06 18:46:28 +00002564 do_unassigned_access(addr, 1, 0, 0, 2);
2565#endif
2566}
2567
Anthony Liguoric227f092009-10-01 16:12:16 -05002568static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
blueswir1e18231a2008-10-06 18:46:28 +00002569{
2570#ifdef DEBUG_UNASSIGNED
2571 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2572#endif
Edgar E. Iglesiasfaed1c22009-09-03 13:25:09 +02002573#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
blueswir1e18231a2008-10-06 18:46:28 +00002574 do_unassigned_access(addr, 1, 0, 0, 4);
blueswir1b4f0a312007-05-06 17:59:24 +00002575#endif
bellard33417e72003-08-10 21:47:01 +00002576}
2577
Blue Swirld60efc62009-08-25 18:29:31 +00002578static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
bellard33417e72003-08-10 21:47:01 +00002579 unassigned_mem_readb,
blueswir1e18231a2008-10-06 18:46:28 +00002580 unassigned_mem_readw,
2581 unassigned_mem_readl,
bellard33417e72003-08-10 21:47:01 +00002582};
2583
Blue Swirld60efc62009-08-25 18:29:31 +00002584static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
bellard33417e72003-08-10 21:47:01 +00002585 unassigned_mem_writeb,
blueswir1e18231a2008-10-06 18:46:28 +00002586 unassigned_mem_writew,
2587 unassigned_mem_writel,
bellard33417e72003-08-10 21:47:01 +00002588};
2589
Anthony Liguoric227f092009-10-01 16:12:16 -05002590static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
pbrook0f459d12008-06-09 00:20:13 +00002591 uint32_t val)
bellard1ccde1c2004-02-06 19:46:14 +00002592{
bellard3a7d9292005-08-21 09:26:42 +00002593 int dirty_flags;
bellard3a7d9292005-08-21 09:26:42 +00002594 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2595 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2596#if !defined(CONFIG_USER_ONLY)
2597 tb_invalidate_phys_page_fast(ram_addr, 1);
2598 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2599#endif
2600 }
pbrook5579c7f2009-04-11 14:47:08 +00002601 stb_p(qemu_get_ram_ptr(ram_addr), val);
bellardf23db162005-08-21 19:12:28 +00002602 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2603 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2604 /* we remove the notdirty callback only if the code has been
2605 flushed */
2606 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00002607 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00002608}
2609
Anthony Liguoric227f092009-10-01 16:12:16 -05002610static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
pbrook0f459d12008-06-09 00:20:13 +00002611 uint32_t val)
bellard1ccde1c2004-02-06 19:46:14 +00002612{
bellard3a7d9292005-08-21 09:26:42 +00002613 int dirty_flags;
bellard3a7d9292005-08-21 09:26:42 +00002614 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2615 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2616#if !defined(CONFIG_USER_ONLY)
2617 tb_invalidate_phys_page_fast(ram_addr, 2);
2618 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2619#endif
2620 }
pbrook5579c7f2009-04-11 14:47:08 +00002621 stw_p(qemu_get_ram_ptr(ram_addr), val);
bellardf23db162005-08-21 19:12:28 +00002622 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2623 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2624 /* we remove the notdirty callback only if the code has been
2625 flushed */
2626 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00002627 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00002628}
2629
Anthony Liguoric227f092009-10-01 16:12:16 -05002630static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
pbrook0f459d12008-06-09 00:20:13 +00002631 uint32_t val)
bellard1ccde1c2004-02-06 19:46:14 +00002632{
bellard3a7d9292005-08-21 09:26:42 +00002633 int dirty_flags;
bellard3a7d9292005-08-21 09:26:42 +00002634 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2635 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2636#if !defined(CONFIG_USER_ONLY)
2637 tb_invalidate_phys_page_fast(ram_addr, 4);
2638 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2639#endif
2640 }
pbrook5579c7f2009-04-11 14:47:08 +00002641 stl_p(qemu_get_ram_ptr(ram_addr), val);
bellardf23db162005-08-21 19:12:28 +00002642 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2643 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2644 /* we remove the notdirty callback only if the code has been
2645 flushed */
2646 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00002647 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00002648}
2649
Blue Swirld60efc62009-08-25 18:29:31 +00002650static CPUReadMemoryFunc * const error_mem_read[3] = {
bellard3a7d9292005-08-21 09:26:42 +00002651 NULL, /* never used */
2652 NULL, /* never used */
2653 NULL, /* never used */
2654};
2655
Blue Swirld60efc62009-08-25 18:29:31 +00002656static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
bellard1ccde1c2004-02-06 19:46:14 +00002657 notdirty_mem_writeb,
2658 notdirty_mem_writew,
2659 notdirty_mem_writel,
2660};
2661
pbrook0f459d12008-06-09 00:20:13 +00002662/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00002663static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00002664{
2665 CPUState *env = cpu_single_env;
aliguori06d55cc2008-11-18 20:24:06 +00002666 target_ulong pc, cs_base;
2667 TranslationBlock *tb;
pbrook0f459d12008-06-09 00:20:13 +00002668 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00002669 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00002670 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00002671
aliguori06d55cc2008-11-18 20:24:06 +00002672 if (env->watchpoint_hit) {
2673 /* We re-entered the check after replacing the TB. Now raise
2674 * the debug interrupt so that it will trigger after the
2675 * current instruction. */
2676 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2677 return;
2678 }
pbrook2e70f6e2008-06-29 01:03:05 +00002679 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002680 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00002681 if ((vaddr == (wp->vaddr & len_mask) ||
2682 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00002683 wp->flags |= BP_WATCHPOINT_HIT;
2684 if (!env->watchpoint_hit) {
2685 env->watchpoint_hit = wp;
2686 tb = tb_find_pc(env->mem_io_pc);
2687 if (!tb) {
2688 cpu_abort(env, "check_watchpoint: could not find TB for "
2689 "pc=%p", (void *)env->mem_io_pc);
2690 }
2691 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2692 tb_phys_invalidate(tb, -1);
2693 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2694 env->exception_index = EXCP_DEBUG;
2695 } else {
2696 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2697 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2698 }
2699 cpu_resume_from_signal(env, NULL);
aliguori06d55cc2008-11-18 20:24:06 +00002700 }
aliguori6e140f22008-11-18 20:37:55 +00002701 } else {
2702 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00002703 }
2704 }
2705}
2706
pbrook6658ffb2007-03-16 23:58:11 +00002707/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2708 so these check for a hit then pass through to the normal out-of-line
2709 phys routines. */
Anthony Liguoric227f092009-10-01 16:12:16 -05002710static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
pbrook6658ffb2007-03-16 23:58:11 +00002711{
aliguorib4051332008-11-18 20:14:20 +00002712 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
pbrook6658ffb2007-03-16 23:58:11 +00002713 return ldub_phys(addr);
2714}
2715
Anthony Liguoric227f092009-10-01 16:12:16 -05002716static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
pbrook6658ffb2007-03-16 23:58:11 +00002717{
aliguorib4051332008-11-18 20:14:20 +00002718 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
pbrook6658ffb2007-03-16 23:58:11 +00002719 return lduw_phys(addr);
2720}
2721
Anthony Liguoric227f092009-10-01 16:12:16 -05002722static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
pbrook6658ffb2007-03-16 23:58:11 +00002723{
aliguorib4051332008-11-18 20:14:20 +00002724 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
pbrook6658ffb2007-03-16 23:58:11 +00002725 return ldl_phys(addr);
2726}
2727
Anthony Liguoric227f092009-10-01 16:12:16 -05002728static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
pbrook6658ffb2007-03-16 23:58:11 +00002729 uint32_t val)
2730{
aliguorib4051332008-11-18 20:14:20 +00002731 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
pbrook6658ffb2007-03-16 23:58:11 +00002732 stb_phys(addr, val);
2733}
2734
Anthony Liguoric227f092009-10-01 16:12:16 -05002735static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
pbrook6658ffb2007-03-16 23:58:11 +00002736 uint32_t val)
2737{
aliguorib4051332008-11-18 20:14:20 +00002738 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
pbrook6658ffb2007-03-16 23:58:11 +00002739 stw_phys(addr, val);
2740}
2741
Anthony Liguoric227f092009-10-01 16:12:16 -05002742static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
pbrook6658ffb2007-03-16 23:58:11 +00002743 uint32_t val)
2744{
aliguorib4051332008-11-18 20:14:20 +00002745 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
pbrook6658ffb2007-03-16 23:58:11 +00002746 stl_phys(addr, val);
2747}
2748
Blue Swirld60efc62009-08-25 18:29:31 +00002749static CPUReadMemoryFunc * const watch_mem_read[3] = {
pbrook6658ffb2007-03-16 23:58:11 +00002750 watch_mem_readb,
2751 watch_mem_readw,
2752 watch_mem_readl,
2753};
2754
Blue Swirld60efc62009-08-25 18:29:31 +00002755static CPUWriteMemoryFunc * const watch_mem_write[3] = {
pbrook6658ffb2007-03-16 23:58:11 +00002756 watch_mem_writeb,
2757 watch_mem_writew,
2758 watch_mem_writel,
2759};
pbrook6658ffb2007-03-16 23:58:11 +00002760
Anthony Liguoric227f092009-10-01 16:12:16 -05002761static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
blueswir1db7b5422007-05-26 17:36:03 +00002762 unsigned int len)
2763{
blueswir1db7b5422007-05-26 17:36:03 +00002764 uint32_t ret;
2765 unsigned int idx;
2766
pbrook8da3ff12008-12-01 18:59:50 +00002767 idx = SUBPAGE_IDX(addr);
blueswir1db7b5422007-05-26 17:36:03 +00002768#if defined(DEBUG_SUBPAGE)
2769 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2770 mmio, len, addr, idx);
2771#endif
pbrook8da3ff12008-12-01 18:59:50 +00002772 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2773 addr + mmio->region_offset[idx][0][len]);
blueswir1db7b5422007-05-26 17:36:03 +00002774
2775 return ret;
2776}
2777
Anthony Liguoric227f092009-10-01 16:12:16 -05002778static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
blueswir1db7b5422007-05-26 17:36:03 +00002779 uint32_t value, unsigned int len)
2780{
blueswir1db7b5422007-05-26 17:36:03 +00002781 unsigned int idx;
2782
pbrook8da3ff12008-12-01 18:59:50 +00002783 idx = SUBPAGE_IDX(addr);
blueswir1db7b5422007-05-26 17:36:03 +00002784#if defined(DEBUG_SUBPAGE)
2785 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2786 mmio, len, addr, idx, value);
2787#endif
pbrook8da3ff12008-12-01 18:59:50 +00002788 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2789 addr + mmio->region_offset[idx][1][len],
2790 value);
blueswir1db7b5422007-05-26 17:36:03 +00002791}
2792
Anthony Liguoric227f092009-10-01 16:12:16 -05002793static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
blueswir1db7b5422007-05-26 17:36:03 +00002794{
2795#if defined(DEBUG_SUBPAGE)
2796 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2797#endif
2798
2799 return subpage_readlen(opaque, addr, 0);
2800}
2801
Anthony Liguoric227f092009-10-01 16:12:16 -05002802static void subpage_writeb (void *opaque, target_phys_addr_t addr,
blueswir1db7b5422007-05-26 17:36:03 +00002803 uint32_t value)
2804{
2805#if defined(DEBUG_SUBPAGE)
2806 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2807#endif
2808 subpage_writelen(opaque, addr, value, 0);
2809}
2810
Anthony Liguoric227f092009-10-01 16:12:16 -05002811static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
blueswir1db7b5422007-05-26 17:36:03 +00002812{
2813#if defined(DEBUG_SUBPAGE)
2814 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2815#endif
2816
2817 return subpage_readlen(opaque, addr, 1);
2818}
2819
Anthony Liguoric227f092009-10-01 16:12:16 -05002820static void subpage_writew (void *opaque, target_phys_addr_t addr,
blueswir1db7b5422007-05-26 17:36:03 +00002821 uint32_t value)
2822{
2823#if defined(DEBUG_SUBPAGE)
2824 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2825#endif
2826 subpage_writelen(opaque, addr, value, 1);
2827}
2828
Anthony Liguoric227f092009-10-01 16:12:16 -05002829static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
blueswir1db7b5422007-05-26 17:36:03 +00002830{
2831#if defined(DEBUG_SUBPAGE)
2832 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2833#endif
2834
2835 return subpage_readlen(opaque, addr, 2);
2836}
2837
2838static void subpage_writel (void *opaque,
Anthony Liguoric227f092009-10-01 16:12:16 -05002839 target_phys_addr_t addr, uint32_t value)
blueswir1db7b5422007-05-26 17:36:03 +00002840{
2841#if defined(DEBUG_SUBPAGE)
2842 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2843#endif
2844 subpage_writelen(opaque, addr, value, 2);
2845}
2846
Blue Swirld60efc62009-08-25 18:29:31 +00002847static CPUReadMemoryFunc * const subpage_read[] = {
blueswir1db7b5422007-05-26 17:36:03 +00002848 &subpage_readb,
2849 &subpage_readw,
2850 &subpage_readl,
2851};
2852
Blue Swirld60efc62009-08-25 18:29:31 +00002853static CPUWriteMemoryFunc * const subpage_write[] = {
blueswir1db7b5422007-05-26 17:36:03 +00002854 &subpage_writeb,
2855 &subpage_writew,
2856 &subpage_writel,
2857};
2858
Anthony Liguoric227f092009-10-01 16:12:16 -05002859static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2860 ram_addr_t memory, ram_addr_t region_offset)
blueswir1db7b5422007-05-26 17:36:03 +00002861{
2862 int idx, eidx;
blueswir14254fab2008-01-01 16:57:19 +00002863 unsigned int i;
blueswir1db7b5422007-05-26 17:36:03 +00002864
2865 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2866 return -1;
2867 idx = SUBPAGE_IDX(start);
2868 eidx = SUBPAGE_IDX(end);
2869#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00002870 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
blueswir1db7b5422007-05-26 17:36:03 +00002871 mmio, start, end, idx, eidx, memory);
2872#endif
2873 memory >>= IO_MEM_SHIFT;
2874 for (; idx <= eidx; idx++) {
blueswir14254fab2008-01-01 16:57:19 +00002875 for (i = 0; i < 4; i++) {
blueswir13ee89922008-01-02 19:45:26 +00002876 if (io_mem_read[memory][i]) {
2877 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2878 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
pbrook8da3ff12008-12-01 18:59:50 +00002879 mmio->region_offset[idx][0][i] = region_offset;
blueswir13ee89922008-01-02 19:45:26 +00002880 }
2881 if (io_mem_write[memory][i]) {
2882 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2883 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
pbrook8da3ff12008-12-01 18:59:50 +00002884 mmio->region_offset[idx][1][i] = region_offset;
blueswir13ee89922008-01-02 19:45:26 +00002885 }
blueswir14254fab2008-01-01 16:57:19 +00002886 }
blueswir1db7b5422007-05-26 17:36:03 +00002887 }
2888
2889 return 0;
2890}
2891
Anthony Liguoric227f092009-10-01 16:12:16 -05002892static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2893 ram_addr_t orig_memory, ram_addr_t region_offset)
blueswir1db7b5422007-05-26 17:36:03 +00002894{
Anthony Liguoric227f092009-10-01 16:12:16 -05002895 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00002896 int subpage_memory;
2897
Anthony Liguoric227f092009-10-01 16:12:16 -05002898 mmio = qemu_mallocz(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00002899
2900 mmio->base = base;
Avi Kivity1eed09c2009-06-14 11:38:51 +03002901 subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
blueswir1db7b5422007-05-26 17:36:03 +00002902#if defined(DEBUG_SUBPAGE)
aliguori1eec6142009-02-05 22:06:18 +00002903 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2904 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
blueswir1db7b5422007-05-26 17:36:03 +00002905#endif
aliguori1eec6142009-02-05 22:06:18 +00002906 *phys = subpage_memory | IO_MEM_SUBPAGE;
2907 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
pbrook8da3ff12008-12-01 18:59:50 +00002908 region_offset);
blueswir1db7b5422007-05-26 17:36:03 +00002909
2910 return mmio;
2911}
2912
aliguori88715652009-02-11 15:20:58 +00002913static int get_free_io_mem_idx(void)
2914{
2915 int i;
2916
2917 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
2918 if (!io_mem_used[i]) {
2919 io_mem_used[i] = 1;
2920 return i;
2921 }
2922
2923 return -1;
2924}
2925
bellard33417e72003-08-10 21:47:01 +00002926/* mem_read and mem_write are arrays of the functions used to access
2927 memory as a byte (index 0), word (index 1) or dword (index
Paul Brook0b4e6e32009-04-30 18:37:55 +01002928 2). Functions can be omitted with a NULL function pointer.
blueswir13ee89922008-01-02 19:45:26 +00002929 If io_index is non-zero, the corresponding io zone is
blueswir14254fab2008-01-01 16:57:19 +00002930 modified. If it is zero, a new io zone is allocated. The return
2931 value can be used with cpu_register_physical_memory(). (-1) is
2932 returned on error. */
Avi Kivity1eed09c2009-06-14 11:38:51 +03002933static int cpu_register_io_memory_fixed(int io_index,
Blue Swirld60efc62009-08-25 18:29:31 +00002934 CPUReadMemoryFunc * const *mem_read,
2935 CPUWriteMemoryFunc * const *mem_write,
Avi Kivity1eed09c2009-06-14 11:38:51 +03002936 void *opaque)
bellard33417e72003-08-10 21:47:01 +00002937{
blueswir14254fab2008-01-01 16:57:19 +00002938 int i, subwidth = 0;
bellard33417e72003-08-10 21:47:01 +00002939
2940 if (io_index <= 0) {
aliguori88715652009-02-11 15:20:58 +00002941 io_index = get_free_io_mem_idx();
2942 if (io_index == -1)
2943 return io_index;
bellard33417e72003-08-10 21:47:01 +00002944 } else {
Avi Kivity1eed09c2009-06-14 11:38:51 +03002945 io_index >>= IO_MEM_SHIFT;
bellard33417e72003-08-10 21:47:01 +00002946 if (io_index >= IO_MEM_NB_ENTRIES)
2947 return -1;
2948 }
bellardb5ff1b32005-11-26 10:38:39 +00002949
bellard33417e72003-08-10 21:47:01 +00002950 for(i = 0;i < 3; i++) {
blueswir14254fab2008-01-01 16:57:19 +00002951 if (!mem_read[i] || !mem_write[i])
2952 subwidth = IO_MEM_SUBWIDTH;
bellard33417e72003-08-10 21:47:01 +00002953 io_mem_read[io_index][i] = mem_read[i];
2954 io_mem_write[io_index][i] = mem_write[i];
2955 }
bellarda4193c82004-06-03 14:01:43 +00002956 io_mem_opaque[io_index] = opaque;
blueswir14254fab2008-01-01 16:57:19 +00002957 return (io_index << IO_MEM_SHIFT) | subwidth;
bellard33417e72003-08-10 21:47:01 +00002958}
bellard61382a52003-10-27 21:22:23 +00002959
Blue Swirld60efc62009-08-25 18:29:31 +00002960int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
2961 CPUWriteMemoryFunc * const *mem_write,
Avi Kivity1eed09c2009-06-14 11:38:51 +03002962 void *opaque)
2963{
2964 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
2965}
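
/* Illustrative sketch: how a device model typically consumes
   cpu_register_io_memory().  "mydev" and its single 32-bit status
   register are hypothetical, and cpu_register_physical_memory() is
   assumed to be the usual mapping helper declared in cpu-common.h. */
typedef struct MyDevState {
    uint32_t status;
} MyDevState;

static uint32_t mydev_readl(void *opaque, target_phys_addr_t addr)
{
    MyDevState *s = opaque;
    return s->status;
}

static void mydev_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    MyDevState *s = opaque;
    s->status = val;
}

/* Only 32-bit accesses are implemented; the NULL entries make
   cpu_register_io_memory() flag the region with IO_MEM_SUBWIDTH. */
static CPUReadMemoryFunc * const mydev_read[3] = {
    NULL,
    NULL,
    mydev_readl,
};

static CPUWriteMemoryFunc * const mydev_write[3] = {
    NULL,
    NULL,
    mydev_writel,
};

static void mydev_init_mmio(target_phys_addr_t base, MyDevState *s)
{
    int io = cpu_register_io_memory(mydev_read, mydev_write, s);
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io);
}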
2966
aliguori88715652009-02-11 15:20:58 +00002967void cpu_unregister_io_memory(int io_table_address)
2968{
2969 int i;
2970 int io_index = io_table_address >> IO_MEM_SHIFT;
2971
2972 for (i=0;i < 3; i++) {
2973 io_mem_read[io_index][i] = unassigned_mem_read[i];
2974 io_mem_write[io_index][i] = unassigned_mem_write[i];
2975 }
2976 io_mem_opaque[io_index] = NULL;
2977 io_mem_used[io_index] = 0;
2978}
2979
Avi Kivitye9179ce2009-06-14 11:38:52 +03002980static void io_mem_init(void)
2981{
2982 int i;
2983
2984 cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
2985 cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
2986 cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
2987 for (i=0; i<5; i++)
2988 io_mem_used[i] = 1;
2989
2990 io_mem_watch = cpu_register_io_memory(watch_mem_read,
2991 watch_mem_write, NULL);
Avi Kivitye9179ce2009-06-14 11:38:52 +03002992}
2993
pbrooke2eef172008-06-08 01:09:01 +00002994#endif /* !defined(CONFIG_USER_ONLY) */
2995
bellard13eb76e2004-01-24 15:23:36 +00002996/* physical memory access (slow version, mainly for debug) */
2997#if defined(CONFIG_USER_ONLY)
Anthony Liguoric227f092009-10-01 16:12:16 -05002998void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
bellard13eb76e2004-01-24 15:23:36 +00002999 int len, int is_write)
3000{
3001 int l, flags;
3002 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00003003 void * p;
bellard13eb76e2004-01-24 15:23:36 +00003004
3005 while (len > 0) {
3006 page = addr & TARGET_PAGE_MASK;
3007 l = (page + TARGET_PAGE_SIZE) - addr;
3008 if (l > len)
3009 l = len;
3010 flags = page_get_flags(page);
3011 if (!(flags & PAGE_VALID))
3012 return;
3013 if (is_write) {
3014 if (!(flags & PAGE_WRITE))
3015 return;
bellard579a97f2007-11-11 14:26:47 +00003016 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003017 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
bellard579a97f2007-11-11 14:26:47 +00003018 /* FIXME - should this return an error rather than just fail? */
3019 return;
aurel3272fb7da2008-04-27 23:53:45 +00003020 memcpy(p, buf, l);
3021 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00003022 } else {
3023 if (!(flags & PAGE_READ))
3024 return;
bellard579a97f2007-11-11 14:26:47 +00003025 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003026 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
bellard579a97f2007-11-11 14:26:47 +00003027 /* FIXME - should this return an error rather than just fail? */
3028 return;
aurel3272fb7da2008-04-27 23:53:45 +00003029 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00003030 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00003031 }
3032 len -= l;
3033 buf += l;
3034 addr += l;
3035 }
3036}
bellard8df1cd02005-01-28 22:37:22 +00003037
bellard13eb76e2004-01-24 15:23:36 +00003038#else
Anthony Liguoric227f092009-10-01 16:12:16 -05003039void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
bellard13eb76e2004-01-24 15:23:36 +00003040 int len, int is_write)
3041{
3042 int l, io_index;
3043 uint8_t *ptr;
3044 uint32_t val;
Anthony Liguoric227f092009-10-01 16:12:16 -05003045 target_phys_addr_t page;
bellard2e126692004-04-25 21:28:44 +00003046 unsigned long pd;
bellard92e873b2004-05-21 14:52:29 +00003047 PhysPageDesc *p;
ths3b46e622007-09-17 08:09:54 +00003048
bellard13eb76e2004-01-24 15:23:36 +00003049 while (len > 0) {
3050 page = addr & TARGET_PAGE_MASK;
3051 l = (page + TARGET_PAGE_SIZE) - addr;
3052 if (l > len)
3053 l = len;
bellard92e873b2004-05-21 14:52:29 +00003054 p = phys_page_find(page >> TARGET_PAGE_BITS);
bellard13eb76e2004-01-24 15:23:36 +00003055 if (!p) {
3056 pd = IO_MEM_UNASSIGNED;
3057 } else {
3058 pd = p->phys_offset;
3059 }
ths3b46e622007-09-17 08:09:54 +00003060
bellard13eb76e2004-01-24 15:23:36 +00003061 if (is_write) {
bellard3a7d9292005-08-21 09:26:42 +00003062 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
Anthony Liguoric227f092009-10-01 16:12:16 -05003063 target_phys_addr_t addr1 = addr;
bellard13eb76e2004-01-24 15:23:36 +00003064 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00003065 if (p)
aurel326c2934d2009-02-18 21:37:17 +00003066 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bellard6a00d602005-11-21 23:25:50 +00003067 /* XXX: could force cpu_single_env to NULL to avoid
3068 potential bugs */
aurel326c2934d2009-02-18 21:37:17 +00003069 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003070 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003071 val = ldl_p(buf);
aurel326c2934d2009-02-18 21:37:17 +00003072 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
bellard13eb76e2004-01-24 15:23:36 +00003073 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003074 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003075 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003076 val = lduw_p(buf);
aurel326c2934d2009-02-18 21:37:17 +00003077 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
bellard13eb76e2004-01-24 15:23:36 +00003078 l = 2;
3079 } else {
bellard1c213d12005-09-03 10:49:04 +00003080 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003081 val = ldub_p(buf);
aurel326c2934d2009-02-18 21:37:17 +00003082 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
bellard13eb76e2004-01-24 15:23:36 +00003083 l = 1;
3084 }
3085 } else {
bellardb448f2f2004-02-25 23:24:04 +00003086 unsigned long addr1;
3087 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
bellard13eb76e2004-01-24 15:23:36 +00003088 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003089 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00003090 memcpy(ptr, buf, l);
bellard3a7d9292005-08-21 09:26:42 +00003091 if (!cpu_physical_memory_is_dirty(addr1)) {
3092 /* invalidate code */
3093 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3094 /* set dirty bit */
ths5fafdf22007-09-16 21:08:06 +00003095 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
bellardf23db162005-08-21 19:12:28 +00003096 (0xff & ~CODE_DIRTY_FLAG);
bellard3a7d9292005-08-21 09:26:42 +00003097 }
bellard13eb76e2004-01-24 15:23:36 +00003098 }
3099 } else {
ths5fafdf22007-09-16 21:08:06 +00003100 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
bellard2a4188a2006-06-25 21:54:59 +00003101 !(pd & IO_MEM_ROMD)) {
Anthony Liguoric227f092009-10-01 16:12:16 -05003102 target_phys_addr_t addr1 = addr;
bellard13eb76e2004-01-24 15:23:36 +00003103 /* I/O case */
3104 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00003105 if (p)
aurel326c2934d2009-02-18 21:37:17 +00003106 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3107 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003108 /* 32 bit read access */
aurel326c2934d2009-02-18 21:37:17 +00003109 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
bellardc27004e2005-01-03 23:35:10 +00003110 stl_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003111 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003112 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003113 /* 16 bit read access */
aurel326c2934d2009-02-18 21:37:17 +00003114 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
bellardc27004e2005-01-03 23:35:10 +00003115 stw_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003116 l = 2;
3117 } else {
bellard1c213d12005-09-03 10:49:04 +00003118 /* 8 bit read access */
aurel326c2934d2009-02-18 21:37:17 +00003119 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
bellardc27004e2005-01-03 23:35:10 +00003120 stb_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003121 l = 1;
3122 }
3123 } else {
3124 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003125 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bellard13eb76e2004-01-24 15:23:36 +00003126 (addr & ~TARGET_PAGE_MASK);
3127 memcpy(buf, ptr, l);
3128 }
3129 }
3130 len -= l;
3131 buf += l;
3132 addr += l;
3133 }
3134}
bellard8df1cd02005-01-28 22:37:22 +00003135
bellardd0ecd2a2006-04-23 17:14:48 +00003136/* used for ROM loading : can write in RAM and ROM */
Anthony Liguoric227f092009-10-01 16:12:16 -05003137void cpu_physical_memory_write_rom(target_phys_addr_t addr,
bellardd0ecd2a2006-04-23 17:14:48 +00003138 const uint8_t *buf, int len)
3139{
3140 int l;
3141 uint8_t *ptr;
Anthony Liguoric227f092009-10-01 16:12:16 -05003142 target_phys_addr_t page;
bellardd0ecd2a2006-04-23 17:14:48 +00003143 unsigned long pd;
3144 PhysPageDesc *p;
ths3b46e622007-09-17 08:09:54 +00003145
bellardd0ecd2a2006-04-23 17:14:48 +00003146 while (len > 0) {
3147 page = addr & TARGET_PAGE_MASK;
3148 l = (page + TARGET_PAGE_SIZE) - addr;
3149 if (l > len)
3150 l = len;
3151 p = phys_page_find(page >> TARGET_PAGE_BITS);
3152 if (!p) {
3153 pd = IO_MEM_UNASSIGNED;
3154 } else {
3155 pd = p->phys_offset;
3156 }
ths3b46e622007-09-17 08:09:54 +00003157
bellardd0ecd2a2006-04-23 17:14:48 +00003158 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
bellard2a4188a2006-06-25 21:54:59 +00003159 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3160 !(pd & IO_MEM_ROMD)) {
bellardd0ecd2a2006-04-23 17:14:48 +00003161 /* do nothing */
3162 } else {
3163 unsigned long addr1;
3164 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3165 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003166 ptr = qemu_get_ram_ptr(addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00003167 memcpy(ptr, buf, l);
3168 }
3169 len -= l;
3170 buf += l;
3171 addr += l;
3172 }
3173}
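
/* Illustrative sketch: a board init function loading a firmware image
   into a region registered as ROM.  The base address, image pointer and
   size are hypothetical; the point is that cpu_physical_memory_rw()
   would route such writes to the unassigned/ROM handlers and discard
   them, while this helper writes the backing RAM directly. */
static void example_load_firmware(target_phys_addr_t rom_base,
                                  const uint8_t *image, int image_size)
{
    cpu_physical_memory_write_rom(rom_base, image, image_size);
}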
3174
aliguori6d16c2f2009-01-22 16:59:11 +00003175typedef struct {
3176 void *buffer;
Anthony Liguoric227f092009-10-01 16:12:16 -05003177 target_phys_addr_t addr;
3178 target_phys_addr_t len;
aliguori6d16c2f2009-01-22 16:59:11 +00003179} BounceBuffer;
3180
3181static BounceBuffer bounce;
3182
aliguoriba223c22009-01-22 16:59:16 +00003183typedef struct MapClient {
3184 void *opaque;
3185 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00003186 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00003187} MapClient;
3188
Blue Swirl72cf2d42009-09-12 07:36:22 +00003189static QLIST_HEAD(map_client_list, MapClient) map_client_list
3190 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003191
3192void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3193{
3194 MapClient *client = qemu_malloc(sizeof(*client));
3195
3196 client->opaque = opaque;
3197 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003198 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00003199 return client;
3200}
3201
3202void cpu_unregister_map_client(void *_client)
3203{
3204 MapClient *client = (MapClient *)_client;
3205
Blue Swirl72cf2d42009-09-12 07:36:22 +00003206 QLIST_REMOVE(client, link);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09003207 qemu_free(client);
aliguoriba223c22009-01-22 16:59:16 +00003208}
3209
3210static void cpu_notify_map_clients(void)
3211{
3212 MapClient *client;
3213
Blue Swirl72cf2d42009-09-12 07:36:22 +00003214 while (!QLIST_EMPTY(&map_client_list)) {
3215 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003216 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09003217 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00003218 }
3219}
3220
aliguori6d16c2f2009-01-22 16:59:11 +00003221/* Map a physical memory region into a host virtual address.
3222 * May map a subset of the requested range, given by and returned in *plen.
3223 * May return NULL if resources needed to perform the mapping are exhausted.
3224 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00003225 * Use cpu_register_map_client() to know when retrying the map operation is
3226 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00003227 */
Anthony Liguoric227f092009-10-01 16:12:16 -05003228void *cpu_physical_memory_map(target_phys_addr_t addr,
3229 target_phys_addr_t *plen,
aliguori6d16c2f2009-01-22 16:59:11 +00003230 int is_write)
3231{
Anthony Liguoric227f092009-10-01 16:12:16 -05003232 target_phys_addr_t len = *plen;
3233 target_phys_addr_t done = 0;
aliguori6d16c2f2009-01-22 16:59:11 +00003234 int l;
3235 uint8_t *ret = NULL;
3236 uint8_t *ptr;
Anthony Liguoric227f092009-10-01 16:12:16 -05003237 target_phys_addr_t page;
aliguori6d16c2f2009-01-22 16:59:11 +00003238 unsigned long pd;
3239 PhysPageDesc *p;
3240 unsigned long addr1;
3241
3242 while (len > 0) {
3243 page = addr & TARGET_PAGE_MASK;
3244 l = (page + TARGET_PAGE_SIZE) - addr;
3245 if (l > len)
3246 l = len;
3247 p = phys_page_find(page >> TARGET_PAGE_BITS);
3248 if (!p) {
3249 pd = IO_MEM_UNASSIGNED;
3250 } else {
3251 pd = p->phys_offset;
3252 }
3253
3254 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3255 if (done || bounce.buffer) {
3256 break;
3257 }
3258 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3259 bounce.addr = addr;
3260 bounce.len = l;
3261 if (!is_write) {
3262 cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3263 }
3264 ptr = bounce.buffer;
3265 } else {
3266 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
pbrook5579c7f2009-04-11 14:47:08 +00003267 ptr = qemu_get_ram_ptr(addr1);
aliguori6d16c2f2009-01-22 16:59:11 +00003268 }
3269 if (!done) {
3270 ret = ptr;
3271 } else if (ret + done != ptr) {
3272 break;
3273 }
3274
3275 len -= l;
3276 addr += l;
3277 done += l;
3278 }
3279 *plen = done;
3280 return ret;
3281}
3282
3283/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3284 * Will also mark the memory as dirty if is_write == 1. access_len gives
3285 * the amount of memory that was actually read or written by the caller.
3286 */
Anthony Liguoric227f092009-10-01 16:12:16 -05003287void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3288 int is_write, target_phys_addr_t access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00003289{
3290 if (buffer != bounce.buffer) {
3291 if (is_write) {
Anthony Liguoric227f092009-10-01 16:12:16 -05003292 ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00003293 while (access_len) {
3294 unsigned l;
3295 l = TARGET_PAGE_SIZE;
3296 if (l > access_len)
3297 l = access_len;
3298 if (!cpu_physical_memory_is_dirty(addr1)) {
3299 /* invalidate code */
3300 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3301 /* set dirty bit */
3302 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3303 (0xff & ~CODE_DIRTY_FLAG);
3304 }
3305 addr1 += l;
3306 access_len -= l;
3307 }
3308 }
3309 return;
3310 }
3311 if (is_write) {
3312 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3313 }
3314 qemu_free(bounce.buffer);
3315 bounce.buffer = NULL;
aliguoriba223c22009-01-22 16:59:16 +00003316 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00003317}
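
/* Illustrative sketch: the zero-copy DMA pattern these helpers are meant
   for.  mydma_run() and mydma_retry() are hypothetical; the shape is
   map / use / unmap, with cpu_register_map_client() used to retry once
   the (single) bounce buffer becomes free again. */
static void mydma_retry(void *opaque);

static void mydma_run(target_phys_addr_t addr, target_phys_addr_t len,
                      int is_write, void *opaque)
{
    while (len > 0) {
        target_phys_addr_t plen = len;
        void *buf = cpu_physical_memory_map(addr, &plen, is_write);
        if (!buf) {
            /* Mapping resources are exhausted: ask to be called back
               and retry the remainder of the transfer later. */
            cpu_register_map_client(opaque, mydma_retry);
            return;
        }
        /* ... hand (buf, plen) to the device model here ... */
        cpu_physical_memory_unmap(buf, plen, is_write, plen);
        addr += plen;
        len -= plen;
    }
}

static void mydma_retry(void *opaque)
{
    /* Re-issue the remaining transfer; the parameters would be
       recovered from opaque in a real device. */
}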
bellardd0ecd2a2006-04-23 17:14:48 +00003318
bellard8df1cd02005-01-28 22:37:22 +00003319/* warning: addr must be aligned */
Anthony Liguoric227f092009-10-01 16:12:16 -05003320uint32_t ldl_phys(target_phys_addr_t addr)
bellard8df1cd02005-01-28 22:37:22 +00003321{
3322 int io_index;
3323 uint8_t *ptr;
3324 uint32_t val;
3325 unsigned long pd;
3326 PhysPageDesc *p;
3327
3328 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3329 if (!p) {
3330 pd = IO_MEM_UNASSIGNED;
3331 } else {
3332 pd = p->phys_offset;
3333 }
ths3b46e622007-09-17 08:09:54 +00003334
ths5fafdf22007-09-16 21:08:06 +00003335 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
bellard2a4188a2006-06-25 21:54:59 +00003336 !(pd & IO_MEM_ROMD)) {
bellard8df1cd02005-01-28 22:37:22 +00003337 /* I/O case */
3338 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00003339 if (p)
3340 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bellard8df1cd02005-01-28 22:37:22 +00003341 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3342 } else {
3343 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003344 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bellard8df1cd02005-01-28 22:37:22 +00003345 (addr & ~TARGET_PAGE_MASK);
3346 val = ldl_p(ptr);
3347 }
3348 return val;
3349}
3350
bellard84b7b8e2005-11-28 21:19:04 +00003351/* warning: addr must be aligned */
Anthony Liguoric227f092009-10-01 16:12:16 -05003352uint64_t ldq_phys(target_phys_addr_t addr)
bellard84b7b8e2005-11-28 21:19:04 +00003353{
3354 int io_index;
3355 uint8_t *ptr;
3356 uint64_t val;
3357 unsigned long pd;
3358 PhysPageDesc *p;
3359
3360 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3361 if (!p) {
3362 pd = IO_MEM_UNASSIGNED;
3363 } else {
3364 pd = p->phys_offset;
3365 }
ths3b46e622007-09-17 08:09:54 +00003366
bellard2a4188a2006-06-25 21:54:59 +00003367 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3368 !(pd & IO_MEM_ROMD)) {
bellard84b7b8e2005-11-28 21:19:04 +00003369 /* I/O case */
3370 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00003371 if (p)
3372 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bellard84b7b8e2005-11-28 21:19:04 +00003373#ifdef TARGET_WORDS_BIGENDIAN
3374 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3375 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3376#else
3377 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3378 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3379#endif
3380 } else {
3381 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003382 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bellard84b7b8e2005-11-28 21:19:04 +00003383 (addr & ~TARGET_PAGE_MASK);
3384 val = ldq_p(ptr);
3385 }
3386 return val;
3387}
3388
bellardaab33092005-10-30 20:48:42 +00003389/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05003390uint32_t ldub_phys(target_phys_addr_t addr)
bellardaab33092005-10-30 20:48:42 +00003391{
3392 uint8_t val;
3393 cpu_physical_memory_read(addr, &val, 1);
3394 return val;
3395}
3396
3397/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05003398uint32_t lduw_phys(target_phys_addr_t addr)
bellardaab33092005-10-30 20:48:42 +00003399{
3400 uint16_t val;
3401 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3402 return tswap16(val);
3403}
3404
bellard8df1cd02005-01-28 22:37:22 +00003405/* warning: addr must be aligned. The ram page is not marked as dirty
3406 and the code inside is not invalidated. It is useful if the dirty
3407 bits are used to track modified PTEs */
Anthony Liguoric227f092009-10-01 16:12:16 -05003408void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
bellard8df1cd02005-01-28 22:37:22 +00003409{
3410 int io_index;
3411 uint8_t *ptr;
3412 unsigned long pd;
3413 PhysPageDesc *p;
3414
3415 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3416 if (!p) {
3417 pd = IO_MEM_UNASSIGNED;
3418 } else {
3419 pd = p->phys_offset;
3420 }
ths3b46e622007-09-17 08:09:54 +00003421
bellard3a7d9292005-08-21 09:26:42 +00003422 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
bellard8df1cd02005-01-28 22:37:22 +00003423 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00003424 if (p)
3425 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bellard8df1cd02005-01-28 22:37:22 +00003426 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3427 } else {
aliguori74576192008-10-06 14:02:03 +00003428 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
pbrook5579c7f2009-04-11 14:47:08 +00003429 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00003430 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00003431
3432 if (unlikely(in_migration)) {
3433 if (!cpu_physical_memory_is_dirty(addr1)) {
3434 /* invalidate code */
3435 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3436 /* set dirty bit */
3437 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3438 (0xff & ~CODE_DIRTY_FLAG);
3439 }
3440 }
bellard8df1cd02005-01-28 22:37:22 +00003441 }
3442}
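
/* Illustrative sketch: the PTE-update case mentioned above.  A target
   MMU helper that sets accessed/dirty bits in a guest page table entry
   can use stl_phys_notdirty() so the update neither dirties the page
   containing the PTE nor invalidates any translated code.  The helper
   name and flag handling are hypothetical. */
static inline void example_set_pte_flags(target_phys_addr_t pte_addr,
                                         uint32_t flags)
{
    uint32_t pte = ldl_phys(pte_addr);
    if ((pte & flags) != flags)
        stl_phys_notdirty(pte_addr, pte | flags);
}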
3443
Anthony Liguoric227f092009-10-01 16:12:16 -05003444void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
j_mayerbc98a7e2007-04-04 07:55:12 +00003445{
3446 int io_index;
3447 uint8_t *ptr;
3448 unsigned long pd;
3449 PhysPageDesc *p;
3450
3451 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3452 if (!p) {
3453 pd = IO_MEM_UNASSIGNED;
3454 } else {
3455 pd = p->phys_offset;
3456 }
ths3b46e622007-09-17 08:09:54 +00003457
j_mayerbc98a7e2007-04-04 07:55:12 +00003458 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3459 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00003460 if (p)
3461 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
j_mayerbc98a7e2007-04-04 07:55:12 +00003462#ifdef TARGET_WORDS_BIGENDIAN
3463 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3464 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3465#else
3466 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3467 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3468#endif
3469 } else {
pbrook5579c7f2009-04-11 14:47:08 +00003470 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
j_mayerbc98a7e2007-04-04 07:55:12 +00003471 (addr & ~TARGET_PAGE_MASK);
3472 stq_p(ptr, val);
3473 }
3474}
3475
bellard8df1cd02005-01-28 22:37:22 +00003476/* warning: addr must be aligned */
Anthony Liguoric227f092009-10-01 16:12:16 -05003477void stl_phys(target_phys_addr_t addr, uint32_t val)
bellard8df1cd02005-01-28 22:37:22 +00003478{
3479 int io_index;
3480 uint8_t *ptr;
3481 unsigned long pd;
3482 PhysPageDesc *p;
3483
3484 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3485 if (!p) {
3486 pd = IO_MEM_UNASSIGNED;
3487 } else {
3488 pd = p->phys_offset;
3489 }
ths3b46e622007-09-17 08:09:54 +00003490
bellard3a7d9292005-08-21 09:26:42 +00003491 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
bellard8df1cd02005-01-28 22:37:22 +00003492 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00003493 if (p)
3494 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bellard8df1cd02005-01-28 22:37:22 +00003495 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3496 } else {
3497 unsigned long addr1;
3498 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3499 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003500 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00003501 stl_p(ptr, val);
bellard3a7d9292005-08-21 09:26:42 +00003502 if (!cpu_physical_memory_is_dirty(addr1)) {
3503 /* invalidate code */
3504 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3505 /* set dirty bit */
bellardf23db162005-08-21 19:12:28 +00003506 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3507 (0xff & ~CODE_DIRTY_FLAG);
bellard3a7d9292005-08-21 09:26:42 +00003508 }
bellard8df1cd02005-01-28 22:37:22 +00003509 }
3510}
3511
bellardaab33092005-10-30 20:48:42 +00003512/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05003513void stb_phys(target_phys_addr_t addr, uint32_t val)
bellardaab33092005-10-30 20:48:42 +00003514{
3515 uint8_t v = val;
3516 cpu_physical_memory_write(addr, &v, 1);
3517}
3518
3519/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05003520void stw_phys(target_phys_addr_t addr, uint32_t val)
bellardaab33092005-10-30 20:48:42 +00003521{
3522 uint16_t v = tswap16(val);
3523 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3524}
3525
3526/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05003527void stq_phys(target_phys_addr_t addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00003528{
3529 val = tswap64(val);
3530 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3531}
3532
bellard13eb76e2004-01-24 15:23:36 +00003533#endif
3534
aliguori5e2972f2009-03-28 17:51:36 +00003535/* virtual memory access for debug (includes writing to ROM) */
ths5fafdf22007-09-16 21:08:06 +00003536int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00003537 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003538{
3539 int l;
Anthony Liguoric227f092009-10-01 16:12:16 -05003540 target_phys_addr_t phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00003541 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00003542
3543 while (len > 0) {
3544 page = addr & TARGET_PAGE_MASK;
3545 phys_addr = cpu_get_phys_page_debug(env, page);
3546 /* if no physical page mapped, return an error */
3547 if (phys_addr == -1)
3548 return -1;
3549 l = (page + TARGET_PAGE_SIZE) - addr;
3550 if (l > len)
3551 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00003552 phys_addr += (addr & ~TARGET_PAGE_MASK);
3553#if !defined(CONFIG_USER_ONLY)
3554 if (is_write)
3555 cpu_physical_memory_write_rom(phys_addr, buf, l);
3556 else
3557#endif
3558 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
bellard13eb76e2004-01-24 15:23:36 +00003559 len -= l;
3560 buf += l;
3561 addr += l;
3562 }
3563 return 0;
3564}
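
/* Illustrative sketch: how a debugger front end (such as the gdb stub)
   reads guest virtual memory through cpu_memory_rw_debug().  The helper
   is hypothetical; note the -1 return when no physical page is mapped. */
static int example_read_guest_u32(CPUState *env, target_ulong vaddr,
                                  uint32_t *value)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0)
        return -1;
    *value = ldl_p(buf);
    return 0;
}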
3565
pbrook2e70f6e2008-06-29 01:03:05 +00003566/* in deterministic execution mode, instructions doing device I/Os
3567 must be at the end of the TB */
3568void cpu_io_recompile(CPUState *env, void *retaddr)
3569{
3570 TranslationBlock *tb;
3571 uint32_t n, cflags;
3572 target_ulong pc, cs_base;
3573 uint64_t flags;
3574
3575 tb = tb_find_pc((unsigned long)retaddr);
3576 if (!tb) {
3577 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3578 retaddr);
3579 }
3580 n = env->icount_decr.u16.low + tb->icount;
3581 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3582 /* Calculate how many instructions had been executed before the fault
thsbf20dc02008-06-30 17:22:19 +00003583 occurred. */
pbrook2e70f6e2008-06-29 01:03:05 +00003584 n = n - env->icount_decr.u16.low;
3585 /* Generate a new TB ending on the I/O insn. */
3586 n++;
3587 /* On MIPS and SH, delay slot instructions can only be restarted if
3588 they were already the first instruction in the TB. If this is not
thsbf20dc02008-06-30 17:22:19 +00003589 the first instruction in a TB then re-execute the preceding
pbrook2e70f6e2008-06-29 01:03:05 +00003590 branch. */
3591#if defined(TARGET_MIPS)
3592 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3593 env->active_tc.PC -= 4;
3594 env->icount_decr.u16.low++;
3595 env->hflags &= ~MIPS_HFLAG_BMASK;
3596 }
3597#elif defined(TARGET_SH4)
3598 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3599 && n > 1) {
3600 env->pc -= 2;
3601 env->icount_decr.u16.low++;
3602 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3603 }
3604#endif
3605 /* This should never happen. */
3606 if (n > CF_COUNT_MASK)
3607 cpu_abort(env, "TB too big during recompile");
3608
3609 cflags = n | CF_LAST_IO;
3610 pc = tb->pc;
3611 cs_base = tb->cs_base;
3612 flags = tb->flags;
3613 tb_phys_invalidate(tb, -1);
3614 /* FIXME: In theory this could raise an exception. In practice
3615 we have already translated the block once so it's probably ok. */
3616 tb_gen_code(env, pc, cs_base, flags, cflags);
thsbf20dc02008-06-30 17:22:19 +00003617 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
pbrook2e70f6e2008-06-29 01:03:05 +00003618 the first in the TB) then we end up generating a whole new TB and
3619 repeating the fault, which is horribly inefficient.
3620 Better would be to execute just this insn uncached, or generate a
3621 second new TB. */
3622 cpu_resume_from_signal(env, NULL);
3623}
3624
bellarde3db7222005-01-26 22:00:47 +00003625void dump_exec_info(FILE *f,
3626 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3627{
3628 int i, target_code_size, max_target_code_size;
3629 int direct_jmp_count, direct_jmp2_count, cross_page;
3630 TranslationBlock *tb;
ths3b46e622007-09-17 08:09:54 +00003631
bellarde3db7222005-01-26 22:00:47 +00003632 target_code_size = 0;
3633 max_target_code_size = 0;
3634 cross_page = 0;
3635 direct_jmp_count = 0;
3636 direct_jmp2_count = 0;
3637 for(i = 0; i < nb_tbs; i++) {
3638 tb = &tbs[i];
3639 target_code_size += tb->size;
3640 if (tb->size > max_target_code_size)
3641 max_target_code_size = tb->size;
3642 if (tb->page_addr[1] != -1)
3643 cross_page++;
3644 if (tb->tb_next_offset[0] != 0xffff) {
3645 direct_jmp_count++;
3646 if (tb->tb_next_offset[1] != 0xffff) {
3647 direct_jmp2_count++;
3648 }
3649 }
3650 }
3651 /* XXX: avoid using doubles ? */
bellard57fec1f2008-02-01 10:50:11 +00003652 cpu_fprintf(f, "Translation buffer state:\n");
bellard26a5f132008-05-28 12:30:31 +00003653 cpu_fprintf(f, "gen code size %ld/%ld\n",
3654 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3655 cpu_fprintf(f, "TB count %d/%d\n",
3656 nb_tbs, code_gen_max_blocks);
ths5fafdf22007-09-16 21:08:06 +00003657 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
bellarde3db7222005-01-26 22:00:47 +00003658 nb_tbs ? target_code_size / nb_tbs : 0,
3659 max_target_code_size);
ths5fafdf22007-09-16 21:08:06 +00003660 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
bellarde3db7222005-01-26 22:00:47 +00003661 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3662 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
ths5fafdf22007-09-16 21:08:06 +00003663 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3664 cross_page,
bellarde3db7222005-01-26 22:00:47 +00003665 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3666 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
ths5fafdf22007-09-16 21:08:06 +00003667 direct_jmp_count,
bellarde3db7222005-01-26 22:00:47 +00003668 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3669 direct_jmp2_count,
3670 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
bellard57fec1f2008-02-01 10:50:11 +00003671 cpu_fprintf(f, "\nStatistics:\n");
bellarde3db7222005-01-26 22:00:47 +00003672 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3673 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3674 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
bellardb67d9a52008-05-23 09:57:34 +00003675 tcg_dump_info(f, cpu_fprintf);
bellarde3db7222005-01-26 22:00:47 +00003676}
3677
ths5fafdf22007-09-16 21:08:06 +00003678#if !defined(CONFIG_USER_ONLY)
bellard61382a52003-10-27 21:22:23 +00003679
3680#define MMUSUFFIX _cmmu
3681#define GETPC() NULL
3682#define env cpu_single_env
bellardb769d8f2004-10-03 15:07:13 +00003683#define SOFTMMU_CODE_ACCESS
bellard61382a52003-10-27 21:22:23 +00003684
3685#define SHIFT 0
3686#include "softmmu_template.h"
3687
3688#define SHIFT 1
3689#include "softmmu_template.h"
3690
3691#define SHIFT 2
3692#include "softmmu_template.h"
3693
3694#define SHIFT 3
3695#include "softmmu_template.h"
3696
3697#undef env
3698
3699#endif