/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "tcg.h"
#include "hw/hw.h"
#include "osdep.h"
#include "kvm.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#include <signal.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

static TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
uint8_t *phys_ram_dirty;
static int in_migration;

typedef struct RAMBlock {
    uint8_t *host;
    ram_addr_t offset;
    ram_addr_t length;
    struct RAMBlock *next;
} RAMBlock;

static RAMBlock *ram_blocks;
/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
   then we can no longer assume contiguous ram offsets, and external uses
   of this variable will break.  */
ram_addr_t last_ram_offset;
#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
int64_t qemu_icount;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
static PhysPageDesc **l1_phys_map;

#if !defined(CONFIG_USER_ONLY)
static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc * const *mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc * const *mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
} subpage_t;

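/* map_exec() below marks a host memory range as executable: the Win32
   variant uses VirtualProtect(), the POSIX variant rounds the range to
   host page boundaries and calls mprotect() with RWX rights. */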
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        mmap_lock();
        last_brk = (unsigned long)sbrk(0);
        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                  (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
        mmap_unlock();
    }
#endif
}

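/* The virtual page table is a two-level array: page_l1_map() returns the
   L1 slot holding the L2 table for 'index' (a target page number),
   page_find_alloc() allocates the L2 table on demand, and page_find() is
   the lookup-only variant that returns NULL when no PageDesc exists. */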
static inline PageDesc **page_l1_map(target_ulong index)
{
#if TARGET_LONG_BITS > 32
    /* Host memory outside guest VM.  For 32-bit targets we have already
       excluded high addresses.  */
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
        return NULL;
#endif
    return &l1_map[index >> L2_BITS];
}

static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p) {
        /* allocate if not found */
#if defined(CONFIG_USER_ONLY)
        size_t len = sizeof(PageDesc) * L2_SIZE;
        /* Don't use qemu_malloc because it may recurse.  */
        p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        *lp = p;
        if (h2g_valid(p)) {
            unsigned long addr = h2g(p);
            page_set_flags(addr & TARGET_PAGE_MASK,
                           TARGET_PAGE_ALIGN(addr + len),
                           PAGE_RESERVED);
        }
#else
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
        *lp = p;
#endif
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p) {
        return NULL;
    }
    return p + (index & (L2_SIZE - 1));
}

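/* Same idea for the physical page table: look up the PhysPageDesc for a
   physical page index, optionally allocating the intermediate and leaf
   tables.  Newly allocated entries start out as IO_MEM_UNASSIGNED. */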
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
#endif

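/* Allocate the buffer that will hold generated host code.  Depending on
   the host this is either the static array above or an mmap'ed region
   placed low enough in the address space that direct calls and branches
   from translated code can reach it. */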
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static void cpu_common_pre_save(void *opaque)
{
    CPUState *env = opaque;

    cpu_synchronize_state(env);
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *env = opaque;

    cpu_synchronize_state(env);
    return 0;
}

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .pre_save = cpu_common_pre_save,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

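/* Return the CPUState with the given cpu_index, or NULL if no such CPU
   has been registered.  A hypothetical use (illustration only):

       CPUState *env = qemu_get_cpu(0);
       if (env) {
           ... operate on the first registered CPU ...
       }
*/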
CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

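/* Register a new CPU: append it to the global first_cpu list, assign the
   next free cpu_index, initialise its breakpoint/watchpoint queues and,
   for system emulation, register its common state for savevm/migration. */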
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(cpu_index, &vmstate_cpu_common, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

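/* Drop the self-modifying-code bitmap of a page and reset its write
   counter, so the next burst of writes starts counting from scratch. */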
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

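/* Unlink a TB from the singly linked per-page TB list.  The low two bits
   of each list pointer encode which of the TB's two pages the link
   belongs to, so they are masked off before comparing. */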
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

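/* Unlink jump 'n' of 'tb' from the circular list of incoming jumps kept
   on the destination TB.  The list is threaded through jmp_next[] and its
   head entry (jmp_first) is tagged with 2 in the low pointer bits. */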
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

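/* Remove a TB everywhere it is referenced: the physical hash table, the
   per-page TB lists, every CPU's tb_jmp_cache and the jump chains, so
   that it can never be reached or executed again. */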
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

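/* Set bits [start, start+len) in the bitmap 'tab'; used below to mark the
   bytes of a page covered by translated code. */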
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

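/* Build the bitmap of bytes of this page that are covered by at least one
   TB, taking into account that a TB may start on the previous page or
   spill over into the next one. */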
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

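/* Translate one block starting at 'pc' and register it in the physical
   page tables.  If the TB pool or the code buffer is exhausted, everything
   is flushed and the allocation is retried, which cannot fail twice. */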
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
    return tb;
}

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    target_ulong tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
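/* User-mode variant: a write fault at 'addr' invalidates every TB on that
   page; 'pc'/'puc' describe the faulting context so that, with precise
   SMC support, execution of the current TB can be restarted safely. */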
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

1116/* add the tb in the target page and protect it if necessary */
ths5fafdf22007-09-16 21:08:06 +00001117static inline void tb_alloc_page(TranslationBlock *tb,
pbrook53a59602006-03-25 19:31:22 +00001118 unsigned int n, target_ulong page_addr)
bellardfd6ce8f2003-05-14 19:00:11 +00001119{
1120 PageDesc *p;
bellard9fa3e852004-01-04 18:06:42 +00001121 TranslationBlock *last_first_tb;
bellardfd6ce8f2003-05-14 19:00:11 +00001122
bellard9fa3e852004-01-04 18:06:42 +00001123 tb->page_addr[n] = page_addr;
bellard3a7d9292005-08-21 09:26:42 +00001124 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
bellard9fa3e852004-01-04 18:06:42 +00001125 tb->page_next[n] = p->first_tb;
1126 last_first_tb = p->first_tb;
1127 p->first_tb = (TranslationBlock *)((long)tb | n);
1128 invalidate_page_bitmap(p);
1129
bellard107db442004-06-22 18:48:46 +00001130#if defined(TARGET_HAS_SMC) || 1
bellardd720b932004-04-25 17:57:43 +00001131
bellard9fa3e852004-01-04 18:06:42 +00001132#if defined(CONFIG_USER_ONLY)
bellardfd6ce8f2003-05-14 19:00:11 +00001133 if (p->flags & PAGE_WRITE) {
pbrook53a59602006-03-25 19:31:22 +00001134 target_ulong addr;
1135 PageDesc *p2;
bellard9fa3e852004-01-04 18:06:42 +00001136 int prot;
1137
bellardfd6ce8f2003-05-14 19:00:11 +00001138 /* force the host page as non writable (writes will have a
1139 page fault + mprotect overhead) */
pbrook53a59602006-03-25 19:31:22 +00001140 page_addr &= qemu_host_page_mask;
bellardfd6ce8f2003-05-14 19:00:11 +00001141 prot = 0;
pbrook53a59602006-03-25 19:31:22 +00001142 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1143 addr += TARGET_PAGE_SIZE) {
1144
1145 p2 = page_find (addr >> TARGET_PAGE_BITS);
1146 if (!p2)
1147 continue;
1148 prot |= p2->flags;
1149 p2->flags &= ~PAGE_WRITE;
1150 page_get_flags(addr);
1151 }
ths5fafdf22007-09-16 21:08:06 +00001152 mprotect(g2h(page_addr), qemu_host_page_size,
bellardfd6ce8f2003-05-14 19:00:11 +00001153 (prot & PAGE_BITS) & ~PAGE_WRITE);
1154#ifdef DEBUG_TB_INVALIDATE
blueswir1ab3d1722007-11-04 07:31:40 +00001155 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
pbrook53a59602006-03-25 19:31:22 +00001156 page_addr);
bellardfd6ce8f2003-05-14 19:00:11 +00001157#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001158 }
bellard9fa3e852004-01-04 18:06:42 +00001159#else
1160 /* if some code is already present, then the pages are already
1161 protected. So we handle the case where only the first TB is
1162 allocated in a physical page */
1163 if (!last_first_tb) {
bellard6a00d602005-11-21 23:25:50 +00001164 tlb_protect_code(page_addr);
bellard9fa3e852004-01-04 18:06:42 +00001165 }
1166#endif
bellardd720b932004-04-25 17:57:43 +00001167
1168#endif /* TARGET_HAS_SMC */
bellardfd6ce8f2003-05-14 19:00:11 +00001169}
1170
1171/* Allocate a new translation block. Flush the translation buffer if
1172 too many translation blocks or too much generated code. */
bellardc27004e2005-01-03 23:35:10 +00001173TranslationBlock *tb_alloc(target_ulong pc)
bellardfd6ce8f2003-05-14 19:00:11 +00001174{
1175 TranslationBlock *tb;
bellardfd6ce8f2003-05-14 19:00:11 +00001176
bellard26a5f132008-05-28 12:30:31 +00001177 if (nb_tbs >= code_gen_max_blocks ||
1178 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
bellardd4e81642003-05-25 16:46:15 +00001179 return NULL;
bellardfd6ce8f2003-05-14 19:00:11 +00001180 tb = &tbs[nb_tbs++];
1181 tb->pc = pc;
bellardb448f2f2004-02-25 23:24:04 +00001182 tb->cflags = 0;
bellardd4e81642003-05-25 16:46:15 +00001183 return tb;
1184}
1185
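/* Give a TB back to the allocator.  Only the most recently allocated TB
   can actually be reclaimed; any other TB keeps its slot until the next
   tb_flush(). */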
void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);

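/* Break the direct jump chain leaving 'tb' through exit 'n': unlink it
   from the destination's incoming-jump list, reset the generated jump to
   fall back to the exit stub, and recurse into the destination TB. */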
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure now that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
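/* Invalidate the translated code that contains the breakpoint address so
   that it is retranslated and the breakpoint can take effect. */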
bellardd720b932004-04-25 17:57:43 +00001316static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1317{
Anthony Liguoric227f092009-10-01 16:12:16 -05001318 target_phys_addr_t addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00001319 target_ulong pd;
Anthony Liguoric227f092009-10-01 16:12:16 -05001320 ram_addr_t ram_addr;
pbrookc2f07f82006-04-08 17:14:56 +00001321 PhysPageDesc *p;
bellardd720b932004-04-25 17:57:43 +00001322
pbrookc2f07f82006-04-08 17:14:56 +00001323 addr = cpu_get_phys_page_debug(env, pc);
1324 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1325 if (!p) {
1326 pd = IO_MEM_UNASSIGNED;
1327 } else {
1328 pd = p->phys_offset;
1329 }
1330 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
pbrook706cd4b2006-04-08 17:36:21 +00001331 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
bellardd720b932004-04-25 17:57:43 +00001332}
bellardc27004e2005-01-03 23:35:10 +00001333#endif
bellardd720b932004-04-25 17:57:43 +00001334
pbrook6658ffb2007-03-16 23:58:11 +00001335/* Add a watchpoint. */
aliguoria1d1bb32008-11-18 20:07:32 +00001336int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1337 int flags, CPUWatchpoint **watchpoint)
pbrook6658ffb2007-03-16 23:58:11 +00001338{
aliguorib4051332008-11-18 20:14:20 +00001339 target_ulong len_mask = ~(len - 1);
aliguoric0ce9982008-11-25 22:13:57 +00001340 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001341
aliguorib4051332008-11-18 20:14:20 +00001342 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1343 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1344 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1345 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1346 return -EINVAL;
1347 }
aliguoria1d1bb32008-11-18 20:07:32 +00001348 wp = qemu_malloc(sizeof(*wp));
pbrook6658ffb2007-03-16 23:58:11 +00001349
aliguoria1d1bb32008-11-18 20:07:32 +00001350 wp->vaddr = addr;
aliguorib4051332008-11-18 20:14:20 +00001351 wp->len_mask = len_mask;
aliguoria1d1bb32008-11-18 20:07:32 +00001352 wp->flags = flags;
1353
aliguori2dc9f412008-11-18 20:56:59 +00001354 /* keep all GDB-injected watchpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001355 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001356 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001357 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001358 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001359
pbrook6658ffb2007-03-16 23:58:11 +00001360 tlb_flush_page(env, addr);
aliguoria1d1bb32008-11-18 20:07:32 +00001361
1362 if (watchpoint)
1363 *watchpoint = wp;
1364 return 0;
pbrook6658ffb2007-03-16 23:58:11 +00001365}
1366
aliguoria1d1bb32008-11-18 20:07:32 +00001367/* Remove a specific watchpoint. */
1368int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1369 int flags)
pbrook6658ffb2007-03-16 23:58:11 +00001370{
aliguorib4051332008-11-18 20:14:20 +00001371 target_ulong len_mask = ~(len - 1);
aliguoria1d1bb32008-11-18 20:07:32 +00001372 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001373
Blue Swirl72cf2d42009-09-12 07:36:22 +00001374 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001375 if (addr == wp->vaddr && len_mask == wp->len_mask
aliguori6e140f22008-11-18 20:37:55 +00001376 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
aliguoria1d1bb32008-11-18 20:07:32 +00001377 cpu_watchpoint_remove_by_ref(env, wp);
pbrook6658ffb2007-03-16 23:58:11 +00001378 return 0;
1379 }
1380 }
aliguoria1d1bb32008-11-18 20:07:32 +00001381 return -ENOENT;
pbrook6658ffb2007-03-16 23:58:11 +00001382}
1383
aliguoria1d1bb32008-11-18 20:07:32 +00001384/* Remove a specific watchpoint by reference. */
1385void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1386{
Blue Swirl72cf2d42009-09-12 07:36:22 +00001387 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
edgar_igl7d03f822008-05-17 18:58:29 +00001388
aliguoria1d1bb32008-11-18 20:07:32 +00001389 tlb_flush_page(env, watchpoint->vaddr);
1390
1391 qemu_free(watchpoint);
edgar_igl7d03f822008-05-17 18:58:29 +00001392}
1393
aliguoria1d1bb32008-11-18 20:07:32 +00001394/* Remove all matching watchpoints. */
1395void cpu_watchpoint_remove_all(CPUState *env, int mask)
1396{
aliguoric0ce9982008-11-25 22:13:57 +00001397 CPUWatchpoint *wp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001398
Blue Swirl72cf2d42009-09-12 07:36:22 +00001399 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001400 if (wp->flags & mask)
1401 cpu_watchpoint_remove_by_ref(env, wp);
aliguoric0ce9982008-11-25 22:13:57 +00001402 }
aliguoria1d1bb32008-11-18 20:07:32 +00001403}
1404
1405/* Add a breakpoint. */
1406int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1407 CPUBreakpoint **breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001408{
bellard1fddef42005-04-17 19:16:13 +00001409#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001410 CPUBreakpoint *bp;
ths3b46e622007-09-17 08:09:54 +00001411
aliguoria1d1bb32008-11-18 20:07:32 +00001412 bp = qemu_malloc(sizeof(*bp));
aliguoria1d1bb32008-11-18 20:07:32 +00001413
1414 bp->pc = pc;
1415 bp->flags = flags;
1416
aliguori2dc9f412008-11-18 20:56:59 +00001417 /* keep all GDB-injected breakpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001418 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001419 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001420 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001421 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001422
1423 breakpoint_invalidate(env, pc);
1424
1425 if (breakpoint)
1426 *breakpoint = bp;
1427 return 0;
1428#else
1429 return -ENOSYS;
1430#endif
1431}
1432
1433/* Remove a specific breakpoint. */
1434int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1435{
1436#if defined(TARGET_HAS_ICE)
1437 CPUBreakpoint *bp;
1438
Blue Swirl72cf2d42009-09-12 07:36:22 +00001439 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00001440 if (bp->pc == pc && bp->flags == flags) {
1441 cpu_breakpoint_remove_by_ref(env, bp);
bellard4c3a88a2003-07-26 12:06:08 +00001442 return 0;
aliguoria1d1bb32008-11-18 20:07:32 +00001443 }
bellard4c3a88a2003-07-26 12:06:08 +00001444 }
aliguoria1d1bb32008-11-18 20:07:32 +00001445 return -ENOENT;
bellard4c3a88a2003-07-26 12:06:08 +00001446#else
aliguoria1d1bb32008-11-18 20:07:32 +00001447 return -ENOSYS;
bellard4c3a88a2003-07-26 12:06:08 +00001448#endif
1449}
1450
aliguoria1d1bb32008-11-18 20:07:32 +00001451/* Remove a specific breakpoint by reference. */
1452void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001453{
bellard1fddef42005-04-17 19:16:13 +00001454#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001455 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
bellardd720b932004-04-25 17:57:43 +00001456
aliguoria1d1bb32008-11-18 20:07:32 +00001457 breakpoint_invalidate(env, breakpoint->pc);
1458
1459 qemu_free(breakpoint);
1460#endif
1461}
1462
1463/* Remove all matching breakpoints. */
1464void cpu_breakpoint_remove_all(CPUState *env, int mask)
1465{
1466#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001467 CPUBreakpoint *bp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001468
Blue Swirl72cf2d42009-09-12 07:36:22 +00001469 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001470 if (bp->flags & mask)
1471 cpu_breakpoint_remove_by_ref(env, bp);
aliguoric0ce9982008-11-25 22:13:57 +00001472 }
bellard4c3a88a2003-07-26 12:06:08 +00001473#endif
1474}
1475
bellardc33a3462003-07-29 20:50:33 +00001476/* enable or disable single step mode. EXCP_DEBUG is returned by the
1477 CPU loop after each instruction */
1478void cpu_single_step(CPUState *env, int enabled)
1479{
bellard1fddef42005-04-17 19:16:13 +00001480#if defined(TARGET_HAS_ICE)
bellardc33a3462003-07-29 20:50:33 +00001481 if (env->singlestep_enabled != enabled) {
1482 env->singlestep_enabled = enabled;
aliguorie22a25c2009-03-12 20:12:48 +00001483 if (kvm_enabled())
1484 kvm_update_guest_debug(env, 0);
1485 else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01001486 /* must flush all the translated code to avoid inconsistencies */
aliguorie22a25c2009-03-12 20:12:48 +00001487 /* XXX: only flush what is necessary */
1488 tb_flush(env);
1489 }
bellardc33a3462003-07-29 20:50:33 +00001490 }
1491#endif
1492}
1493
bellard34865132003-10-05 14:28:56 +00001494/* enable or disable low level logging */
1495void cpu_set_log(int log_flags)
1496{
1497 loglevel = log_flags;
1498 if (loglevel && !logfile) {
pbrook11fcfab2007-07-01 18:21:11 +00001499 logfile = fopen(logfilename, log_append ? "a" : "w");
bellard34865132003-10-05 14:28:56 +00001500 if (!logfile) {
1501 perror(logfilename);
1502 _exit(1);
1503 }
bellard9fa3e852004-01-04 18:06:42 +00001504#if !defined(CONFIG_SOFTMMU)
1505 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1506 {
blueswir1b55266b2008-09-20 08:07:15 +00001507 static char logfile_buf[4096];
bellard9fa3e852004-01-04 18:06:42 +00001508 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1509 }
Filip Navarabf65f532009-07-27 10:02:04 -05001510#elif !defined(_WIN32)
1511 /* Win32 doesn't support line-buffering and requires size >= 2 */
bellard34865132003-10-05 14:28:56 +00001512 setvbuf(logfile, NULL, _IOLBF, 0);
bellard9fa3e852004-01-04 18:06:42 +00001513#endif
pbrooke735b912007-06-30 13:53:24 +00001514 log_append = 1;
1515 }
1516 if (!loglevel && logfile) {
1517 fclose(logfile);
1518 logfile = NULL;
bellard34865132003-10-05 14:28:56 +00001519 }
1520}
1521
1522void cpu_set_log_filename(const char *filename)
1523{
1524 logfilename = strdup(filename);
pbrooke735b912007-06-30 13:53:24 +00001525 if (logfile) {
1526 fclose(logfile);
1527 logfile = NULL;
1528 }
1529 cpu_set_log(loglevel);
bellard34865132003-10-05 14:28:56 +00001530}
bellardc33a3462003-07-29 20:50:33 +00001531
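/* Force the CPU out of the TB it is currently executing by breaking the
   chained jumps of that TB (see the FIXME below about SMP safety). */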
aurel323098dba2009-03-07 21:28:24 +00001532static void cpu_unlink_tb(CPUState *env)
bellardea041c02003-06-25 16:16:50 +00001533{
pbrookd5975362008-06-07 20:50:51 +00001534 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1535 problem and hope the cpu will stop of its own accord. For userspace
1536 emulation this often isn't actually as bad as it sounds. Often
1537 signals are used primarily to interrupt blocking syscalls. */
aurel323098dba2009-03-07 21:28:24 +00001538 TranslationBlock *tb;
Anthony Liguoric227f092009-10-01 16:12:16 -05001539 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
aurel323098dba2009-03-07 21:28:24 +00001540
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001541 spin_lock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001542 tb = env->current_tb;
1543 /* if the cpu is currently executing code, we must unlink it and
1544       all the potentially executing TBs */
Riku Voipiof76cfe52009-12-04 15:16:30 +02001545 if (tb) {
aurel323098dba2009-03-07 21:28:24 +00001546 env->current_tb = NULL;
1547 tb_reset_jump_recursive(tb);
aurel323098dba2009-03-07 21:28:24 +00001548 }
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001549 spin_unlock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001550}
1551
1552/* mask must never be zero, except for A20 change call */
1553void cpu_interrupt(CPUState *env, int mask)
1554{
1555 int old_mask;
1556
1557 old_mask = env->interrupt_request;
1558 env->interrupt_request |= mask;
1559
aliguori8edac962009-04-24 18:03:45 +00001560#ifndef CONFIG_USER_ONLY
1561 /*
1562 * If called from iothread context, wake the target cpu in
1563     * case it's halted.
1564 */
1565 if (!qemu_cpu_self(env)) {
1566 qemu_cpu_kick(env);
1567 return;
1568 }
1569#endif
1570
pbrook2e70f6e2008-06-29 01:03:05 +00001571 if (use_icount) {
pbrook266910c2008-07-09 15:31:50 +00001572 env->icount_decr.u16.high = 0xffff;
pbrook2e70f6e2008-06-29 01:03:05 +00001573#ifndef CONFIG_USER_ONLY
pbrook2e70f6e2008-06-29 01:03:05 +00001574 if (!can_do_io(env)
aurel32be214e62009-03-06 21:48:00 +00001575 && (mask & ~old_mask) != 0) {
pbrook2e70f6e2008-06-29 01:03:05 +00001576 cpu_abort(env, "Raised interrupt while not in I/O function");
1577 }
1578#endif
1579 } else {
aurel323098dba2009-03-07 21:28:24 +00001580 cpu_unlink_tb(env);
bellardea041c02003-06-25 16:16:50 +00001581 }
1582}
1583
bellardb54ad042004-05-20 13:42:52 +00001584void cpu_reset_interrupt(CPUState *env, int mask)
1585{
1586 env->interrupt_request &= ~mask;
1587}
1588
aurel323098dba2009-03-07 21:28:24 +00001589void cpu_exit(CPUState *env)
1590{
1591 env->exit_request = 1;
1592 cpu_unlink_tb(env);
1593}
1594
blueswir1c7cd6a32008-10-02 18:27:46 +00001595const CPULogItem cpu_log_items[] = {
ths5fafdf22007-09-16 21:08:06 +00001596 { CPU_LOG_TB_OUT_ASM, "out_asm",
bellardf193c792004-03-21 17:06:25 +00001597 "show generated host assembly code for each compiled TB" },
1598 { CPU_LOG_TB_IN_ASM, "in_asm",
1599 "show target assembly code for each compiled TB" },
ths5fafdf22007-09-16 21:08:06 +00001600 { CPU_LOG_TB_OP, "op",
bellard57fec1f2008-02-01 10:50:11 +00001601 "show micro ops for each compiled TB" },
bellardf193c792004-03-21 17:06:25 +00001602 { CPU_LOG_TB_OP_OPT, "op_opt",
blueswir1e01a1152008-03-14 17:37:11 +00001603 "show micro ops "
1604#ifdef TARGET_I386
1605 "before eflags optimization and "
bellardf193c792004-03-21 17:06:25 +00001606#endif
blueswir1e01a1152008-03-14 17:37:11 +00001607 "after liveness analysis" },
bellardf193c792004-03-21 17:06:25 +00001608 { CPU_LOG_INT, "int",
1609 "show interrupts/exceptions in short format" },
1610 { CPU_LOG_EXEC, "exec",
1611 "show trace before each executed TB (lots of logs)" },
bellard9fddaa02004-05-21 12:59:32 +00001612 { CPU_LOG_TB_CPU, "cpu",
thse91c8a72007-06-03 13:35:16 +00001613 "show CPU state before block translation" },
bellardf193c792004-03-21 17:06:25 +00001614#ifdef TARGET_I386
1615 { CPU_LOG_PCALL, "pcall",
1616 "show protected mode far calls/returns/exceptions" },
aliguorieca1bdf2009-01-26 19:54:31 +00001617 { CPU_LOG_RESET, "cpu_reset",
1618 "show CPU state before CPU resets" },
bellardf193c792004-03-21 17:06:25 +00001619#endif
bellard8e3a9fd2004-10-09 17:32:58 +00001620#ifdef DEBUG_IOPORT
bellardfd872592004-05-12 19:11:15 +00001621 { CPU_LOG_IOPORT, "ioport",
1622 "show all i/o ports accesses" },
bellard8e3a9fd2004-10-09 17:32:58 +00001623#endif
bellardf193c792004-03-21 17:06:25 +00001624 { 0, NULL, NULL },
1625};
1626
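/* return 1 if the first n characters of s1 equal the zero-terminated
   string s2 */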
1627static int cmp1(const char *s1, int n, const char *s2)
1628{
1629 if (strlen(s2) != n)
1630 return 0;
1631 return memcmp(s1, s2, n) == 0;
1632}
ths3b46e622007-09-17 08:09:54 +00001633
bellardf193c792004-03-21 17:06:25 +00001634/* takes a comma separated list of log masks. Return 0 if error. */
1635int cpu_str_to_log_mask(const char *str)
1636{
blueswir1c7cd6a32008-10-02 18:27:46 +00001637 const CPULogItem *item;
bellardf193c792004-03-21 17:06:25 +00001638 int mask;
1639 const char *p, *p1;
1640
1641 p = str;
1642 mask = 0;
1643 for(;;) {
1644 p1 = strchr(p, ',');
1645 if (!p1)
1646 p1 = p + strlen(p);
bellard8e3a9fd2004-10-09 17:32:58 +00001647 if(cmp1(p,p1-p,"all")) {
1648 for(item = cpu_log_items; item->mask != 0; item++) {
1649 mask |= item->mask;
1650 }
1651 } else {
bellardf193c792004-03-21 17:06:25 +00001652 for(item = cpu_log_items; item->mask != 0; item++) {
1653 if (cmp1(p, p1 - p, item->name))
1654 goto found;
1655 }
1656 return 0;
bellard8e3a9fd2004-10-09 17:32:58 +00001657 }
bellardf193c792004-03-21 17:06:25 +00001658 found:
1659 mask |= item->mask;
1660 if (*p1 != ',')
1661 break;
1662 p = p1 + 1;
1663 }
1664 return mask;
1665}
bellardea041c02003-06-25 16:16:50 +00001666
bellard75012672003-06-21 13:11:07 +00001667void cpu_abort(CPUState *env, const char *fmt, ...)
1668{
1669 va_list ap;
pbrook493ae1f2007-11-23 16:53:59 +00001670 va_list ap2;
bellard75012672003-06-21 13:11:07 +00001671
1672 va_start(ap, fmt);
pbrook493ae1f2007-11-23 16:53:59 +00001673 va_copy(ap2, ap);
bellard75012672003-06-21 13:11:07 +00001674 fprintf(stderr, "qemu: fatal: ");
1675 vfprintf(stderr, fmt, ap);
1676 fprintf(stderr, "\n");
1677#ifdef TARGET_I386
bellard7fe48482004-10-09 18:08:01 +00001678 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1679#else
1680 cpu_dump_state(env, stderr, fprintf, 0);
bellard75012672003-06-21 13:11:07 +00001681#endif
aliguori93fcfe32009-01-15 22:34:14 +00001682 if (qemu_log_enabled()) {
1683 qemu_log("qemu: fatal: ");
1684 qemu_log_vprintf(fmt, ap2);
1685 qemu_log("\n");
j_mayerf9373292007-09-29 12:18:20 +00001686#ifdef TARGET_I386
aliguori93fcfe32009-01-15 22:34:14 +00001687 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
j_mayerf9373292007-09-29 12:18:20 +00001688#else
aliguori93fcfe32009-01-15 22:34:14 +00001689 log_cpu_state(env, 0);
j_mayerf9373292007-09-29 12:18:20 +00001690#endif
aliguori31b1a7b2009-01-15 22:35:09 +00001691 qemu_log_flush();
aliguori93fcfe32009-01-15 22:34:14 +00001692 qemu_log_close();
balrog924edca2007-06-10 14:07:13 +00001693 }
pbrook493ae1f2007-11-23 16:53:59 +00001694 va_end(ap2);
j_mayerf9373292007-09-29 12:18:20 +00001695 va_end(ap);
Riku Voipiofd052bf2010-01-25 14:30:49 +02001696#if defined(CONFIG_USER_ONLY)
1697 {
1698 struct sigaction act;
1699 sigfillset(&act.sa_mask);
1700 act.sa_handler = SIG_DFL;
1701 sigaction(SIGABRT, &act, NULL);
1702 }
1703#endif
bellard75012672003-06-21 13:11:07 +00001704 abort();
1705}
1706
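/* Duplicate a CPU state into a freshly initialized one, preserving the
   CPU chaining and index of the copy; break/watchpoints are re-inserted
   into the new environment. */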
thsc5be9f02007-02-28 20:20:53 +00001707CPUState *cpu_copy(CPUState *env)
1708{
ths01ba9812007-12-09 02:22:57 +00001709 CPUState *new_env = cpu_init(env->cpu_model_str);
thsc5be9f02007-02-28 20:20:53 +00001710 CPUState *next_cpu = new_env->next_cpu;
1711 int cpu_index = new_env->cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001712#if defined(TARGET_HAS_ICE)
1713 CPUBreakpoint *bp;
1714 CPUWatchpoint *wp;
1715#endif
1716
thsc5be9f02007-02-28 20:20:53 +00001717 memcpy(new_env, env, sizeof(CPUState));
aliguori5a38f082009-01-15 20:16:51 +00001718
1719 /* Preserve chaining and index. */
thsc5be9f02007-02-28 20:20:53 +00001720 new_env->next_cpu = next_cpu;
1721 new_env->cpu_index = cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001722
1723 /* Clone all break/watchpoints.
1724 Note: Once we support ptrace with hw-debug register access, make sure
1725 BP_CPU break/watchpoints are handled correctly on clone. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00001726 QTAILQ_INIT(&env->breakpoints);
1727 QTAILQ_INIT(&env->watchpoints);
aliguori5a38f082009-01-15 20:16:51 +00001728#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001729 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001730 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1731 }
Blue Swirl72cf2d42009-09-12 07:36:22 +00001732 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001733 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1734 wp->flags, NULL);
1735 }
1736#endif
1737
thsc5be9f02007-02-28 20:20:53 +00001738 return new_env;
1739}
1740
bellard01243112004-01-04 15:48:17 +00001741#if !defined(CONFIG_USER_ONLY)
1742
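/* Clear the tb_jmp_cache entries that may reference TBs overlapping the
   flushed page; a TB can start on the previous page and spill into this
   one, so two hash ranges are cleared. */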
edgar_igl5c751e92008-05-06 08:44:21 +00001743static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1744{
1745 unsigned int i;
1746
1747 /* Discard jump cache entries for any tb which might potentially
1748 overlap the flushed page. */
1749 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1750 memset (&env->tb_jmp_cache[i], 0,
1751 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1752
1753 i = tb_jmp_cache_hash_page(addr);
1754 memset (&env->tb_jmp_cache[i], 0,
1755 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1756}
1757
Igor Kovalenko08738982009-07-12 02:15:40 +04001758static CPUTLBEntry s_cputlb_empty_entry = {
1759 .addr_read = -1,
1760 .addr_write = -1,
1761 .addr_code = -1,
1762 .addend = -1,
1763};
1764
bellardee8b7022004-02-03 23:35:10 +00001765/* NOTE: if flush_global is true, also flush global entries (not
1766 implemented yet) */
1767void tlb_flush(CPUState *env, int flush_global)
bellard33417e72003-08-10 21:47:01 +00001768{
bellard33417e72003-08-10 21:47:01 +00001769 int i;
bellard01243112004-01-04 15:48:17 +00001770
bellard9fa3e852004-01-04 18:06:42 +00001771#if defined(DEBUG_TLB)
1772 printf("tlb_flush:\n");
1773#endif
bellard01243112004-01-04 15:48:17 +00001774 /* must reset current TB so that interrupts cannot modify the
1775 links while we are modifying them */
1776 env->current_tb = NULL;
1777
bellard33417e72003-08-10 21:47:01 +00001778 for(i = 0; i < CPU_TLB_SIZE; i++) {
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001779 int mmu_idx;
1780 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
Igor Kovalenko08738982009-07-12 02:15:40 +04001781 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001782 }
bellard33417e72003-08-10 21:47:01 +00001783 }
bellard9fa3e852004-01-04 18:06:42 +00001784
bellard8a40a182005-11-20 10:35:40 +00001785 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
bellard9fa3e852004-01-04 18:06:42 +00001786
bellarde3db7222005-01-26 22:00:47 +00001787 tlb_flush_count++;
bellard33417e72003-08-10 21:47:01 +00001788}
1789
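/* invalidate a single TLB entry if its read, write or code address
   matches the given page */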
bellard274da6b2004-05-20 21:56:27 +00001790static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
bellard61382a52003-10-27 21:22:23 +00001791{
ths5fafdf22007-09-16 21:08:06 +00001792 if (addr == (tlb_entry->addr_read &
bellard84b7b8e2005-11-28 21:19:04 +00001793 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00001794 addr == (tlb_entry->addr_write &
bellard84b7b8e2005-11-28 21:19:04 +00001795 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00001796 addr == (tlb_entry->addr_code &
bellard84b7b8e2005-11-28 21:19:04 +00001797 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
Igor Kovalenko08738982009-07-12 02:15:40 +04001798 *tlb_entry = s_cputlb_empty_entry;
bellard84b7b8e2005-11-28 21:19:04 +00001799 }
bellard61382a52003-10-27 21:22:23 +00001800}
1801
bellard2e126692004-04-25 21:28:44 +00001802void tlb_flush_page(CPUState *env, target_ulong addr)
bellard33417e72003-08-10 21:47:01 +00001803{
bellard8a40a182005-11-20 10:35:40 +00001804 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001805 int mmu_idx;
bellard01243112004-01-04 15:48:17 +00001806
bellard9fa3e852004-01-04 18:06:42 +00001807#if defined(DEBUG_TLB)
bellard108c49b2005-07-24 12:55:09 +00001808 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
bellard9fa3e852004-01-04 18:06:42 +00001809#endif
bellard01243112004-01-04 15:48:17 +00001810 /* must reset current TB so that interrupts cannot modify the
1811 links while we are modifying them */
1812 env->current_tb = NULL;
bellard33417e72003-08-10 21:47:01 +00001813
bellard61382a52003-10-27 21:22:23 +00001814 addr &= TARGET_PAGE_MASK;
bellard33417e72003-08-10 21:47:01 +00001815 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001816 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1817 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
bellard01243112004-01-04 15:48:17 +00001818
edgar_igl5c751e92008-05-06 08:44:21 +00001819 tlb_flush_jmp_cache(env, addr);
bellard9fa3e852004-01-04 18:06:42 +00001820}
1821
bellard9fa3e852004-01-04 18:06:42 +00001822/* update the TLBs so that writes to code in the virtual page 'addr'
1823 can be detected */
Anthony Liguoric227f092009-10-01 16:12:16 -05001824static void tlb_protect_code(ram_addr_t ram_addr)
bellard61382a52003-10-27 21:22:23 +00001825{
ths5fafdf22007-09-16 21:08:06 +00001826 cpu_physical_memory_reset_dirty(ram_addr,
bellard6a00d602005-11-21 23:25:50 +00001827 ram_addr + TARGET_PAGE_SIZE,
1828 CODE_DIRTY_FLAG);
bellard9fa3e852004-01-04 18:06:42 +00001829}
1830
bellard9fa3e852004-01-04 18:06:42 +00001831/* update the TLB so that writes in physical page 'phys_addr' are no longer
bellard3a7d9292005-08-21 09:26:42 +00001832 tested for self modifying code */
Anthony Liguoric227f092009-10-01 16:12:16 -05001833static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
bellard3a7d9292005-08-21 09:26:42 +00001834 target_ulong vaddr)
bellard9fa3e852004-01-04 18:06:42 +00001835{
bellard3a7d9292005-08-21 09:26:42 +00001836 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
bellard1ccde1c2004-02-06 19:46:14 +00001837}
1838
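/* mark RAM write entries within [start, start + length) as TLB_NOTDIRTY
   so that the next write is trapped and the page dirty state updated */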
ths5fafdf22007-09-16 21:08:06 +00001839static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
bellard1ccde1c2004-02-06 19:46:14 +00001840 unsigned long start, unsigned long length)
1841{
1842 unsigned long addr;
bellard84b7b8e2005-11-28 21:19:04 +00001843 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1844 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
bellard1ccde1c2004-02-06 19:46:14 +00001845 if ((addr - start) < length) {
pbrook0f459d12008-06-09 00:20:13 +00001846 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
bellard1ccde1c2004-02-06 19:46:14 +00001847 }
1848 }
1849}
1850
pbrook5579c7f2009-04-11 14:47:08 +00001851/* Note: start and end must be within the same ram block. */
Anthony Liguoric227f092009-10-01 16:12:16 -05001852void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
bellard0a962c02005-02-10 22:00:27 +00001853 int dirty_flags)
bellard1ccde1c2004-02-06 19:46:14 +00001854{
1855 CPUState *env;
bellard4f2ac232004-04-26 19:44:02 +00001856 unsigned long length, start1;
bellard0a962c02005-02-10 22:00:27 +00001857 int i, mask, len;
1858 uint8_t *p;
bellard1ccde1c2004-02-06 19:46:14 +00001859
1860 start &= TARGET_PAGE_MASK;
1861 end = TARGET_PAGE_ALIGN(end);
1862
1863 length = end - start;
1864 if (length == 0)
1865 return;
bellard0a962c02005-02-10 22:00:27 +00001866 len = length >> TARGET_PAGE_BITS;
bellardf23db162005-08-21 19:12:28 +00001867 mask = ~dirty_flags;
1868 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1869 for(i = 0; i < len; i++)
1870 p[i] &= mask;
1871
bellard1ccde1c2004-02-06 19:46:14 +00001872 /* we modify the TLB cache so that the dirty bit will be set again
1873 when accessing the range */
pbrook5579c7f2009-04-11 14:47:08 +00001874 start1 = (unsigned long)qemu_get_ram_ptr(start);
1875    /* Check that we don't span multiple blocks - this breaks the
1876 address comparisons below. */
1877 if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
1878 != (end - 1) - start) {
1879 abort();
1880 }
1881
bellard6a00d602005-11-21 23:25:50 +00001882 for(env = first_cpu; env != NULL; env = env->next_cpu) {
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001883 int mmu_idx;
1884 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1885 for(i = 0; i < CPU_TLB_SIZE; i++)
1886 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
1887 start1, length);
1888 }
bellard6a00d602005-11-21 23:25:50 +00001889 }
bellard1ccde1c2004-02-06 19:46:14 +00001890}
1891
aliguori74576192008-10-06 14:02:03 +00001892int cpu_physical_memory_set_dirty_tracking(int enable)
1893{
1894 in_migration = enable;
Jan Kiszkab0a46a32009-05-02 00:22:51 +02001895 if (kvm_enabled()) {
1896 return kvm_set_migration_log(enable);
1897 }
aliguori74576192008-10-06 14:02:03 +00001898 return 0;
1899}
1900
1901int cpu_physical_memory_get_dirty_tracking(void)
1902{
1903 return in_migration;
1904}
1905
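/* synchronize the dirty log for the given physical range with the
   hypervisor (currently only KVM) */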
Anthony Liguoric227f092009-10-01 16:12:16 -05001906int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
1907 target_phys_addr_t end_addr)
aliguori2bec46d2008-11-24 20:21:41 +00001908{
Jan Kiszka151f7742009-05-01 20:52:47 +02001909 int ret = 0;
1910
aliguori2bec46d2008-11-24 20:21:41 +00001911 if (kvm_enabled())
Jan Kiszka151f7742009-05-01 20:52:47 +02001912 ret = kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1913 return ret;
aliguori2bec46d2008-11-24 20:21:41 +00001914}
1915
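/* re-arm the TLB_NOTDIRTY trap for a RAM entry whose page is no longer
   marked dirty */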
bellard3a7d9292005-08-21 09:26:42 +00001916static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1917{
Anthony Liguoric227f092009-10-01 16:12:16 -05001918 ram_addr_t ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00001919 void *p;
bellard3a7d9292005-08-21 09:26:42 +00001920
bellard84b7b8e2005-11-28 21:19:04 +00001921 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
pbrook5579c7f2009-04-11 14:47:08 +00001922 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
1923 + tlb_entry->addend);
1924 ram_addr = qemu_ram_addr_from_host(p);
bellard3a7d9292005-08-21 09:26:42 +00001925 if (!cpu_physical_memory_is_dirty(ram_addr)) {
pbrook0f459d12008-06-09 00:20:13 +00001926 tlb_entry->addr_write |= TLB_NOTDIRTY;
bellard3a7d9292005-08-21 09:26:42 +00001927 }
1928 }
1929}
1930
1931/* update the TLB according to the current state of the dirty bits */
1932void cpu_tlb_update_dirty(CPUState *env)
1933{
1934 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001935 int mmu_idx;
1936 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1937 for(i = 0; i < CPU_TLB_SIZE; i++)
1938 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
1939 }
bellard3a7d9292005-08-21 09:26:42 +00001940}
1941
pbrook0f459d12008-06-09 00:20:13 +00001942static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00001943{
pbrook0f459d12008-06-09 00:20:13 +00001944 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1945 tlb_entry->addr_write = vaddr;
bellard1ccde1c2004-02-06 19:46:14 +00001946}
1947
pbrook0f459d12008-06-09 00:20:13 +00001948/* update the TLB corresponding to virtual page vaddr
1949 so that it is no longer dirty */
1950static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00001951{
bellard1ccde1c2004-02-06 19:46:14 +00001952 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001953 int mmu_idx;
bellard1ccde1c2004-02-06 19:46:14 +00001954
pbrook0f459d12008-06-09 00:20:13 +00001955 vaddr &= TARGET_PAGE_MASK;
bellard1ccde1c2004-02-06 19:46:14 +00001956 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001957 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1958 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
bellard9fa3e852004-01-04 18:06:42 +00001959}
1960
bellard59817cc2004-02-16 22:01:13 +00001961/* add a new TLB entry. At most one entry for a given virtual address
1962 is permitted. Return 0 if OK or 2 if the page could not be mapped
1963 (can only happen in non SOFTMMU mode for I/O pages or pages
1964 conflicting with the host address space). */
ths5fafdf22007-09-16 21:08:06 +00001965int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
Anthony Liguoric227f092009-10-01 16:12:16 -05001966 target_phys_addr_t paddr, int prot,
j_mayer6ebbf392007-10-14 07:07:08 +00001967 int mmu_idx, int is_softmmu)
bellard9fa3e852004-01-04 18:06:42 +00001968{
bellard92e873b2004-05-21 14:52:29 +00001969 PhysPageDesc *p;
bellard4f2ac232004-04-26 19:44:02 +00001970 unsigned long pd;
bellard9fa3e852004-01-04 18:06:42 +00001971 unsigned int index;
bellard4f2ac232004-04-26 19:44:02 +00001972 target_ulong address;
pbrook0f459d12008-06-09 00:20:13 +00001973 target_ulong code_address;
Anthony Liguoric227f092009-10-01 16:12:16 -05001974 target_phys_addr_t addend;
bellard9fa3e852004-01-04 18:06:42 +00001975 int ret;
bellard84b7b8e2005-11-28 21:19:04 +00001976 CPUTLBEntry *te;
aliguoria1d1bb32008-11-18 20:07:32 +00001977 CPUWatchpoint *wp;
Anthony Liguoric227f092009-10-01 16:12:16 -05001978 target_phys_addr_t iotlb;
bellard9fa3e852004-01-04 18:06:42 +00001979
bellard92e873b2004-05-21 14:52:29 +00001980 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
bellard9fa3e852004-01-04 18:06:42 +00001981 if (!p) {
1982 pd = IO_MEM_UNASSIGNED;
bellard9fa3e852004-01-04 18:06:42 +00001983 } else {
1984 pd = p->phys_offset;
bellard9fa3e852004-01-04 18:06:42 +00001985 }
1986#if defined(DEBUG_TLB)
j_mayer6ebbf392007-10-14 07:07:08 +00001987 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1988 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
bellard9fa3e852004-01-04 18:06:42 +00001989#endif
1990
1991 ret = 0;
pbrook0f459d12008-06-09 00:20:13 +00001992 address = vaddr;
1993 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1994 /* IO memory case (romd handled later) */
1995 address |= TLB_MMIO;
1996 }
pbrook5579c7f2009-04-11 14:47:08 +00001997 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
pbrook0f459d12008-06-09 00:20:13 +00001998 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1999 /* Normal RAM. */
2000 iotlb = pd & TARGET_PAGE_MASK;
2001 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2002 iotlb |= IO_MEM_NOTDIRTY;
2003 else
2004 iotlb |= IO_MEM_ROM;
2005 } else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002006 /* IO handlers are currently passed a physical address.
pbrook0f459d12008-06-09 00:20:13 +00002007 It would be nice to pass an offset from the base address
2008 of that region. This would avoid having to special case RAM,
2009 and avoid full address decoding in every device.
2010 We can't use the high bits of pd for this because
2011 IO_MEM_ROMD uses these as a ram address. */
pbrook8da3ff12008-12-01 18:59:50 +00002012 iotlb = (pd & ~TARGET_PAGE_MASK);
2013 if (p) {
pbrook8da3ff12008-12-01 18:59:50 +00002014 iotlb += p->region_offset;
2015 } else {
2016 iotlb += paddr;
2017 }
pbrook0f459d12008-06-09 00:20:13 +00002018 }
pbrook6658ffb2007-03-16 23:58:11 +00002019
pbrook0f459d12008-06-09 00:20:13 +00002020 code_address = address;
2021 /* Make accesses to pages with watchpoints go via the
2022 watchpoint trap routines. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00002023 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00002024 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
pbrook0f459d12008-06-09 00:20:13 +00002025 iotlb = io_mem_watch + paddr;
2026 /* TODO: The memory case can be optimized by not trapping
2027 reads of pages with a write breakpoint. */
2028 address |= TLB_MMIO;
pbrook6658ffb2007-03-16 23:58:11 +00002029 }
pbrook0f459d12008-06-09 00:20:13 +00002030 }
balrogd79acba2007-06-26 20:01:13 +00002031
pbrook0f459d12008-06-09 00:20:13 +00002032 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2033 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2034 te = &env->tlb_table[mmu_idx][index];
2035 te->addend = addend - vaddr;
2036 if (prot & PAGE_READ) {
2037 te->addr_read = address;
2038 } else {
2039 te->addr_read = -1;
2040 }
edgar_igl5c751e92008-05-06 08:44:21 +00002041
pbrook0f459d12008-06-09 00:20:13 +00002042 if (prot & PAGE_EXEC) {
2043 te->addr_code = code_address;
2044 } else {
2045 te->addr_code = -1;
2046 }
2047 if (prot & PAGE_WRITE) {
2048 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2049 (pd & IO_MEM_ROMD)) {
2050 /* Write access calls the I/O callback. */
2051 te->addr_write = address | TLB_MMIO;
2052 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2053 !cpu_physical_memory_is_dirty(pd)) {
2054 te->addr_write = address | TLB_NOTDIRTY;
bellard84b7b8e2005-11-28 21:19:04 +00002055 } else {
pbrook0f459d12008-06-09 00:20:13 +00002056 te->addr_write = address;
bellard9fa3e852004-01-04 18:06:42 +00002057 }
pbrook0f459d12008-06-09 00:20:13 +00002058 } else {
2059 te->addr_write = -1;
bellard9fa3e852004-01-04 18:06:42 +00002060 }
bellard9fa3e852004-01-04 18:06:42 +00002061 return ret;
2062}
2063
bellard01243112004-01-04 15:48:17 +00002064#else
2065
bellardee8b7022004-02-03 23:35:10 +00002066void tlb_flush(CPUState *env, int flush_global)
bellard01243112004-01-04 15:48:17 +00002067{
2068}
2069
bellard2e126692004-04-25 21:28:44 +00002070void tlb_flush_page(CPUState *env, target_ulong addr)
bellard01243112004-01-04 15:48:17 +00002071{
2072}
2073
ths5fafdf22007-09-16 21:08:06 +00002074int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
Anthony Liguoric227f092009-10-01 16:12:16 -05002075 target_phys_addr_t paddr, int prot,
j_mayer6ebbf392007-10-14 07:07:08 +00002076 int mmu_idx, int is_softmmu)
bellard33417e72003-08-10 21:47:01 +00002077{
bellard9fa3e852004-01-04 18:06:42 +00002078 return 0;
2079}
bellard33417e72003-08-10 21:47:01 +00002080
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002081/*
2082 * Walks guest process memory "regions" one by one
2083 * and calls callback function 'fn' for each region.
2084 */
2085int walk_memory_regions(void *priv,
2086 int (*fn)(void *, unsigned long, unsigned long, unsigned long))
bellard9fa3e852004-01-04 18:06:42 +00002087{
2088 unsigned long start, end;
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002089 PageDesc *p = NULL;
bellard9fa3e852004-01-04 18:06:42 +00002090 int i, j, prot, prot1;
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002091 int rc = 0;
bellard9fa3e852004-01-04 18:06:42 +00002092
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002093 start = end = -1;
bellard9fa3e852004-01-04 18:06:42 +00002094 prot = 0;
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002095
2096 for (i = 0; i <= L1_SIZE; i++) {
2097 p = (i < L1_SIZE) ? l1_map[i] : NULL;
2098 for (j = 0; j < L2_SIZE; j++) {
2099 prot1 = (p == NULL) ? 0 : p[j].flags;
2100 /*
2101         * "region" is one contiguous chunk of memory
2102         * that has the same protection flags set.
2103 */
bellard9fa3e852004-01-04 18:06:42 +00002104 if (prot1 != prot) {
2105 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2106 if (start != -1) {
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002107 rc = (*fn)(priv, start, end, prot);
2108 /* callback can stop iteration by returning != 0 */
2109 if (rc != 0)
2110 return (rc);
bellard9fa3e852004-01-04 18:06:42 +00002111 }
2112 if (prot1 != 0)
2113 start = end;
2114 else
2115 start = -1;
2116 prot = prot1;
2117 }
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002118 if (p == NULL)
bellard9fa3e852004-01-04 18:06:42 +00002119 break;
2120 }
bellard33417e72003-08-10 21:47:01 +00002121 }
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002122 return (rc);
2123}
2124
2125static int dump_region(void *priv, unsigned long start,
2126 unsigned long end, unsigned long prot)
2127{
2128 FILE *f = (FILE *)priv;
2129
2130 (void) fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2131 start, end, end - start,
2132 ((prot & PAGE_READ) ? 'r' : '-'),
2133 ((prot & PAGE_WRITE) ? 'w' : '-'),
2134 ((prot & PAGE_EXEC) ? 'x' : '-'));
2135
2136 return (0);
2137}
2138
2139/* dump memory mappings */
2140void page_dump(FILE *f)
2141{
2142 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2143 "start", "end", "size", "prot");
2144 walk_memory_regions(f, dump_region);
bellard33417e72003-08-10 21:47:01 +00002145}
2146
pbrook53a59602006-03-25 19:31:22 +00002147int page_get_flags(target_ulong address)
bellard33417e72003-08-10 21:47:01 +00002148{
bellard9fa3e852004-01-04 18:06:42 +00002149 PageDesc *p;
2150
2151 p = page_find(address >> TARGET_PAGE_BITS);
bellard33417e72003-08-10 21:47:01 +00002152 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00002153 return 0;
2154 return p->flags;
bellard33417e72003-08-10 21:47:01 +00002155}
2156
bellard9fa3e852004-01-04 18:06:42 +00002157/* modify the flags of a page and invalidate the code if
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002158   necessary. The flag PAGE_WRITE_ORG is set automatically
bellard9fa3e852004-01-04 18:06:42 +00002159 depending on PAGE_WRITE */
pbrook53a59602006-03-25 19:31:22 +00002160void page_set_flags(target_ulong start, target_ulong end, int flags)
bellard9fa3e852004-01-04 18:06:42 +00002161{
2162 PageDesc *p;
pbrook53a59602006-03-25 19:31:22 +00002163 target_ulong addr;
bellard9fa3e852004-01-04 18:06:42 +00002164
pbrookc8a706f2008-06-02 16:16:42 +00002165 /* mmap_lock should already be held. */
bellard9fa3e852004-01-04 18:06:42 +00002166 start = start & TARGET_PAGE_MASK;
2167 end = TARGET_PAGE_ALIGN(end);
2168 if (flags & PAGE_WRITE)
2169 flags |= PAGE_WRITE_ORG;
bellard9fa3e852004-01-04 18:06:42 +00002170 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2171 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
pbrook17e23772008-06-09 13:47:45 +00002172 /* We may be called for host regions that are outside guest
2173 address space. */
2174 if (!p)
2175 return;
bellard9fa3e852004-01-04 18:06:42 +00002176 /* if the write protection is set, then we invalidate the code
2177 inside */
ths5fafdf22007-09-16 21:08:06 +00002178 if (!(p->flags & PAGE_WRITE) &&
bellard9fa3e852004-01-04 18:06:42 +00002179 (flags & PAGE_WRITE) &&
2180 p->first_tb) {
bellardd720b932004-04-25 17:57:43 +00002181 tb_invalidate_phys_page(addr, 0, NULL);
bellard9fa3e852004-01-04 18:06:42 +00002182 }
2183 p->flags = flags;
2184 }
bellard9fa3e852004-01-04 18:06:42 +00002185}
2186
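/* Check that the guest range [start, start + len) is mapped with the
   requested protection; pages write-protected because they contain
   translated code are unprotected on demand. Returns 0 on success,
   -1 otherwise. */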
ths3d97b402007-11-02 19:02:07 +00002187int page_check_range(target_ulong start, target_ulong len, int flags)
2188{
2189 PageDesc *p;
2190 target_ulong end;
2191 target_ulong addr;
2192
balrog55f280c2008-10-28 10:24:11 +00002193 if (start + len < start)
2194 /* we've wrapped around */
2195 return -1;
2196
ths3d97b402007-11-02 19:02:07 +00002197    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2198 start = start & TARGET_PAGE_MASK;
2199
ths3d97b402007-11-02 19:02:07 +00002200 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2201 p = page_find(addr >> TARGET_PAGE_BITS);
2202 if( !p )
2203 return -1;
2204 if( !(p->flags & PAGE_VALID) )
2205 return -1;
2206
bellarddae32702007-11-14 10:51:00 +00002207 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
ths3d97b402007-11-02 19:02:07 +00002208 return -1;
bellarddae32702007-11-14 10:51:00 +00002209 if (flags & PAGE_WRITE) {
2210 if (!(p->flags & PAGE_WRITE_ORG))
2211 return -1;
2212 /* unprotect the page if it was put read-only because it
2213 contains translated code */
2214 if (!(p->flags & PAGE_WRITE)) {
2215 if (!page_unprotect(addr, 0, NULL))
2216 return -1;
2217 }
2218 return 0;
2219 }
ths3d97b402007-11-02 19:02:07 +00002220 }
2221 return 0;
2222}
2223
bellard9fa3e852004-01-04 18:06:42 +00002224/* called from signal handler: invalidate the code and unprotect the
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002225 page. Return TRUE if the fault was successfully handled. */
pbrook53a59602006-03-25 19:31:22 +00002226int page_unprotect(target_ulong address, unsigned long pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00002227{
2228 unsigned int page_index, prot, pindex;
2229 PageDesc *p, *p1;
pbrook53a59602006-03-25 19:31:22 +00002230 target_ulong host_start, host_end, addr;
bellard9fa3e852004-01-04 18:06:42 +00002231
pbrookc8a706f2008-06-02 16:16:42 +00002232 /* Technically this isn't safe inside a signal handler. However we
2233 know this only ever happens in a synchronous SEGV handler, so in
2234 practice it seems to be ok. */
2235 mmap_lock();
2236
bellard83fb7ad2004-07-05 21:25:26 +00002237 host_start = address & qemu_host_page_mask;
bellard9fa3e852004-01-04 18:06:42 +00002238 page_index = host_start >> TARGET_PAGE_BITS;
2239 p1 = page_find(page_index);
pbrookc8a706f2008-06-02 16:16:42 +00002240 if (!p1) {
2241 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002242 return 0;
pbrookc8a706f2008-06-02 16:16:42 +00002243 }
bellard83fb7ad2004-07-05 21:25:26 +00002244 host_end = host_start + qemu_host_page_size;
bellard9fa3e852004-01-04 18:06:42 +00002245 p = p1;
2246 prot = 0;
2247 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2248 prot |= p->flags;
2249 p++;
2250 }
2251 /* if the page was really writable, then we change its
2252 protection back to writable */
2253 if (prot & PAGE_WRITE_ORG) {
2254 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2255 if (!(p1[pindex].flags & PAGE_WRITE)) {
ths5fafdf22007-09-16 21:08:06 +00002256 mprotect((void *)g2h(host_start), qemu_host_page_size,
bellard9fa3e852004-01-04 18:06:42 +00002257 (prot & PAGE_BITS) | PAGE_WRITE);
2258 p1[pindex].flags |= PAGE_WRITE;
2259 /* and since the content will be modified, we must invalidate
2260 the corresponding translated code. */
bellardd720b932004-04-25 17:57:43 +00002261 tb_invalidate_phys_page(address, pc, puc);
bellard9fa3e852004-01-04 18:06:42 +00002262#ifdef DEBUG_TB_CHECK
2263 tb_invalidate_check(address);
2264#endif
pbrookc8a706f2008-06-02 16:16:42 +00002265 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002266 return 1;
2267 }
2268 }
pbrookc8a706f2008-06-02 16:16:42 +00002269 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002270 return 0;
2271}
2272
bellard6a00d602005-11-21 23:25:50 +00002273static inline void tlb_set_dirty(CPUState *env,
2274 unsigned long addr, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002275{
2276}
bellard9fa3e852004-01-04 18:06:42 +00002277#endif /* defined(CONFIG_USER_ONLY) */
2278
pbrooke2eef172008-06-08 01:09:01 +00002279#if !defined(CONFIG_USER_ONLY)
pbrook8da3ff12008-12-01 18:59:50 +00002280
Anthony Liguoric227f092009-10-01 16:12:16 -05002281static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2282 ram_addr_t memory, ram_addr_t region_offset);
2283static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2284 ram_addr_t orig_memory, ram_addr_t region_offset);
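/* Compute the portion of the current target page that is covered by
   [start_addr, start_addr + orig_size) and set need_subpage when the
   region does not cover the whole page. */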
blueswir1db7b5422007-05-26 17:36:03 +00002285#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2286 need_subpage) \
2287 do { \
2288 if (addr > start_addr) \
2289 start_addr2 = 0; \
2290 else { \
2291 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2292 if (start_addr2 > 0) \
2293 need_subpage = 1; \
2294 } \
2295 \
blueswir149e9fba2007-05-30 17:25:06 +00002296 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
blueswir1db7b5422007-05-26 17:36:03 +00002297 end_addr2 = TARGET_PAGE_SIZE - 1; \
2298 else { \
2299 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2300 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2301 need_subpage = 1; \
2302 } \
2303 } while (0)
2304
Michael S. Tsirkin8f2498f2009-09-29 18:53:16 +02002305/* register physical memory.
2306 For RAM, 'size' must be a multiple of the target page size.
2307 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
pbrook8da3ff12008-12-01 18:59:50 +00002308 io memory page. The address used when calling the IO function is
2309 the offset from the start of the region, plus region_offset. Both
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002310 start_addr and region_offset are rounded down to a page boundary
pbrook8da3ff12008-12-01 18:59:50 +00002311 before calculating this offset. This should not be a problem unless
2312 the low bits of start_addr and region_offset differ. */
Anthony Liguoric227f092009-10-01 16:12:16 -05002313void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2314 ram_addr_t size,
2315 ram_addr_t phys_offset,
2316 ram_addr_t region_offset)
bellard33417e72003-08-10 21:47:01 +00002317{
Anthony Liguoric227f092009-10-01 16:12:16 -05002318 target_phys_addr_t addr, end_addr;
bellard92e873b2004-05-21 14:52:29 +00002319 PhysPageDesc *p;
bellard9d420372006-06-25 22:25:22 +00002320 CPUState *env;
Anthony Liguoric227f092009-10-01 16:12:16 -05002321 ram_addr_t orig_size = size;
blueswir1db7b5422007-05-26 17:36:03 +00002322 void *subpage;
bellard33417e72003-08-10 21:47:01 +00002323
aliguori7ba1e612008-11-05 16:04:33 +00002324 if (kvm_enabled())
2325 kvm_set_phys_mem(start_addr, size, phys_offset);
2326
pbrook67c4d232009-02-23 13:16:07 +00002327 if (phys_offset == IO_MEM_UNASSIGNED) {
2328 region_offset = start_addr;
2329 }
pbrook8da3ff12008-12-01 18:59:50 +00002330 region_offset &= TARGET_PAGE_MASK;
bellard5fd386f2004-05-23 21:11:22 +00002331 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
Anthony Liguoric227f092009-10-01 16:12:16 -05002332 end_addr = start_addr + (target_phys_addr_t)size;
blueswir149e9fba2007-05-30 17:25:06 +00002333 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
blueswir1db7b5422007-05-26 17:36:03 +00002334 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2335 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
Anthony Liguoric227f092009-10-01 16:12:16 -05002336 ram_addr_t orig_memory = p->phys_offset;
2337 target_phys_addr_t start_addr2, end_addr2;
blueswir1db7b5422007-05-26 17:36:03 +00002338 int need_subpage = 0;
2339
2340 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2341 need_subpage);
blueswir14254fab2008-01-01 16:57:19 +00002342 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
blueswir1db7b5422007-05-26 17:36:03 +00002343 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2344 subpage = subpage_init((addr & TARGET_PAGE_MASK),
pbrook8da3ff12008-12-01 18:59:50 +00002345 &p->phys_offset, orig_memory,
2346 p->region_offset);
blueswir1db7b5422007-05-26 17:36:03 +00002347 } else {
2348 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2349 >> IO_MEM_SHIFT];
2350 }
pbrook8da3ff12008-12-01 18:59:50 +00002351 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2352 region_offset);
2353 p->region_offset = 0;
blueswir1db7b5422007-05-26 17:36:03 +00002354 } else {
2355 p->phys_offset = phys_offset;
2356 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2357 (phys_offset & IO_MEM_ROMD))
2358 phys_offset += TARGET_PAGE_SIZE;
2359 }
2360 } else {
2361 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2362 p->phys_offset = phys_offset;
pbrook8da3ff12008-12-01 18:59:50 +00002363 p->region_offset = region_offset;
blueswir1db7b5422007-05-26 17:36:03 +00002364 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
pbrook8da3ff12008-12-01 18:59:50 +00002365 (phys_offset & IO_MEM_ROMD)) {
blueswir1db7b5422007-05-26 17:36:03 +00002366 phys_offset += TARGET_PAGE_SIZE;
pbrook0e8f0962008-12-02 09:02:15 +00002367 } else {
Anthony Liguoric227f092009-10-01 16:12:16 -05002368 target_phys_addr_t start_addr2, end_addr2;
blueswir1db7b5422007-05-26 17:36:03 +00002369 int need_subpage = 0;
2370
2371 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2372 end_addr2, need_subpage);
2373
blueswir14254fab2008-01-01 16:57:19 +00002374 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
blueswir1db7b5422007-05-26 17:36:03 +00002375 subpage = subpage_init((addr & TARGET_PAGE_MASK),
pbrook8da3ff12008-12-01 18:59:50 +00002376 &p->phys_offset, IO_MEM_UNASSIGNED,
pbrook67c4d232009-02-23 13:16:07 +00002377 addr & TARGET_PAGE_MASK);
blueswir1db7b5422007-05-26 17:36:03 +00002378 subpage_register(subpage, start_addr2, end_addr2,
pbrook8da3ff12008-12-01 18:59:50 +00002379 phys_offset, region_offset);
2380 p->region_offset = 0;
blueswir1db7b5422007-05-26 17:36:03 +00002381 }
2382 }
2383 }
pbrook8da3ff12008-12-01 18:59:50 +00002384 region_offset += TARGET_PAGE_SIZE;
bellard33417e72003-08-10 21:47:01 +00002385 }
ths3b46e622007-09-17 08:09:54 +00002386
bellard9d420372006-06-25 22:25:22 +00002387 /* since each CPU stores ram addresses in its TLB cache, we must
2388 reset the modified entries */
2389 /* XXX: slow ! */
2390 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2391 tlb_flush(env, 1);
2392 }
bellard33417e72003-08-10 21:47:01 +00002393}
2394
bellardba863452006-09-24 18:41:10 +00002395/* XXX: temporary until new memory mapping API */
Anthony Liguoric227f092009-10-01 16:12:16 -05002396ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
bellardba863452006-09-24 18:41:10 +00002397{
2398 PhysPageDesc *p;
2399
2400 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2401 if (!p)
2402 return IO_MEM_UNASSIGNED;
2403 return p->phys_offset;
2404}
2405
Anthony Liguoric227f092009-10-01 16:12:16 -05002406void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002407{
2408 if (kvm_enabled())
2409 kvm_coalesce_mmio_region(addr, size);
2410}
2411
Anthony Liguoric227f092009-10-01 16:12:16 -05002412void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002413{
2414 if (kvm_enabled())
2415 kvm_uncoalesce_mmio_region(addr, size);
2416}
2417
Sheng Yang62a27442010-01-26 19:21:16 +08002418void qemu_flush_coalesced_mmio_buffer(void)
2419{
2420 if (kvm_enabled())
2421 kvm_flush_coalesced_mmio_buffer();
2422}
2423
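/* Allocate host memory backing 'size' bytes of guest RAM, register it in
   the ram_blocks list, grow the dirty bitmap and return the ram_addr_t
   offset of the new block. */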
Anthony Liguoric227f092009-10-01 16:12:16 -05002424ram_addr_t qemu_ram_alloc(ram_addr_t size)
pbrook94a6b542009-04-11 17:15:54 +00002425{
2426 RAMBlock *new_block;
2427
pbrook94a6b542009-04-11 17:15:54 +00002428 size = TARGET_PAGE_ALIGN(size);
2429 new_block = qemu_malloc(sizeof(*new_block));
2430
Alexander Graf6b024942009-12-05 12:44:25 +01002431#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2432 /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
2433 new_block->host = mmap((void*)0x1000000, size, PROT_EXEC|PROT_READ|PROT_WRITE,
2434 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
2435#else
pbrook94a6b542009-04-11 17:15:54 +00002436 new_block->host = qemu_vmalloc(size);
Alexander Graf6b024942009-12-05 12:44:25 +01002437#endif
Izik Eidusccb167e2009-10-08 16:39:39 +02002438#ifdef MADV_MERGEABLE
2439 madvise(new_block->host, size, MADV_MERGEABLE);
2440#endif
pbrook94a6b542009-04-11 17:15:54 +00002441 new_block->offset = last_ram_offset;
2442 new_block->length = size;
2443
2444 new_block->next = ram_blocks;
2445 ram_blocks = new_block;
2446
2447 phys_ram_dirty = qemu_realloc(phys_ram_dirty,
2448 (last_ram_offset + size) >> TARGET_PAGE_BITS);
2449 memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
2450 0xff, size >> TARGET_PAGE_BITS);
2451
2452 last_ram_offset += size;
2453
Jan Kiszka6f0437e2009-04-26 18:03:40 +02002454 if (kvm_enabled())
2455 kvm_setup_guest_memory(new_block->host, size);
2456
pbrook94a6b542009-04-11 17:15:54 +00002457 return new_block->offset;
2458}
bellarde9a1ab12007-02-08 23:08:38 +00002459
Anthony Liguoric227f092009-10-01 16:12:16 -05002460void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00002461{
pbrook94a6b542009-04-11 17:15:54 +00002462 /* TODO: implement this. */
bellarde9a1ab12007-02-08 23:08:38 +00002463}
2464
pbrookdc828ca2009-04-09 22:21:07 +00002465/* Return a host pointer to ram allocated with qemu_ram_alloc.
pbrook5579c7f2009-04-11 14:47:08 +00002466 With the exception of the softmmu code in this file, this should
2467 only be used for local memory (e.g. video ram) that the device owns,
2468 and knows it isn't going to access beyond the end of the block.
2469
2470 It should not be used for general purpose DMA.
2471 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2472 */
Anthony Liguoric227f092009-10-01 16:12:16 -05002473void *qemu_get_ram_ptr(ram_addr_t addr)
pbrookdc828ca2009-04-09 22:21:07 +00002474{
pbrook94a6b542009-04-11 17:15:54 +00002475 RAMBlock *prev;
2476 RAMBlock **prevp;
2477 RAMBlock *block;
2478
pbrook94a6b542009-04-11 17:15:54 +00002479 prev = NULL;
2480 prevp = &ram_blocks;
2481 block = ram_blocks;
2482 while (block && (block->offset > addr
2483 || block->offset + block->length <= addr)) {
2484 if (prev)
2485 prevp = &prev->next;
2486 prev = block;
2487 block = block->next;
2488 }
2489 if (!block) {
2490 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2491 abort();
2492 }
2493    /* Move this entry to the start of the list. */
2494 if (prev) {
2495 prev->next = block->next;
2496 block->next = *prevp;
2497 *prevp = block;
2498 }
2499 return block->host + (addr - block->offset);
pbrookdc828ca2009-04-09 22:21:07 +00002500}
2501
pbrook5579c7f2009-04-11 14:47:08 +00002502/* Some of the softmmu routines need to translate from a host pointer
2503 (typically a TLB entry) back to a ram offset. */
Anthony Liguoric227f092009-10-01 16:12:16 -05002504ram_addr_t qemu_ram_addr_from_host(void *ptr)
pbrook5579c7f2009-04-11 14:47:08 +00002505{
pbrook94a6b542009-04-11 17:15:54 +00002506 RAMBlock *prev;
pbrook94a6b542009-04-11 17:15:54 +00002507 RAMBlock *block;
2508 uint8_t *host = ptr;
2509
pbrook94a6b542009-04-11 17:15:54 +00002510 prev = NULL;
pbrook94a6b542009-04-11 17:15:54 +00002511 block = ram_blocks;
2512 while (block && (block->host > host
2513 || block->host + block->length <= host)) {
pbrook94a6b542009-04-11 17:15:54 +00002514 prev = block;
2515 block = block->next;
2516 }
2517 if (!block) {
2518 fprintf(stderr, "Bad ram pointer %p\n", ptr);
2519 abort();
2520 }
2521 return block->offset + (host - block->host);
pbrook5579c7f2009-04-11 14:47:08 +00002522}
2523
Anthony Liguoric227f092009-10-01 16:12:16 -05002524static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
bellard33417e72003-08-10 21:47:01 +00002525{
pbrook67d3b952006-12-18 05:03:52 +00002526#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00002527 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
pbrook67d3b952006-12-18 05:03:52 +00002528#endif
Edgar E. Iglesiasfaed1c22009-09-03 13:25:09 +02002529#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
blueswir1e18231a2008-10-06 18:46:28 +00002530 do_unassigned_access(addr, 0, 0, 0, 1);
2531#endif
2532 return 0;
2533}
2534
Anthony Liguoric227f092009-10-01 16:12:16 -05002535static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
blueswir1e18231a2008-10-06 18:46:28 +00002536{
2537#ifdef DEBUG_UNASSIGNED
2538 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2539#endif
Edgar E. Iglesiasfaed1c22009-09-03 13:25:09 +02002540#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
blueswir1e18231a2008-10-06 18:46:28 +00002541 do_unassigned_access(addr, 0, 0, 0, 2);
2542#endif
2543 return 0;
2544}
2545
Anthony Liguoric227f092009-10-01 16:12:16 -05002546static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
blueswir1e18231a2008-10-06 18:46:28 +00002547{
2548#ifdef DEBUG_UNASSIGNED
2549 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2550#endif
Edgar E. Iglesiasfaed1c22009-09-03 13:25:09 +02002551#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
blueswir1e18231a2008-10-06 18:46:28 +00002552 do_unassigned_access(addr, 0, 0, 0, 4);
blueswir1b4f0a312007-05-06 17:59:24 +00002553#endif
bellard33417e72003-08-10 21:47:01 +00002554 return 0;
2555}
2556
Anthony Liguoric227f092009-10-01 16:12:16 -05002557static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
bellard33417e72003-08-10 21:47:01 +00002558{
pbrook67d3b952006-12-18 05:03:52 +00002559#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00002560 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
pbrook67d3b952006-12-18 05:03:52 +00002561#endif
Edgar E. Iglesiasfaed1c22009-09-03 13:25:09 +02002562#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
blueswir1e18231a2008-10-06 18:46:28 +00002563 do_unassigned_access(addr, 1, 0, 0, 1);
2564#endif
2565}
2566
Anthony Liguoric227f092009-10-01 16:12:16 -05002567static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
blueswir1e18231a2008-10-06 18:46:28 +00002568{
2569#ifdef DEBUG_UNASSIGNED
2570 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2571#endif
Edgar E. Iglesiasfaed1c22009-09-03 13:25:09 +02002572#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
blueswir1e18231a2008-10-06 18:46:28 +00002573 do_unassigned_access(addr, 1, 0, 0, 2);
2574#endif
2575}
2576
Anthony Liguoric227f092009-10-01 16:12:16 -05002577static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
blueswir1e18231a2008-10-06 18:46:28 +00002578{
2579#ifdef DEBUG_UNASSIGNED
2580 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2581#endif
Edgar E. Iglesiasfaed1c22009-09-03 13:25:09 +02002582#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
blueswir1e18231a2008-10-06 18:46:28 +00002583 do_unassigned_access(addr, 1, 0, 0, 4);
blueswir1b4f0a312007-05-06 17:59:24 +00002584#endif
bellard33417e72003-08-10 21:47:01 +00002585}
2586
Blue Swirld60efc62009-08-25 18:29:31 +00002587static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
bellard33417e72003-08-10 21:47:01 +00002588 unassigned_mem_readb,
blueswir1e18231a2008-10-06 18:46:28 +00002589 unassigned_mem_readw,
2590 unassigned_mem_readl,
bellard33417e72003-08-10 21:47:01 +00002591};
2592
Blue Swirld60efc62009-08-25 18:29:31 +00002593static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
bellard33417e72003-08-10 21:47:01 +00002594 unassigned_mem_writeb,
blueswir1e18231a2008-10-06 18:46:28 +00002595 unassigned_mem_writew,
2596 unassigned_mem_writel,
bellard33417e72003-08-10 21:47:01 +00002597};
2598
Anthony Liguoric227f092009-10-01 16:12:16 -05002599static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
pbrook0f459d12008-06-09 00:20:13 +00002600 uint32_t val)
bellard1ccde1c2004-02-06 19:46:14 +00002601{
bellard3a7d9292005-08-21 09:26:42 +00002602 int dirty_flags;
bellard3a7d9292005-08-21 09:26:42 +00002603 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2604 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2605#if !defined(CONFIG_USER_ONLY)
2606 tb_invalidate_phys_page_fast(ram_addr, 1);
2607 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2608#endif
2609 }
pbrook5579c7f2009-04-11 14:47:08 +00002610 stb_p(qemu_get_ram_ptr(ram_addr), val);
bellardf23db162005-08-21 19:12:28 +00002611 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2612 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2613 /* we remove the notdirty callback only if the code has been
2614 flushed */
2615 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00002616 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00002617}
2618
Anthony Liguoric227f092009-10-01 16:12:16 -05002619static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
pbrook0f459d12008-06-09 00:20:13 +00002620 uint32_t val)
bellard1ccde1c2004-02-06 19:46:14 +00002621{
bellard3a7d9292005-08-21 09:26:42 +00002622 int dirty_flags;
bellard3a7d9292005-08-21 09:26:42 +00002623 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2624 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2625#if !defined(CONFIG_USER_ONLY)
2626 tb_invalidate_phys_page_fast(ram_addr, 2);
2627 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2628#endif
2629 }
pbrook5579c7f2009-04-11 14:47:08 +00002630 stw_p(qemu_get_ram_ptr(ram_addr), val);
bellardf23db162005-08-21 19:12:28 +00002631 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2632 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2633 /* we remove the notdirty callback only if the code has been
2634 flushed */
2635 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00002636 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00002637}
2638
Anthony Liguoric227f092009-10-01 16:12:16 -05002639static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
pbrook0f459d12008-06-09 00:20:13 +00002640 uint32_t val)
bellard1ccde1c2004-02-06 19:46:14 +00002641{
bellard3a7d9292005-08-21 09:26:42 +00002642 int dirty_flags;
bellard3a7d9292005-08-21 09:26:42 +00002643 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2644 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2645#if !defined(CONFIG_USER_ONLY)
2646 tb_invalidate_phys_page_fast(ram_addr, 4);
2647 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2648#endif
2649 }
pbrook5579c7f2009-04-11 14:47:08 +00002650 stl_p(qemu_get_ram_ptr(ram_addr), val);
bellardf23db162005-08-21 19:12:28 +00002651 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2652 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2653 /* we remove the notdirty callback only if the code has been
2654 flushed */
2655 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00002656 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00002657}
2658
Blue Swirld60efc62009-08-25 18:29:31 +00002659static CPUReadMemoryFunc * const error_mem_read[3] = {
bellard3a7d9292005-08-21 09:26:42 +00002660 NULL, /* never used */
2661 NULL, /* never used */
2662 NULL, /* never used */
2663};
2664
Blue Swirld60efc62009-08-25 18:29:31 +00002665static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
bellard1ccde1c2004-02-06 19:46:14 +00002666 notdirty_mem_writeb,
2667 notdirty_mem_writew,
2668 notdirty_mem_writel,
2669};
2670
pbrook0f459d12008-06-09 00:20:13 +00002671/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00002672static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00002673{
2674 CPUState *env = cpu_single_env;
aliguori06d55cc2008-11-18 20:24:06 +00002675 target_ulong pc, cs_base;
2676 TranslationBlock *tb;
pbrook0f459d12008-06-09 00:20:13 +00002677 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00002678 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00002679 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00002680
aliguori06d55cc2008-11-18 20:24:06 +00002681 if (env->watchpoint_hit) {
2682 /* We re-entered the check after replacing the TB. Now raise
2683         * the debug interrupt so that it will trigger after the
2684 * current instruction. */
2685 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2686 return;
2687 }
pbrook2e70f6e2008-06-29 01:03:05 +00002688 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002689 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00002690 if ((vaddr == (wp->vaddr & len_mask) ||
2691 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00002692 wp->flags |= BP_WATCHPOINT_HIT;
2693 if (!env->watchpoint_hit) {
2694 env->watchpoint_hit = wp;
2695 tb = tb_find_pc(env->mem_io_pc);
2696 if (!tb) {
2697 cpu_abort(env, "check_watchpoint: could not find TB for "
2698 "pc=%p", (void *)env->mem_io_pc);
2699 }
2700 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2701 tb_phys_invalidate(tb, -1);
2702 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2703 env->exception_index = EXCP_DEBUG;
2704 } else {
2705 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2706 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2707 }
2708 cpu_resume_from_signal(env, NULL);
aliguori06d55cc2008-11-18 20:24:06 +00002709 }
aliguori6e140f22008-11-18 20:37:55 +00002710 } else {
2711 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00002712 }
2713 }
2714}
2715
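/* Illustrative sketch of the producer side of the machinery below: a
   watchpoint is inserted with cpu_watchpoint_insert() (defined earlier in
   exec.c, not shown in this excerpt; its exact signature is assumed here),
   after which accesses to the watched page are routed through the
   watch_mem_* handlers and check_watchpoint() above.  The function name is
   hypothetical. */
static int example_watch_guest_pte(CPUState *env, target_ulong pte_vaddr)
{
    CPUWatchpoint *wp;

    /* watch 4 bytes at pte_vaddr and stop on guest writes; wp could later
       be handed to cpu_watchpoint_remove_by_ref() */
    return cpu_watchpoint_insert(env, pte_vaddr, 4, BP_MEM_WRITE | BP_CPU, &wp);
}
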
pbrook6658ffb2007-03-16 23:58:11 +00002716/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2717 so these check for a hit then pass through to the normal out-of-line
2718 phys routines. */
Anthony Liguoric227f092009-10-01 16:12:16 -05002719static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
pbrook6658ffb2007-03-16 23:58:11 +00002720{
aliguorib4051332008-11-18 20:14:20 +00002721 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
pbrook6658ffb2007-03-16 23:58:11 +00002722 return ldub_phys(addr);
2723}
2724
Anthony Liguoric227f092009-10-01 16:12:16 -05002725static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
pbrook6658ffb2007-03-16 23:58:11 +00002726{
aliguorib4051332008-11-18 20:14:20 +00002727 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
pbrook6658ffb2007-03-16 23:58:11 +00002728 return lduw_phys(addr);
2729}
2730
Anthony Liguoric227f092009-10-01 16:12:16 -05002731static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
pbrook6658ffb2007-03-16 23:58:11 +00002732{
aliguorib4051332008-11-18 20:14:20 +00002733 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
pbrook6658ffb2007-03-16 23:58:11 +00002734 return ldl_phys(addr);
2735}
2736
Anthony Liguoric227f092009-10-01 16:12:16 -05002737static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
pbrook6658ffb2007-03-16 23:58:11 +00002738 uint32_t val)
2739{
aliguorib4051332008-11-18 20:14:20 +00002740 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
pbrook6658ffb2007-03-16 23:58:11 +00002741 stb_phys(addr, val);
2742}
2743
Anthony Liguoric227f092009-10-01 16:12:16 -05002744static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
pbrook6658ffb2007-03-16 23:58:11 +00002745 uint32_t val)
2746{
aliguorib4051332008-11-18 20:14:20 +00002747 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
pbrook6658ffb2007-03-16 23:58:11 +00002748 stw_phys(addr, val);
2749}
2750
Anthony Liguoric227f092009-10-01 16:12:16 -05002751static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
pbrook6658ffb2007-03-16 23:58:11 +00002752 uint32_t val)
2753{
aliguorib4051332008-11-18 20:14:20 +00002754 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
pbrook6658ffb2007-03-16 23:58:11 +00002755 stl_phys(addr, val);
2756}
2757
Blue Swirld60efc62009-08-25 18:29:31 +00002758static CPUReadMemoryFunc * const watch_mem_read[3] = {
pbrook6658ffb2007-03-16 23:58:11 +00002759 watch_mem_readb,
2760 watch_mem_readw,
2761 watch_mem_readl,
2762};
2763
Blue Swirld60efc62009-08-25 18:29:31 +00002764static CPUWriteMemoryFunc * const watch_mem_write[3] = {
pbrook6658ffb2007-03-16 23:58:11 +00002765 watch_mem_writeb,
2766 watch_mem_writew,
2767 watch_mem_writel,
2768};
pbrook6658ffb2007-03-16 23:58:11 +00002769
Anthony Liguoric227f092009-10-01 16:12:16 -05002770static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
blueswir1db7b5422007-05-26 17:36:03 +00002771 unsigned int len)
2772{
blueswir1db7b5422007-05-26 17:36:03 +00002773 uint32_t ret;
2774 unsigned int idx;
2775
pbrook8da3ff12008-12-01 18:59:50 +00002776 idx = SUBPAGE_IDX(addr);
blueswir1db7b5422007-05-26 17:36:03 +00002777#if defined(DEBUG_SUBPAGE)
2778 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2779 mmio, len, addr, idx);
2780#endif
pbrook8da3ff12008-12-01 18:59:50 +00002781 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2782 addr + mmio->region_offset[idx][0][len]);
blueswir1db7b5422007-05-26 17:36:03 +00002783
2784 return ret;
2785}
2786
Anthony Liguoric227f092009-10-01 16:12:16 -05002787static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
blueswir1db7b5422007-05-26 17:36:03 +00002788 uint32_t value, unsigned int len)
2789{
blueswir1db7b5422007-05-26 17:36:03 +00002790 unsigned int idx;
2791
pbrook8da3ff12008-12-01 18:59:50 +00002792 idx = SUBPAGE_IDX(addr);
blueswir1db7b5422007-05-26 17:36:03 +00002793#if defined(DEBUG_SUBPAGE)
2794 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2795 mmio, len, addr, idx, value);
2796#endif
pbrook8da3ff12008-12-01 18:59:50 +00002797 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2798 addr + mmio->region_offset[idx][1][len],
2799 value);
blueswir1db7b5422007-05-26 17:36:03 +00002800}
2801
Anthony Liguoric227f092009-10-01 16:12:16 -05002802static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
blueswir1db7b5422007-05-26 17:36:03 +00002803{
2804#if defined(DEBUG_SUBPAGE)
2805 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2806#endif
2807
2808 return subpage_readlen(opaque, addr, 0);
2809}
2810
Anthony Liguoric227f092009-10-01 16:12:16 -05002811static void subpage_writeb (void *opaque, target_phys_addr_t addr,
blueswir1db7b5422007-05-26 17:36:03 +00002812 uint32_t value)
2813{
2814#if defined(DEBUG_SUBPAGE)
2815 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2816#endif
2817 subpage_writelen(opaque, addr, value, 0);
2818}
2819
Anthony Liguoric227f092009-10-01 16:12:16 -05002820static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
blueswir1db7b5422007-05-26 17:36:03 +00002821{
2822#if defined(DEBUG_SUBPAGE)
2823 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2824#endif
2825
2826 return subpage_readlen(opaque, addr, 1);
2827}
2828
Anthony Liguoric227f092009-10-01 16:12:16 -05002829static void subpage_writew (void *opaque, target_phys_addr_t addr,
blueswir1db7b5422007-05-26 17:36:03 +00002830 uint32_t value)
2831{
2832#if defined(DEBUG_SUBPAGE)
2833 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2834#endif
2835 subpage_writelen(opaque, addr, value, 1);
2836}
2837
Anthony Liguoric227f092009-10-01 16:12:16 -05002838static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
blueswir1db7b5422007-05-26 17:36:03 +00002839{
2840#if defined(DEBUG_SUBPAGE)
2841 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2842#endif
2843
2844 return subpage_readlen(opaque, addr, 2);
2845}
2846
2847static void subpage_writel (void *opaque,
Anthony Liguoric227f092009-10-01 16:12:16 -05002848 target_phys_addr_t addr, uint32_t value)
blueswir1db7b5422007-05-26 17:36:03 +00002849{
2850#if defined(DEBUG_SUBPAGE)
2851 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2852#endif
2853 subpage_writelen(opaque, addr, value, 2);
2854}
2855
Blue Swirld60efc62009-08-25 18:29:31 +00002856static CPUReadMemoryFunc * const subpage_read[] = {
blueswir1db7b5422007-05-26 17:36:03 +00002857 &subpage_readb,
2858 &subpage_readw,
2859 &subpage_readl,
2860};
2861
Blue Swirld60efc62009-08-25 18:29:31 +00002862static CPUWriteMemoryFunc * const subpage_write[] = {
blueswir1db7b5422007-05-26 17:36:03 +00002863 &subpage_writeb,
2864 &subpage_writew,
2865 &subpage_writel,
2866};
2867
Anthony Liguoric227f092009-10-01 16:12:16 -05002868static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2869 ram_addr_t memory, ram_addr_t region_offset)
blueswir1db7b5422007-05-26 17:36:03 +00002870{
2871 int idx, eidx;
blueswir14254fab2008-01-01 16:57:19 +00002872 unsigned int i;
blueswir1db7b5422007-05-26 17:36:03 +00002873
2874 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2875 return -1;
2876 idx = SUBPAGE_IDX(start);
2877 eidx = SUBPAGE_IDX(end);
2878#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00002879 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
blueswir1db7b5422007-05-26 17:36:03 +00002880 mmio, start, end, idx, eidx, memory);
2881#endif
2882 memory >>= IO_MEM_SHIFT;
2883 for (; idx <= eidx; idx++) {
blueswir14254fab2008-01-01 16:57:19 +00002884 for (i = 0; i < 4; i++) {
blueswir13ee89922008-01-02 19:45:26 +00002885 if (io_mem_read[memory][i]) {
2886 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2887 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
pbrook8da3ff12008-12-01 18:59:50 +00002888 mmio->region_offset[idx][0][i] = region_offset;
blueswir13ee89922008-01-02 19:45:26 +00002889 }
2890 if (io_mem_write[memory][i]) {
2891 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2892 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
pbrook8da3ff12008-12-01 18:59:50 +00002893 mmio->region_offset[idx][1][i] = region_offset;
blueswir13ee89922008-01-02 19:45:26 +00002894 }
blueswir14254fab2008-01-01 16:57:19 +00002895 }
blueswir1db7b5422007-05-26 17:36:03 +00002896 }
2897
2898 return 0;
2899}
2900
Anthony Liguoric227f092009-10-01 16:12:16 -05002901static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2902 ram_addr_t orig_memory, ram_addr_t region_offset)
blueswir1db7b5422007-05-26 17:36:03 +00002903{
Anthony Liguoric227f092009-10-01 16:12:16 -05002904 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00002905 int subpage_memory;
2906
Anthony Liguoric227f092009-10-01 16:12:16 -05002907 mmio = qemu_mallocz(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00002908
2909 mmio->base = base;
Avi Kivity1eed09c2009-06-14 11:38:51 +03002910 subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
blueswir1db7b5422007-05-26 17:36:03 +00002911#if defined(DEBUG_SUBPAGE)
aliguori1eec6142009-02-05 22:06:18 +00002912 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2913 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
blueswir1db7b5422007-05-26 17:36:03 +00002914#endif
aliguori1eec6142009-02-05 22:06:18 +00002915 *phys = subpage_memory | IO_MEM_SUBPAGE;
2916 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
pbrook8da3ff12008-12-01 18:59:50 +00002917 region_offset);
blueswir1db7b5422007-05-26 17:36:03 +00002918
2919 return mmio;
2920}
2921
aliguori88715652009-02-11 15:20:58 +00002922static int get_free_io_mem_idx(void)
2923{
2924 int i;
2925
2926 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
2927 if (!io_mem_used[i]) {
2928 io_mem_used[i] = 1;
2929 return i;
2930 }
Riku Voipioc6703b42009-12-03 15:56:05 +02002931    fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
aliguori88715652009-02-11 15:20:58 +00002932 return -1;
2933}
2934
bellard33417e72003-08-10 21:47:01 +00002935/* mem_read and mem_write are arrays of function pointers used to
2936   access byte (index 0), word (index 1) and dword (index
Paul Brook0b4e6e32009-04-30 18:37:55 +01002937 2). Functions can be omitted with a NULL function pointer.
blueswir13ee89922008-01-02 19:45:26 +00002938   If io_index is non-zero, the corresponding io zone is
blueswir14254fab2008-01-01 16:57:19 +00002939 modified. If it is zero, a new io zone is allocated. The return
2940 value can be used with cpu_register_physical_memory(). (-1) is
2941   returned on error. */
Avi Kivity1eed09c2009-06-14 11:38:51 +03002942static int cpu_register_io_memory_fixed(int io_index,
Blue Swirld60efc62009-08-25 18:29:31 +00002943 CPUReadMemoryFunc * const *mem_read,
2944 CPUWriteMemoryFunc * const *mem_write,
Avi Kivity1eed09c2009-06-14 11:38:51 +03002945 void *opaque)
bellard33417e72003-08-10 21:47:01 +00002946{
blueswir14254fab2008-01-01 16:57:19 +00002947 int i, subwidth = 0;
bellard33417e72003-08-10 21:47:01 +00002948
2949 if (io_index <= 0) {
aliguori88715652009-02-11 15:20:58 +00002950 io_index = get_free_io_mem_idx();
2951 if (io_index == -1)
2952 return io_index;
bellard33417e72003-08-10 21:47:01 +00002953 } else {
Avi Kivity1eed09c2009-06-14 11:38:51 +03002954 io_index >>= IO_MEM_SHIFT;
bellard33417e72003-08-10 21:47:01 +00002955 if (io_index >= IO_MEM_NB_ENTRIES)
2956 return -1;
2957 }
bellardb5ff1b32005-11-26 10:38:39 +00002958
bellard33417e72003-08-10 21:47:01 +00002959 for(i = 0;i < 3; i++) {
blueswir14254fab2008-01-01 16:57:19 +00002960 if (!mem_read[i] || !mem_write[i])
2961 subwidth = IO_MEM_SUBWIDTH;
bellard33417e72003-08-10 21:47:01 +00002962 io_mem_read[io_index][i] = mem_read[i];
2963 io_mem_write[io_index][i] = mem_write[i];
2964 }
bellarda4193c82004-06-03 14:01:43 +00002965 io_mem_opaque[io_index] = opaque;
blueswir14254fab2008-01-01 16:57:19 +00002966 return (io_index << IO_MEM_SHIFT) | subwidth;
bellard33417e72003-08-10 21:47:01 +00002967}
bellard61382a52003-10-27 21:22:23 +00002968
Blue Swirld60efc62009-08-25 18:29:31 +00002969int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
2970 CPUWriteMemoryFunc * const *mem_write,
Avi Kivity1eed09c2009-06-14 11:38:51 +03002971 void *opaque)
2972{
2973 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
2974}
2975
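/* Illustrative sketch of device-side usage of cpu_register_io_memory(): the
   my_mmio_* handlers and example_map_mmio() are hypothetical, but the pattern
   (three read and three write callbacks indexed by access size, then mapping
   the returned token with cpu_register_physical_memory()) follows the comment
   above cpu_register_io_memory_fixed(). */
static uint32_t my_mmio_readl(void *opaque, target_phys_addr_t addr)
{
    return 0;   /* decode addr and return the device register value here */
}

static void my_mmio_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* decode addr and update device state here */
}

static CPUReadMemoryFunc * const my_mmio_read[3] = {
    NULL,            /* byte accesses omitted: region gets IO_MEM_SUBWIDTH */
    NULL,            /* word accesses omitted */
    my_mmio_readl,
};

static CPUWriteMemoryFunc * const my_mmio_write[3] = {
    NULL,
    NULL,
    my_mmio_writel,
};

static void example_map_mmio(void *dev_state, target_phys_addr_t base)
{
    int iomemtype = cpu_register_io_memory(my_mmio_read, my_mmio_write,
                                           dev_state);
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, iomemtype);
}
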
aliguori88715652009-02-11 15:20:58 +00002976void cpu_unregister_io_memory(int io_table_address)
2977{
2978 int i;
2979 int io_index = io_table_address >> IO_MEM_SHIFT;
2980
2981 for (i=0;i < 3; i++) {
2982 io_mem_read[io_index][i] = unassigned_mem_read[i];
2983 io_mem_write[io_index][i] = unassigned_mem_write[i];
2984 }
2985 io_mem_opaque[io_index] = NULL;
2986 io_mem_used[io_index] = 0;
2987}
2988
Avi Kivitye9179ce2009-06-14 11:38:52 +03002989static void io_mem_init(void)
2990{
2991 int i;
2992
2993 cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
2994 cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
2995 cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
2996 for (i=0; i<5; i++)
2997 io_mem_used[i] = 1;
2998
2999 io_mem_watch = cpu_register_io_memory(watch_mem_read,
3000 watch_mem_write, NULL);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003001}
3002
pbrooke2eef172008-06-08 01:09:01 +00003003#endif /* !defined(CONFIG_USER_ONLY) */
3004
bellard13eb76e2004-01-24 15:23:36 +00003005/* physical memory access (slow version, mainly for debug) */
3006#if defined(CONFIG_USER_ONLY)
Anthony Liguoric227f092009-10-01 16:12:16 -05003007void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
bellard13eb76e2004-01-24 15:23:36 +00003008 int len, int is_write)
3009{
3010 int l, flags;
3011 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00003012 void * p;
bellard13eb76e2004-01-24 15:23:36 +00003013
3014 while (len > 0) {
3015 page = addr & TARGET_PAGE_MASK;
3016 l = (page + TARGET_PAGE_SIZE) - addr;
3017 if (l > len)
3018 l = len;
3019 flags = page_get_flags(page);
3020 if (!(flags & PAGE_VALID))
3021 return;
3022 if (is_write) {
3023 if (!(flags & PAGE_WRITE))
3024 return;
bellard579a97f2007-11-11 14:26:47 +00003025 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003026 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
bellard579a97f2007-11-11 14:26:47 +00003027 /* FIXME - should this return an error rather than just fail? */
3028 return;
aurel3272fb7da2008-04-27 23:53:45 +00003029 memcpy(p, buf, l);
3030 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00003031 } else {
3032 if (!(flags & PAGE_READ))
3033 return;
bellard579a97f2007-11-11 14:26:47 +00003034 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003035 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
bellard579a97f2007-11-11 14:26:47 +00003036 /* FIXME - should this return an error rather than just fail? */
3037 return;
aurel3272fb7da2008-04-27 23:53:45 +00003038 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00003039 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00003040 }
3041 len -= l;
3042 buf += l;
3043 addr += l;
3044 }
3045}
bellard8df1cd02005-01-28 22:37:22 +00003046
bellard13eb76e2004-01-24 15:23:36 +00003047#else
Anthony Liguoric227f092009-10-01 16:12:16 -05003048void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
bellard13eb76e2004-01-24 15:23:36 +00003049 int len, int is_write)
3050{
3051 int l, io_index;
3052 uint8_t *ptr;
3053 uint32_t val;
Anthony Liguoric227f092009-10-01 16:12:16 -05003054 target_phys_addr_t page;
bellard2e126692004-04-25 21:28:44 +00003055 unsigned long pd;
bellard92e873b2004-05-21 14:52:29 +00003056 PhysPageDesc *p;
ths3b46e622007-09-17 08:09:54 +00003057
bellard13eb76e2004-01-24 15:23:36 +00003058 while (len > 0) {
3059 page = addr & TARGET_PAGE_MASK;
3060 l = (page + TARGET_PAGE_SIZE) - addr;
3061 if (l > len)
3062 l = len;
bellard92e873b2004-05-21 14:52:29 +00003063 p = phys_page_find(page >> TARGET_PAGE_BITS);
bellard13eb76e2004-01-24 15:23:36 +00003064 if (!p) {
3065 pd = IO_MEM_UNASSIGNED;
3066 } else {
3067 pd = p->phys_offset;
3068 }
ths3b46e622007-09-17 08:09:54 +00003069
bellard13eb76e2004-01-24 15:23:36 +00003070 if (is_write) {
bellard3a7d9292005-08-21 09:26:42 +00003071 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
Anthony Liguoric227f092009-10-01 16:12:16 -05003072 target_phys_addr_t addr1 = addr;
bellard13eb76e2004-01-24 15:23:36 +00003073 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00003074 if (p)
aurel326c2934d2009-02-18 21:37:17 +00003075 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bellard6a00d602005-11-21 23:25:50 +00003076 /* XXX: could force cpu_single_env to NULL to avoid
3077 potential bugs */
aurel326c2934d2009-02-18 21:37:17 +00003078 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003079 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003080 val = ldl_p(buf);
aurel326c2934d2009-02-18 21:37:17 +00003081 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
bellard13eb76e2004-01-24 15:23:36 +00003082 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003083 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003084 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003085 val = lduw_p(buf);
aurel326c2934d2009-02-18 21:37:17 +00003086 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
bellard13eb76e2004-01-24 15:23:36 +00003087 l = 2;
3088 } else {
bellard1c213d12005-09-03 10:49:04 +00003089 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003090 val = ldub_p(buf);
aurel326c2934d2009-02-18 21:37:17 +00003091 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
bellard13eb76e2004-01-24 15:23:36 +00003092 l = 1;
3093 }
3094 } else {
bellardb448f2f2004-02-25 23:24:04 +00003095 unsigned long addr1;
3096 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
bellard13eb76e2004-01-24 15:23:36 +00003097 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003098 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00003099 memcpy(ptr, buf, l);
bellard3a7d9292005-08-21 09:26:42 +00003100 if (!cpu_physical_memory_is_dirty(addr1)) {
3101 /* invalidate code */
3102 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3103 /* set dirty bit */
ths5fafdf22007-09-16 21:08:06 +00003104 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
bellardf23db162005-08-21 19:12:28 +00003105 (0xff & ~CODE_DIRTY_FLAG);
bellard3a7d9292005-08-21 09:26:42 +00003106 }
bellard13eb76e2004-01-24 15:23:36 +00003107 }
3108 } else {
ths5fafdf22007-09-16 21:08:06 +00003109 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
bellard2a4188a2006-06-25 21:54:59 +00003110 !(pd & IO_MEM_ROMD)) {
Anthony Liguoric227f092009-10-01 16:12:16 -05003111 target_phys_addr_t addr1 = addr;
bellard13eb76e2004-01-24 15:23:36 +00003112 /* I/O case */
3113 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00003114 if (p)
aurel326c2934d2009-02-18 21:37:17 +00003115 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3116 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003117 /* 32 bit read access */
aurel326c2934d2009-02-18 21:37:17 +00003118 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
bellardc27004e2005-01-03 23:35:10 +00003119 stl_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003120 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003121 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003122 /* 16 bit read access */
aurel326c2934d2009-02-18 21:37:17 +00003123 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
bellardc27004e2005-01-03 23:35:10 +00003124 stw_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003125 l = 2;
3126 } else {
bellard1c213d12005-09-03 10:49:04 +00003127 /* 8 bit read access */
aurel326c2934d2009-02-18 21:37:17 +00003128 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
bellardc27004e2005-01-03 23:35:10 +00003129 stb_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003130 l = 1;
3131 }
3132 } else {
3133 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003134 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bellard13eb76e2004-01-24 15:23:36 +00003135 (addr & ~TARGET_PAGE_MASK);
3136 memcpy(buf, ptr, l);
3137 }
3138 }
3139 len -= l;
3140 buf += l;
3141 addr += l;
3142 }
3143}
bellard8df1cd02005-01-28 22:37:22 +00003144
bellardd0ecd2a2006-04-23 17:14:48 +00003145/* used for ROM loading : can write in RAM and ROM */
Anthony Liguoric227f092009-10-01 16:12:16 -05003146void cpu_physical_memory_write_rom(target_phys_addr_t addr,
bellardd0ecd2a2006-04-23 17:14:48 +00003147 const uint8_t *buf, int len)
3148{
3149 int l;
3150 uint8_t *ptr;
Anthony Liguoric227f092009-10-01 16:12:16 -05003151 target_phys_addr_t page;
bellardd0ecd2a2006-04-23 17:14:48 +00003152 unsigned long pd;
3153 PhysPageDesc *p;
ths3b46e622007-09-17 08:09:54 +00003154
bellardd0ecd2a2006-04-23 17:14:48 +00003155 while (len > 0) {
3156 page = addr & TARGET_PAGE_MASK;
3157 l = (page + TARGET_PAGE_SIZE) - addr;
3158 if (l > len)
3159 l = len;
3160 p = phys_page_find(page >> TARGET_PAGE_BITS);
3161 if (!p) {
3162 pd = IO_MEM_UNASSIGNED;
3163 } else {
3164 pd = p->phys_offset;
3165 }
ths3b46e622007-09-17 08:09:54 +00003166
bellardd0ecd2a2006-04-23 17:14:48 +00003167 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
bellard2a4188a2006-06-25 21:54:59 +00003168 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3169 !(pd & IO_MEM_ROMD)) {
bellardd0ecd2a2006-04-23 17:14:48 +00003170 /* do nothing */
3171 } else {
3172 unsigned long addr1;
3173 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3174 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003175 ptr = qemu_get_ram_ptr(addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00003176 memcpy(ptr, buf, l);
3177 }
3178 len -= l;
3179 buf += l;
3180 addr += l;
3181 }
3182}
3183
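/* Illustrative sketch: a firmware loader uses the helper above to populate a
   ROM region, since ordinary cpu_physical_memory_rw() writes to ROM are
   routed to the unassigned I/O slot and discarded.  The function name and
   parameters are hypothetical. */
static void example_install_firmware(target_phys_addr_t rom_base,
                                     const uint8_t *image, int size)
{
    cpu_physical_memory_write_rom(rom_base, image, size);
}
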
aliguori6d16c2f2009-01-22 16:59:11 +00003184typedef struct {
3185 void *buffer;
Anthony Liguoric227f092009-10-01 16:12:16 -05003186 target_phys_addr_t addr;
3187 target_phys_addr_t len;
aliguori6d16c2f2009-01-22 16:59:11 +00003188} BounceBuffer;
3189
3190static BounceBuffer bounce;
3191
aliguoriba223c22009-01-22 16:59:16 +00003192typedef struct MapClient {
3193 void *opaque;
3194 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00003195 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00003196} MapClient;
3197
Blue Swirl72cf2d42009-09-12 07:36:22 +00003198static QLIST_HEAD(map_client_list, MapClient) map_client_list
3199 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003200
3201void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3202{
3203 MapClient *client = qemu_malloc(sizeof(*client));
3204
3205 client->opaque = opaque;
3206 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003207 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00003208 return client;
3209}
3210
3211void cpu_unregister_map_client(void *_client)
3212{
3213 MapClient *client = (MapClient *)_client;
3214
Blue Swirl72cf2d42009-09-12 07:36:22 +00003215 QLIST_REMOVE(client, link);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09003216 qemu_free(client);
aliguoriba223c22009-01-22 16:59:16 +00003217}
3218
3219static void cpu_notify_map_clients(void)
3220{
3221 MapClient *client;
3222
Blue Swirl72cf2d42009-09-12 07:36:22 +00003223 while (!QLIST_EMPTY(&map_client_list)) {
3224 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003225 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09003226 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00003227 }
3228}
3229
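/* Illustrative sketch: a device whose cpu_physical_memory_map() call (below)
   returned NULL because the bounce buffer was busy can queue itself here and
   retry once the buffer is released.  Both functions are hypothetical
   examples of such a caller. */
static void example_retry_dma(void *opaque)
{
    /* opaque points at the device's pending-transfer state; re-schedule the
       transfer, typically via a bottom half */
}

static void example_defer_dma(void *pending_transfer)
{
    cpu_register_map_client(pending_transfer, example_retry_dma);
}
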
aliguori6d16c2f2009-01-22 16:59:11 +00003230/* Map a physical memory region into a host virtual address.
3231 * May map a subset of the requested range, given by and returned in *plen.
3232 * May return NULL if resources needed to perform the mapping are exhausted.
3233 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00003234 * Use cpu_register_map_client() to know when retrying the map operation is
3235 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00003236 */
Anthony Liguoric227f092009-10-01 16:12:16 -05003237void *cpu_physical_memory_map(target_phys_addr_t addr,
3238 target_phys_addr_t *plen,
aliguori6d16c2f2009-01-22 16:59:11 +00003239 int is_write)
3240{
Anthony Liguoric227f092009-10-01 16:12:16 -05003241 target_phys_addr_t len = *plen;
3242 target_phys_addr_t done = 0;
aliguori6d16c2f2009-01-22 16:59:11 +00003243 int l;
3244 uint8_t *ret = NULL;
3245 uint8_t *ptr;
Anthony Liguoric227f092009-10-01 16:12:16 -05003246 target_phys_addr_t page;
aliguori6d16c2f2009-01-22 16:59:11 +00003247 unsigned long pd;
3248 PhysPageDesc *p;
3249 unsigned long addr1;
3250
3251 while (len > 0) {
3252 page = addr & TARGET_PAGE_MASK;
3253 l = (page + TARGET_PAGE_SIZE) - addr;
3254 if (l > len)
3255 l = len;
3256 p = phys_page_find(page >> TARGET_PAGE_BITS);
3257 if (!p) {
3258 pd = IO_MEM_UNASSIGNED;
3259 } else {
3260 pd = p->phys_offset;
3261 }
3262
3263 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3264 if (done || bounce.buffer) {
3265 break;
3266 }
3267 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3268 bounce.addr = addr;
3269 bounce.len = l;
3270 if (!is_write) {
3271 cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3272 }
3273 ptr = bounce.buffer;
3274 } else {
3275 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
pbrook5579c7f2009-04-11 14:47:08 +00003276 ptr = qemu_get_ram_ptr(addr1);
aliguori6d16c2f2009-01-22 16:59:11 +00003277 }
3278 if (!done) {
3279 ret = ptr;
3280 } else if (ret + done != ptr) {
3281 break;
3282 }
3283
3284 len -= l;
3285 addr += l;
3286 done += l;
3287 }
3288 *plen = done;
3289 return ret;
3290}
3291
3292/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3293 * Will also mark the memory as dirty if is_write == 1. access_len gives
3294 * the amount of memory that was actually read or written by the caller.
3295 */
Anthony Liguoric227f092009-10-01 16:12:16 -05003296void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3297 int is_write, target_phys_addr_t access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00003298{
3299 if (buffer != bounce.buffer) {
3300 if (is_write) {
Anthony Liguoric227f092009-10-01 16:12:16 -05003301 ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00003302 while (access_len) {
3303 unsigned l;
3304 l = TARGET_PAGE_SIZE;
3305 if (l > access_len)
3306 l = access_len;
3307 if (!cpu_physical_memory_is_dirty(addr1)) {
3308 /* invalidate code */
3309 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3310 /* set dirty bit */
3311 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3312 (0xff & ~CODE_DIRTY_FLAG);
3313 }
3314 addr1 += l;
3315 access_len -= l;
3316 }
3317 }
3318 return;
3319 }
3320 if (is_write) {
3321 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3322 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00003323 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00003324 bounce.buffer = NULL;
aliguoriba223c22009-01-22 16:59:16 +00003325 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00003326}
bellardd0ecd2a2006-04-23 17:14:48 +00003327
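/* Illustrative sketch of the map/unmap calling pattern documented above: map
   as much of the transfer as possible, copy, then report the length actually
   used back through cpu_physical_memory_unmap() so dirty tracking and TB
   invalidation happen.  On NULL (mapping resources exhausted) fall back to
   the slow copy; a real device could instead register a map client and
   retry.  The function name is hypothetical. */
static void example_dma_to_guest(target_phys_addr_t dma_addr,
                                 const uint8_t *data, target_phys_addr_t len)
{
    while (len > 0) {
        target_phys_addr_t plen = len;
        void *host = cpu_physical_memory_map(dma_addr, &plen, 1);

        if (!host) {
            cpu_physical_memory_write(dma_addr, data, len);
            return;
        }
        memcpy(host, data, plen);
        cpu_physical_memory_unmap(host, plen, 1, plen);
        dma_addr += plen;
        data += plen;
        len -= plen;
    }
}
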
bellard8df1cd02005-01-28 22:37:22 +00003328/* warning: addr must be aligned */
Anthony Liguoric227f092009-10-01 16:12:16 -05003329uint32_t ldl_phys(target_phys_addr_t addr)
bellard8df1cd02005-01-28 22:37:22 +00003330{
3331 int io_index;
3332 uint8_t *ptr;
3333 uint32_t val;
3334 unsigned long pd;
3335 PhysPageDesc *p;
3336
3337 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3338 if (!p) {
3339 pd = IO_MEM_UNASSIGNED;
3340 } else {
3341 pd = p->phys_offset;
3342 }
ths3b46e622007-09-17 08:09:54 +00003343
ths5fafdf22007-09-16 21:08:06 +00003344 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
bellard2a4188a2006-06-25 21:54:59 +00003345 !(pd & IO_MEM_ROMD)) {
bellard8df1cd02005-01-28 22:37:22 +00003346 /* I/O case */
3347 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00003348 if (p)
3349 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bellard8df1cd02005-01-28 22:37:22 +00003350 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3351 } else {
3352 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003353 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bellard8df1cd02005-01-28 22:37:22 +00003354 (addr & ~TARGET_PAGE_MASK);
3355 val = ldl_p(ptr);
3356 }
3357 return val;
3358}
3359
bellard84b7b8e2005-11-28 21:19:04 +00003360/* warning: addr must be aligned */
Anthony Liguoric227f092009-10-01 16:12:16 -05003361uint64_t ldq_phys(target_phys_addr_t addr)
bellard84b7b8e2005-11-28 21:19:04 +00003362{
3363 int io_index;
3364 uint8_t *ptr;
3365 uint64_t val;
3366 unsigned long pd;
3367 PhysPageDesc *p;
3368
3369 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3370 if (!p) {
3371 pd = IO_MEM_UNASSIGNED;
3372 } else {
3373 pd = p->phys_offset;
3374 }
ths3b46e622007-09-17 08:09:54 +00003375
bellard2a4188a2006-06-25 21:54:59 +00003376 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3377 !(pd & IO_MEM_ROMD)) {
bellard84b7b8e2005-11-28 21:19:04 +00003378 /* I/O case */
3379 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00003380 if (p)
3381 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bellard84b7b8e2005-11-28 21:19:04 +00003382#ifdef TARGET_WORDS_BIGENDIAN
3383 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3384 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3385#else
3386 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3387 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3388#endif
3389 } else {
3390 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003391 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bellard84b7b8e2005-11-28 21:19:04 +00003392 (addr & ~TARGET_PAGE_MASK);
3393 val = ldq_p(ptr);
3394 }
3395 return val;
3396}
3397
bellardaab33092005-10-30 20:48:42 +00003398/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05003399uint32_t ldub_phys(target_phys_addr_t addr)
bellardaab33092005-10-30 20:48:42 +00003400{
3401 uint8_t val;
3402 cpu_physical_memory_read(addr, &val, 1);
3403 return val;
3404}
3405
3406/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05003407uint32_t lduw_phys(target_phys_addr_t addr)
bellardaab33092005-10-30 20:48:42 +00003408{
3409 uint16_t val;
3410 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3411 return tswap16(val);
3412}
3413
bellard8df1cd02005-01-28 22:37:22 +00003414/* warning: addr must be aligned. The ram page is not marked as dirty
3415 and the code inside is not invalidated. It is useful if the dirty
3416 bits are used to track modified PTEs */
Anthony Liguoric227f092009-10-01 16:12:16 -05003417void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
bellard8df1cd02005-01-28 22:37:22 +00003418{
3419 int io_index;
3420 uint8_t *ptr;
3421 unsigned long pd;
3422 PhysPageDesc *p;
3423
3424 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3425 if (!p) {
3426 pd = IO_MEM_UNASSIGNED;
3427 } else {
3428 pd = p->phys_offset;
3429 }
ths3b46e622007-09-17 08:09:54 +00003430
bellard3a7d9292005-08-21 09:26:42 +00003431 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
bellard8df1cd02005-01-28 22:37:22 +00003432 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00003433 if (p)
3434 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bellard8df1cd02005-01-28 22:37:22 +00003435 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3436 } else {
aliguori74576192008-10-06 14:02:03 +00003437 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
pbrook5579c7f2009-04-11 14:47:08 +00003438 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00003439 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00003440
3441 if (unlikely(in_migration)) {
3442 if (!cpu_physical_memory_is_dirty(addr1)) {
3443 /* invalidate code */
3444 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3445 /* set dirty bit */
3446 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3447 (0xff & ~CODE_DIRTY_FLAG);
3448 }
3449 }
bellard8df1cd02005-01-28 22:37:22 +00003450 }
3451}
3452
Anthony Liguoric227f092009-10-01 16:12:16 -05003453void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
j_mayerbc98a7e2007-04-04 07:55:12 +00003454{
3455 int io_index;
3456 uint8_t *ptr;
3457 unsigned long pd;
3458 PhysPageDesc *p;
3459
3460 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3461 if (!p) {
3462 pd = IO_MEM_UNASSIGNED;
3463 } else {
3464 pd = p->phys_offset;
3465 }
ths3b46e622007-09-17 08:09:54 +00003466
j_mayerbc98a7e2007-04-04 07:55:12 +00003467 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3468 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00003469 if (p)
3470 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
j_mayerbc98a7e2007-04-04 07:55:12 +00003471#ifdef TARGET_WORDS_BIGENDIAN
3472 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3473 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3474#else
3475 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3476 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3477#endif
3478 } else {
pbrook5579c7f2009-04-11 14:47:08 +00003479 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
j_mayerbc98a7e2007-04-04 07:55:12 +00003480 (addr & ~TARGET_PAGE_MASK);
3481 stq_p(ptr, val);
3482 }
3483}
3484
bellard8df1cd02005-01-28 22:37:22 +00003485/* warning: addr must be aligned */
Anthony Liguoric227f092009-10-01 16:12:16 -05003486void stl_phys(target_phys_addr_t addr, uint32_t val)
bellard8df1cd02005-01-28 22:37:22 +00003487{
3488 int io_index;
3489 uint8_t *ptr;
3490 unsigned long pd;
3491 PhysPageDesc *p;
3492
3493 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3494 if (!p) {
3495 pd = IO_MEM_UNASSIGNED;
3496 } else {
3497 pd = p->phys_offset;
3498 }
ths3b46e622007-09-17 08:09:54 +00003499
bellard3a7d9292005-08-21 09:26:42 +00003500 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
bellard8df1cd02005-01-28 22:37:22 +00003501 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00003502 if (p)
3503 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bellard8df1cd02005-01-28 22:37:22 +00003504 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3505 } else {
3506 unsigned long addr1;
3507 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3508 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003509 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00003510 stl_p(ptr, val);
bellard3a7d9292005-08-21 09:26:42 +00003511 if (!cpu_physical_memory_is_dirty(addr1)) {
3512 /* invalidate code */
3513 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3514 /* set dirty bit */
bellardf23db162005-08-21 19:12:28 +00003515 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3516 (0xff & ~CODE_DIRTY_FLAG);
bellard3a7d9292005-08-21 09:26:42 +00003517 }
bellard8df1cd02005-01-28 22:37:22 +00003518 }
3519}
3520
bellardaab33092005-10-30 20:48:42 +00003521/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05003522void stb_phys(target_phys_addr_t addr, uint32_t val)
bellardaab33092005-10-30 20:48:42 +00003523{
3524 uint8_t v = val;
3525 cpu_physical_memory_write(addr, &v, 1);
3526}
3527
3528/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05003529void stw_phys(target_phys_addr_t addr, uint32_t val)
bellardaab33092005-10-30 20:48:42 +00003530{
3531 uint16_t v = tswap16(val);
3532 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3533}
3534
3535/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05003536void stq_phys(target_phys_addr_t addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00003537{
3538 val = tswap64(val);
3539 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3540}
3541
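/* Illustrative sketch: device models typically use the ld*_phys/st*_phys
   helpers above to access small, naturally aligned guest-physical structures
   such as DMA descriptors.  The descriptor layout and function name below
   are hypothetical. */
static uint64_t example_read_descriptor(target_phys_addr_t desc,
                                        uint32_t *out_len)
{
    /* assumed layout: 64-bit buffer address, 32-bit length, 32-bit status */
    uint64_t buf_addr = ldq_phys(desc);

    *out_len = ldl_phys(desc + 8);
    stl_phys(desc + 12, 1);     /* mark the descriptor as consumed */
    return buf_addr;
}
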
bellard13eb76e2004-01-24 15:23:36 +00003542#endif
3543
aliguori5e2972f2009-03-28 17:51:36 +00003544/* virtual memory access for debug (includes writing to ROM) */
ths5fafdf22007-09-16 21:08:06 +00003545int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00003546 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003547{
3548 int l;
Anthony Liguoric227f092009-10-01 16:12:16 -05003549 target_phys_addr_t phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00003550 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00003551
3552 while (len > 0) {
3553 page = addr & TARGET_PAGE_MASK;
3554 phys_addr = cpu_get_phys_page_debug(env, page);
3555 /* if no physical page mapped, return an error */
3556 if (phys_addr == -1)
3557 return -1;
3558 l = (page + TARGET_PAGE_SIZE) - addr;
3559 if (l > len)
3560 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00003561 phys_addr += (addr & ~TARGET_PAGE_MASK);
3562#if !defined(CONFIG_USER_ONLY)
3563 if (is_write)
3564 cpu_physical_memory_write_rom(phys_addr, buf, l);
3565 else
3566#endif
3567 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
bellard13eb76e2004-01-24 15:23:36 +00003568 len -= l;
3569 buf += l;
3570 addr += l;
3571 }
3572 return 0;
3573}
3574
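/* Illustrative sketch: how a debugger front end (for instance the gdb stub)
   reads guest virtual memory through cpu_memory_rw_debug().  The helper name
   is hypothetical. */
static int example_read_guest_u32(CPUState *env, target_ulong vaddr,
                                  uint32_t *value)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0)
        return -1;
    *value = ldl_p(buf);        /* target byte order, as elsewhere in exec.c */
    return 0;
}
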
pbrook2e70f6e2008-06-29 01:03:05 +00003575/* in deterministic execution mode, instructions doing device I/Os
3576 must be at the end of the TB */
3577void cpu_io_recompile(CPUState *env, void *retaddr)
3578{
3579 TranslationBlock *tb;
3580 uint32_t n, cflags;
3581 target_ulong pc, cs_base;
3582 uint64_t flags;
3583
3584 tb = tb_find_pc((unsigned long)retaddr);
3585 if (!tb) {
3586 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3587 retaddr);
3588 }
3589 n = env->icount_decr.u16.low + tb->icount;
3590 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3591 /* Calculate how many instructions had been executed before the fault
thsbf20dc02008-06-30 17:22:19 +00003592 occurred. */
pbrook2e70f6e2008-06-29 01:03:05 +00003593 n = n - env->icount_decr.u16.low;
3594 /* Generate a new TB ending on the I/O insn. */
3595 n++;
3596 /* On MIPS and SH, delay slot instructions can only be restarted if
3597 they were already the first instruction in the TB. If this is not
thsbf20dc02008-06-30 17:22:19 +00003598 the first instruction in a TB then re-execute the preceding
pbrook2e70f6e2008-06-29 01:03:05 +00003599 branch. */
3600#if defined(TARGET_MIPS)
3601 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3602 env->active_tc.PC -= 4;
3603 env->icount_decr.u16.low++;
3604 env->hflags &= ~MIPS_HFLAG_BMASK;
3605 }
3606#elif defined(TARGET_SH4)
3607 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3608 && n > 1) {
3609 env->pc -= 2;
3610 env->icount_decr.u16.low++;
3611 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3612 }
3613#endif
3614 /* This should never happen. */
3615 if (n > CF_COUNT_MASK)
3616 cpu_abort(env, "TB too big during recompile");
3617
3618 cflags = n | CF_LAST_IO;
3619 pc = tb->pc;
3620 cs_base = tb->cs_base;
3621 flags = tb->flags;
3622 tb_phys_invalidate(tb, -1);
3623 /* FIXME: In theory this could raise an exception. In practice
3624 we have already translated the block once so it's probably ok. */
3625 tb_gen_code(env, pc, cs_base, flags, cflags);
thsbf20dc02008-06-30 17:22:19 +00003626 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
pbrook2e70f6e2008-06-29 01:03:05 +00003627 the first in the TB) then we end up generating a whole new TB and
3628 repeating the fault, which is horribly inefficient.
3629 Better would be to execute just this insn uncached, or generate a
3630 second new TB. */
3631 cpu_resume_from_signal(env, NULL);
3632}
3633
bellarde3db7222005-01-26 22:00:47 +00003634void dump_exec_info(FILE *f,
3635 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3636{
3637 int i, target_code_size, max_target_code_size;
3638 int direct_jmp_count, direct_jmp2_count, cross_page;
3639 TranslationBlock *tb;
ths3b46e622007-09-17 08:09:54 +00003640
bellarde3db7222005-01-26 22:00:47 +00003641 target_code_size = 0;
3642 max_target_code_size = 0;
3643 cross_page = 0;
3644 direct_jmp_count = 0;
3645 direct_jmp2_count = 0;
3646 for(i = 0; i < nb_tbs; i++) {
3647 tb = &tbs[i];
3648 target_code_size += tb->size;
3649 if (tb->size > max_target_code_size)
3650 max_target_code_size = tb->size;
3651 if (tb->page_addr[1] != -1)
3652 cross_page++;
3653 if (tb->tb_next_offset[0] != 0xffff) {
3654 direct_jmp_count++;
3655 if (tb->tb_next_offset[1] != 0xffff) {
3656 direct_jmp2_count++;
3657 }
3658 }
3659 }
3660 /* XXX: avoid using doubles ? */
bellard57fec1f2008-02-01 10:50:11 +00003661 cpu_fprintf(f, "Translation buffer state:\n");
bellard26a5f132008-05-28 12:30:31 +00003662 cpu_fprintf(f, "gen code size %ld/%ld\n",
3663 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3664 cpu_fprintf(f, "TB count %d/%d\n",
3665 nb_tbs, code_gen_max_blocks);
ths5fafdf22007-09-16 21:08:06 +00003666 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
bellarde3db7222005-01-26 22:00:47 +00003667 nb_tbs ? target_code_size / nb_tbs : 0,
3668 max_target_code_size);
ths5fafdf22007-09-16 21:08:06 +00003669 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
bellarde3db7222005-01-26 22:00:47 +00003670 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3671 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
ths5fafdf22007-09-16 21:08:06 +00003672 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3673 cross_page,
bellarde3db7222005-01-26 22:00:47 +00003674 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3675 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
ths5fafdf22007-09-16 21:08:06 +00003676 direct_jmp_count,
bellarde3db7222005-01-26 22:00:47 +00003677 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3678 direct_jmp2_count,
3679 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
bellard57fec1f2008-02-01 10:50:11 +00003680 cpu_fprintf(f, "\nStatistics:\n");
bellarde3db7222005-01-26 22:00:47 +00003681 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3682 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3683 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
bellardb67d9a52008-05-23 09:57:34 +00003684 tcg_dump_info(f, cpu_fprintf);
bellarde3db7222005-01-26 22:00:47 +00003685}
3686
ths5fafdf22007-09-16 21:08:06 +00003687#if !defined(CONFIG_USER_ONLY)
bellard61382a52003-10-27 21:22:23 +00003688
3689#define MMUSUFFIX _cmmu
3690#define GETPC() NULL
3691#define env cpu_single_env
bellardb769d8f2004-10-03 15:07:13 +00003692#define SOFTMMU_CODE_ACCESS
bellard61382a52003-10-27 21:22:23 +00003693
3694#define SHIFT 0
3695#include "softmmu_template.h"
3696
3697#define SHIFT 1
3698#include "softmmu_template.h"
3699
3700#define SHIFT 2
3701#include "softmmu_template.h"
3702
3703#define SHIFT 3
3704#include "softmmu_template.h"
3705
3706#undef env
3707
3708#endif