blob: 431f5b2976d0e67544715e2e54a00813eef3a30a [file] [log] [blame]
bellard54936002003-05-13 00:25:15 +00001/*
bellardfd6ce8f2003-05-14 19:00:11 +00002 * virtual page mapping and translated block handling
ths5fafdf22007-09-16 21:08:06 +00003 *
bellard54936002003-05-13 00:25:15 +00004 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
Blue Swirl8167ee82009-07-16 20:47:01 +000017 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
bellard54936002003-05-13 00:25:15 +000018 */
bellard67b915a2004-03-31 23:37:16 +000019#include "config.h"
bellardd5a8f072004-09-29 21:15:28 +000020#ifdef _WIN32
21#include <windows.h>
22#else
bellarda98d49b2004-11-14 16:22:05 +000023#include <sys/types.h>
bellardd5a8f072004-09-29 21:15:28 +000024#include <sys/mman.h>
25#endif
bellard54936002003-05-13 00:25:15 +000026#include <stdlib.h>
27#include <stdio.h>
28#include <stdarg.h>
29#include <string.h>
30#include <errno.h>
31#include <unistd.h>
32#include <inttypes.h>
33
bellard6180a182003-09-30 21:04:53 +000034#include "cpu.h"
35#include "exec-all.h"
aurel32ca10f862008-04-11 21:35:42 +000036#include "qemu-common.h"
bellardb67d9a52008-05-23 09:57:34 +000037#include "tcg.h"
pbrookb3c77242008-06-30 16:31:04 +000038#include "hw/hw.h"
aliguori74576192008-10-06 14:02:03 +000039#include "osdep.h"
aliguori7ba1e612008-11-05 16:04:33 +000040#include "kvm.h"
pbrook53a59602006-03-25 19:31:22 +000041#if defined(CONFIG_USER_ONLY)
42#include <qemu.h>
Riku Voipiofd052bf2010-01-25 14:30:49 +020043#include <signal.h>
pbrook53a59602006-03-25 19:31:22 +000044#endif
bellard54936002003-05-13 00:25:15 +000045
bellardfd6ce8f2003-05-14 19:00:11 +000046//#define DEBUG_TB_INVALIDATE
bellard66e85a22003-06-24 13:28:12 +000047//#define DEBUG_FLUSH
bellard9fa3e852004-01-04 18:06:42 +000048//#define DEBUG_TLB
pbrook67d3b952006-12-18 05:03:52 +000049//#define DEBUG_UNASSIGNED
bellardfd6ce8f2003-05-14 19:00:11 +000050
51/* make various TB consistency checks */
ths5fafdf22007-09-16 21:08:06 +000052//#define DEBUG_TB_CHECK
53//#define DEBUG_TLB_CHECK
bellardfd6ce8f2003-05-14 19:00:11 +000054
ths1196be32007-03-17 15:17:58 +000055//#define DEBUG_IOPORT
blueswir1db7b5422007-05-26 17:36:03 +000056//#define DEBUG_SUBPAGE
ths1196be32007-03-17 15:17:58 +000057
pbrook99773bd2006-04-16 15:14:59 +000058#if !defined(CONFIG_USER_ONLY)
59/* TB consistency checks only implemented for usermode emulation. */
60#undef DEBUG_TB_CHECK
61#endif
62
bellard9fa3e852004-01-04 18:06:42 +000063#define SMC_BITMAP_USE_THRESHOLD 10
64
blueswir1bdaf78e2008-10-04 07:24:27 +000065static TranslationBlock *tbs;
bellard26a5f132008-05-28 12:30:31 +000066int code_gen_max_blocks;
bellard9fa3e852004-01-04 18:06:42 +000067TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
blueswir1bdaf78e2008-10-04 07:24:27 +000068static int nb_tbs;
bellardeb51d102003-05-14 21:51:13 +000069/* any access to the tbs or the page table must use this lock */
Anthony Liguoric227f092009-10-01 16:12:16 -050070spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
bellardfd6ce8f2003-05-14 19:00:11 +000071
blueswir1141ac462008-07-26 15:05:57 +000072#if defined(__arm__) || defined(__sparc_v9__)
73/* The prologue must be reachable with a direct jump. ARM and Sparc64
74 have limited branch ranges (possibly also PPC) so place it in a
blueswir1d03d8602008-07-10 17:21:31 +000075 section close to code segment. */
76#define code_gen_section \
77 __attribute__((__section__(".gen_code"))) \
78 __attribute__((aligned (32)))
Stefan Weilf8e2af12009-06-18 23:04:48 +020079#elif defined(_WIN32)
80/* Maximum alignment for Win32 is 16. */
81#define code_gen_section \
82 __attribute__((aligned (16)))
blueswir1d03d8602008-07-10 17:21:31 +000083#else
84#define code_gen_section \
85 __attribute__((aligned (32)))
86#endif
87
88uint8_t code_gen_prologue[1024] code_gen_section;
blueswir1bdaf78e2008-10-04 07:24:27 +000089static uint8_t *code_gen_buffer;
90static unsigned long code_gen_buffer_size;
bellard26a5f132008-05-28 12:30:31 +000091/* threshold to flush the translated code buffer */
blueswir1bdaf78e2008-10-04 07:24:27 +000092static unsigned long code_gen_buffer_max_size;
bellardfd6ce8f2003-05-14 19:00:11 +000093uint8_t *code_gen_ptr;
94
pbrooke2eef172008-06-08 01:09:01 +000095#if !defined(CONFIG_USER_ONLY)
bellard9fa3e852004-01-04 18:06:42 +000096int phys_ram_fd;
bellard1ccde1c2004-02-06 19:46:14 +000097uint8_t *phys_ram_dirty;
aliguori74576192008-10-06 14:02:03 +000098static int in_migration;
pbrook94a6b542009-04-11 17:15:54 +000099
100typedef struct RAMBlock {
101 uint8_t *host;
Anthony Liguoric227f092009-10-01 16:12:16 -0500102 ram_addr_t offset;
103 ram_addr_t length;
pbrook94a6b542009-04-11 17:15:54 +0000104 struct RAMBlock *next;
105} RAMBlock;
106
107static RAMBlock *ram_blocks;
108/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
Stuart Bradyccbb4d42009-05-03 12:15:06 +0100109 then we can no longer assume contiguous ram offsets, and external uses
pbrook94a6b542009-04-11 17:15:54 +0000110 of this variable will break. */
Anthony Liguoric227f092009-10-01 16:12:16 -0500111ram_addr_t last_ram_offset;
pbrooke2eef172008-06-08 01:09:01 +0000112#endif
bellard9fa3e852004-01-04 18:06:42 +0000113
bellard6a00d602005-11-21 23:25:50 +0000114CPUState *first_cpu;
115/* current CPU in the current thread. It is only valid inside
116 cpu_exec() */
ths5fafdf22007-09-16 21:08:06 +0000117CPUState *cpu_single_env;
pbrook2e70f6e2008-06-29 01:03:05 +0000118/* 0 = Do not count executed instructions.
thsbf20dc02008-06-30 17:22:19 +0000119 1 = Precise instruction counting.
pbrook2e70f6e2008-06-29 01:03:05 +0000120 2 = Adaptive rate instruction counting. */
121int use_icount = 0;
122/* Current instruction counter. While executing translated code this may
123 include some instructions that have not yet been executed. */
124int64_t qemu_icount;
bellard6a00d602005-11-21 23:25:50 +0000125
bellard54936002003-05-13 00:25:15 +0000126typedef struct PageDesc {
bellard92e873b2004-05-21 14:52:29 +0000127 /* list of TBs intersecting this ram page */
bellardfd6ce8f2003-05-14 19:00:11 +0000128 TranslationBlock *first_tb;
bellard9fa3e852004-01-04 18:06:42 +0000129 /* in order to optimize self modifying code, we count the number
130 of lookups we do to a given page to use a bitmap */
131 unsigned int code_write_count;
132 uint8_t *code_bitmap;
133#if defined(CONFIG_USER_ONLY)
134 unsigned long flags;
135#endif
bellard54936002003-05-13 00:25:15 +0000136} PageDesc;
137
bellard92e873b2004-05-21 14:52:29 +0000138typedef struct PhysPageDesc {
pbrook0f459d12008-06-09 00:20:13 +0000139 /* offset in host memory of the page + io_index in the low bits */
Anthony Liguoric227f092009-10-01 16:12:16 -0500140 ram_addr_t phys_offset;
141 ram_addr_t region_offset;
bellard92e873b2004-05-21 14:52:29 +0000142} PhysPageDesc;
143
bellard54936002003-05-13 00:25:15 +0000144#define L2_BITS 10
j_mayerbedb69e2007-04-05 20:08:21 +0000145#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
146/* XXX: this is a temporary hack for alpha target.
147 * In the future, this is to be replaced by a multi-level table
148 * to actually be able to handle the complete 64 bits address space.
149 */
150#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
151#else
aurel3203875442008-04-22 20:45:18 +0000152#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
j_mayerbedb69e2007-04-05 20:08:21 +0000153#endif
bellard54936002003-05-13 00:25:15 +0000154
155#define L1_SIZE (1 << L1_BITS)
156#define L2_SIZE (1 << L2_BITS)
157
bellard83fb7ad2004-07-05 21:25:26 +0000158unsigned long qemu_real_host_page_size;
159unsigned long qemu_host_page_bits;
160unsigned long qemu_host_page_size;
161unsigned long qemu_host_page_mask;
bellard54936002003-05-13 00:25:15 +0000162
bellard92e873b2004-05-21 14:52:29 +0000163/* XXX: for system emulation, it could just be an array */
bellard54936002003-05-13 00:25:15 +0000164static PageDesc *l1_map[L1_SIZE];
165
pbrooke2eef172008-06-08 01:09:01 +0000166#if !defined(CONFIG_USER_ONLY)
Paul Brook6d9a1302010-02-28 23:55:53 +0000167static PhysPageDesc **l1_phys_map;
168
pbrooke2eef172008-06-08 01:09:01 +0000169static void io_mem_init(void);
170
bellard33417e72003-08-10 21:47:01 +0000171/* io memory support */
bellard33417e72003-08-10 21:47:01 +0000172CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
173CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
bellarda4193c82004-06-03 14:01:43 +0000174void *io_mem_opaque[IO_MEM_NB_ENTRIES];
blueswir1511d2b12009-03-07 15:32:56 +0000175static char io_mem_used[IO_MEM_NB_ENTRIES];
pbrook6658ffb2007-03-16 23:58:11 +0000176static int io_mem_watch;
177#endif
bellard33417e72003-08-10 21:47:01 +0000178
bellard34865132003-10-05 14:28:56 +0000179/* log support */
Juha Riihimäki1e8b27c2009-12-03 15:56:02 +0200180#ifdef WIN32
181static const char *logfilename = "qemu.log";
182#else
blueswir1d9b630f2008-10-05 09:57:08 +0000183static const char *logfilename = "/tmp/qemu.log";
Juha Riihimäki1e8b27c2009-12-03 15:56:02 +0200184#endif
bellard34865132003-10-05 14:28:56 +0000185FILE *logfile;
186int loglevel;
pbrooke735b912007-06-30 13:53:24 +0000187static int log_append = 0;
bellard34865132003-10-05 14:28:56 +0000188
bellarde3db7222005-01-26 22:00:47 +0000189/* statistics */
190static int tlb_flush_count;
191static int tb_flush_count;
192static int tb_phys_invalidate_count;
193
#ifdef _WIN32
/* Make [addr, addr+size) executable so generated code can be run.
   Win32 version: change the page protection with VirtualProtect.
   The previous protection is discarded; the return value is not
   checked. */
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

}
#else
/* POSIX version: round the range outward to host page boundaries
   (mprotect requires page-aligned arguments) and set it RWX.  The
   mprotect return value is not checked. */
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);          /* round start down to a page */

    end = (unsigned long)addr + size;
    end += page_size - 1;               /* round end up to a page */
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
219
/* Initialize the host/target page-size globals and, for system
   emulation, the physical page map.  Must run before any page lookup. */
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    /* derive log2 and mask of the (possibly enlarged) host page size */
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
#if !defined(CONFIG_USER_ONLY)
    /* top level of the physical page table, zero-filled (no pages mapped) */
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
#endif

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        mmap_lock();
        last_brk = (unsigned long)sbrk(0);
        /* Mark every region the host process has already mapped as
           PAGE_RESERVED so guest mmap emulation will not place guest
           mappings on top of them. */
        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    /* clamp to the representable physical address range */
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                  (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
        mmap_unlock();
    }
#endif
}
275
aliguori434929b2008-09-15 15:56:30 +0000276static inline PageDesc **page_l1_map(target_ulong index)
bellard54936002003-05-13 00:25:15 +0000277{
pbrook17e23772008-06-09 13:47:45 +0000278#if TARGET_LONG_BITS > 32
279 /* Host memory outside guest VM. For 32-bit targets we have already
280 excluded high addresses. */
thsd8173e02008-08-29 13:10:00 +0000281 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
pbrook17e23772008-06-09 13:47:45 +0000282 return NULL;
283#endif
aliguori434929b2008-09-15 15:56:30 +0000284 return &l1_map[index >> L2_BITS];
285}
286
/* Return the PageDesc for target page 'index', allocating the
   second-level table on first touch.  Returns NULL only when the
   index is out of range (see page_l1_map). */
static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p) {
        /* allocate if not found */
#if defined(CONFIG_USER_ONLY)
        size_t len = sizeof(PageDesc) * L2_SIZE;
        /* Don't use qemu_malloc because it may recurse. */
        p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        *lp = p;
        if (h2g_valid(p)) {
            /* The table we just mapped lies inside the guest address
               space: reserve those pages so the guest cannot mmap over
               them. */
            unsigned long addr = h2g(p);
            page_set_flags(addr & TARGET_PAGE_MASK,
                           TARGET_PAGE_ALIGN(addr + len),
                           PAGE_RESERVED);
        }
#else
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
        *lp = p;
#endif
    }
    return p + (index & (L2_SIZE - 1));
}
aurel3200f82b82008-04-27 21:12:55 +0000317static inline PageDesc *page_find(target_ulong index)
bellard54936002003-05-13 00:25:15 +0000318{
aliguori434929b2008-09-15 15:56:30 +0000319 PageDesc **lp, *p;
320 lp = page_l1_map(index);
321 if (!lp)
322 return NULL;
bellard54936002003-05-13 00:25:15 +0000323
aliguori434929b2008-09-15 15:56:30 +0000324 p = *lp;
Blue Swirl660f11b2009-07-31 21:16:51 +0000325 if (!p) {
326 return NULL;
327 }
bellardfd6ce8f2003-05-14 19:00:11 +0000328 return p + (index & (L2_SIZE - 1));
bellard54936002003-05-13 00:25:15 +0000329}
330
Paul Brook6d9a1302010-02-28 23:55:53 +0000331#if !defined(CONFIG_USER_ONLY)
/* Return the PhysPageDesc for physical page 'index', building the
   intermediate table levels on demand when 'alloc' is non-zero.
   Returns NULL if the page is unmapped and 'alloc' is zero. */
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    /* extra table level for physical address spaces wider than 32 bits */
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++) {
            /* fresh entries start unassigned; region_offset mirrors the
               page's own physical address until a device overrides it */
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}
370
/* Non-allocating lookup of the PhysPageDesc for a physical page index;
   returns NULL when the page has never been registered. */
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
375
Anthony Liguoric227f092009-10-01 16:12:16 -0500376static void tlb_protect_code(ram_addr_t ram_addr);
377static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
bellard3a7d9292005-08-21 09:26:42 +0000378 target_ulong vaddr);
pbrookc8a706f2008-06-02 16:16:42 +0000379#define mmap_lock() do { } while(0)
380#define mmap_unlock() do { } while(0)
bellard9fa3e852004-01-04 18:06:42 +0000381#endif
bellardfd6ce8f2003-05-14 19:00:11 +0000382
bellard43694152008-05-29 09:35:57 +0000383#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
384
385#if defined(CONFIG_USER_ONLY)
Stuart Bradyccbb4d42009-05-03 12:15:06 +0100386/* Currently it is not recommended to allocate big chunks of data in
bellard43694152008-05-29 09:35:57 +0000387 user mode. It will change when a dedicated libc will be used */
388#define USE_STATIC_CODE_GEN_BUFFER
389#endif
390
391#ifdef USE_STATIC_CODE_GEN_BUFFER
392static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
393#endif
394
/* Allocate the buffer that will hold translated host code.  'tb_size'
   of zero selects a default.  On hosts with limited branch ranges the
   buffer must be mapped at a constrained address so generated code can
   reach the prologue with direct calls/branches. */
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    /* user-mode build: a fixed static array, made executable in place */
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        /* Map the buffer below 2G, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    /* fallback: plain allocation, then flip the protection to RWX */
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    /* keep headroom for one maximum-size block so generation can always
       finish the current TB before tb_flush() is forced */
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
481
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;   /* generation starts at buffer base */
    page_init();
#if !defined(CONFIG_USER_ONLY)
    /* I/O memory dispatch tables exist only in system emulation */
    io_mem_init();
#endif
}
495
pbrook9656f322008-07-01 20:01:19 +0000496#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
497
/* Fix up common CPU state after an incoming migration / snapshot
   load.  Always returns 0 (success). */
static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    /* the TLB is not migrated; rebuild it lazily from scratch */
    tlb_flush(env, 1);

    return 0;
}
Juan Quintelae7f4eff2009-09-10 03:04:33 +0200509
510static const VMStateDescription vmstate_cpu_common = {
511 .name = "cpu_common",
512 .version_id = 1,
513 .minimum_version_id = 1,
514 .minimum_version_id_old = 1,
Juan Quintelae7f4eff2009-09-10 03:04:33 +0200515 .post_load = cpu_common_post_load,
516 .fields = (VMStateField []) {
517 VMSTATE_UINT32(halted, CPUState),
518 VMSTATE_UINT32(interrupt_request, CPUState),
519 VMSTATE_END_OF_LIST()
520 }
521};
pbrook9656f322008-07-01 20:01:19 +0000522#endif
523
Glauber Costa950f1472009-06-09 12:15:18 -0400524CPUState *qemu_get_cpu(int cpu)
525{
526 CPUState *env = first_cpu;
527
528 while (env) {
529 if (env->cpu_index == cpu)
530 break;
531 env = env->next_cpu;
532 }
533
534 return env;
535}
536
/* Register a newly created CPU: append it to the global list, assign
   it the next free cpu_index, and hook it into snapshot/migration. */
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    /* in user mode other threads may be creating CPUs concurrently */
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    /* walk to the tail of the list, counting existing CPUs on the way */
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(cpu_index, &vmstate_cpu_common, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}
566
bellard9fa3e852004-01-04 18:06:42 +0000567static inline void invalidate_page_bitmap(PageDesc *p)
568{
569 if (p->code_bitmap) {
bellard59817cc2004-02-16 22:01:13 +0000570 qemu_free(p->code_bitmap);
bellard9fa3e852004-01-04 18:06:42 +0000571 p->code_bitmap = NULL;
572 }
573 p->code_write_count = 0;
574}
575
bellardfd6ce8f2003-05-14 19:00:11 +0000576/* set to NULL all the 'first_tb' fields in all PageDescs */
577static void page_flush_tb(void)
578{
579 int i, j;
580 PageDesc *p;
581
582 for(i = 0; i < L1_SIZE; i++) {
583 p = l1_map[i];
584 if (p) {
bellard9fa3e852004-01-04 18:06:42 +0000585 for(j = 0; j < L2_SIZE; j++) {
586 p->first_tb = NULL;
587 invalidate_page_bitmap(p);
588 p++;
589 }
bellardfd6ce8f2003-05-14 19:00:11 +0000590 }
591 }
592}
593
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    /* the generator must stop before running past the buffer end */
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    /* every CPU's jump cache may point at TBs that are being freed */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    /* restart code generation at the base of the (now empty) buffer */
    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
622
623#ifdef DEBUG_TB_CHECK
624
/* Debug check (DEBUG_TB_CHECK only): after invalidating 'address',
   report any TB still in the phys hash whose PC range overlaps that
   page — such a TB should have been removed. */
static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            /* overlap test: NOT (tb entirely before or after the page) */
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}
641
/* verify that all the pages have correct rights for code */
/* Debug check (DEBUG_TB_CHECK only): a page containing translated code
   must not be writable, otherwise self-modifying code would go
   undetected. */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            /* check both the first and the last page the TB spans */
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}
659
660#endif
661
662/* invalidate one TB */
663static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
664 int next_offset)
665{
666 TranslationBlock *tb1;
667 for(;;) {
668 tb1 = *ptb;
669 if (tb1 == tb) {
670 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
671 break;
672 }
673 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
674 }
675}
676
bellard9fa3e852004-01-04 18:06:42 +0000677static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
678{
679 TranslationBlock *tb1;
680 unsigned int n1;
681
682 for(;;) {
683 tb1 = *ptb;
684 n1 = (long)tb1 & 3;
685 tb1 = (TranslationBlock *)((long)tb1 & ~3);
686 if (tb1 == tb) {
687 *ptb = tb1->page_next[n1];
688 break;
689 }
690 ptb = &tb1->page_next[n1];
691 }
692}
693
/* Remove jump slot 'n' of 'tb' from the circular list of TBs that jump
   into its destination.  Links carry a tag in their low 2 bits: 0/1
   name which jmp_next slot continues the chain, 2 marks the list head
   (the destination TB's jmp_first). */
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                /* reached the head: continue via the destination's
                   jmp_first pointer */
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
721
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    /* repoint the patched jump at the code that follows it inside this
       TB (tc_ptr + tb_next_offset[n]), i.e. fall back to the epilogue */
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
728
/* Remove a TB from every data structure that can reach it: the
   physical-PC hash, the per-page TB lists, each CPU's jump cache and
   the jump-chaining lists.  'page_addr' is the page whose list the
   caller is already iterating (that list is skipped here so the caller
   can unlink the TB itself). */
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    /* a TB may span two pages; page_addr[1] == -1 means it does not */
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)    /* tag 2 marks the list head: done */
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    /* empty circular list pointing at itself, tagged as head */
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
784
/* Set 'len' consecutive bits of the bitmap 'tab', starting at bit
   index 'start'.  Bit i lives in tab[i >> 3] at mask 1 << (i & 7).
   Existing bits outside the range are preserved. */
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end = start + len;
    uint8_t *byte = tab + (start >> 3);
    int head_mask = 0xff << (start & 7);

    if ((start & ~7) == (end & ~7)) {
        /* start and end fall within the same byte */
        if (start < end) {
            *byte |= head_mask & ~(0xff << (end & 7));
        }
    } else {
        /* partial leading byte */
        *byte++ |= head_mask;
        /* full bytes in the middle */
        for (start = (start + 8) & ~7; start < (end & ~7); start += 8) {
            *byte++ = 0xff;
        }
        /* partial trailing byte, if any bits remain */
        if (start < end) {
            *byte |= ~(0xff << (end & 7));
        }
    }
}
811
/* Build the per-page code bitmap: one bit per byte of the page, set
   for every byte covered by a TB on this page.  Used to let writes to
   non-code bytes of a "code page" skip TB invalidation. */
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    /* one bit per byte of the target page, zero-initialized */
    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        /* low 2 bits of the list pointer select which of the TB's
           (up to) two pages this list entry belongs to */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            /* second page: TB covers from the page start up to its end */
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
839
/* Translate guest code at (pc, cs_base, flags) into a new TB and link
   it into the physical page tables.  If TB or code-buffer space is
   exhausted, the whole translation cache is flushed and translation is
   retried (which cannot fail).  Returns the new TB. */
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    /* generated host code is appended at the current buffer position */
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    /* advance the buffer pointer, rounded up to CODE_GEN_ALIGN */
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
    return tb;
}
ths3b46e622007-09-17 08:09:54 +0000876
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    target_ulong tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    /* after enough write faults on this page, build the code bitmap so
       later writes to non-code bytes can skip invalidation entirely */
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        /* low 2 bits of the list pointer select the TB's page slot */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        /* invalidate only TBs overlapping [start, end[ */
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                /* lazily locate the executing TB, once, on first overlap */
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                /* replay any interrupt that arrived while current_tb
                   was cleared */
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
987
/* len must be <= 8 and start must be a multiple of len */
/* Fast-path invalidation for small aligned writes: when the page's
   code bitmap exists and shows no TB bytes in [start, start+len[, the
   write can be ignored; otherwise fall back to the full range
   invalidation. */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        /* test the 'len' bitmap bits covering the written bytes */
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
1014
#if !defined(CONFIG_SOFTMMU)
/* Invalidate every TB on the page containing 'addr' (user-mode only,
   called from the write-protection fault path).  'pc'/'puc' describe
   the faulting host context; with precise SMC support, a write from
   within the currently executing TB forces regeneration of a one-insn
   TB and a restart via cpu_resume_from_signal(). */
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        /* map the faulting host pc back to the TB being executed */
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        /* low 2 bits of the list pointer select the TB's page slot */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001075
/* add the tb in the target page and protect it if necessary */
/* 'n' is the TB page slot (0 or 1) and is encoded into the low bits of
   the per-page list pointer.  In user mode the host page is made
   read-only via mprotect() so self-modifying writes fault; in system
   mode the softmmu TLB write-protects the page instead. */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    /* push onto the page's TB list, tagging the pointer with slot 'n' */
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        /* a host page may cover several target pages: collect the
           union of their flags and clear PAGE_WRITE on each */
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
1130
1131/* Allocate a new translation block. Flush the translation buffer if
1132 too many translation blocks or too much generated code. */
bellardc27004e2005-01-03 23:35:10 +00001133TranslationBlock *tb_alloc(target_ulong pc)
bellardfd6ce8f2003-05-14 19:00:11 +00001134{
1135 TranslationBlock *tb;
bellardfd6ce8f2003-05-14 19:00:11 +00001136
bellard26a5f132008-05-28 12:30:31 +00001137 if (nb_tbs >= code_gen_max_blocks ||
1138 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
bellardd4e81642003-05-25 16:46:15 +00001139 return NULL;
bellardfd6ce8f2003-05-14 19:00:11 +00001140 tb = &tbs[nb_tbs++];
1141 tb->pc = pc;
bellardb448f2f2004-02-25 23:24:04 +00001142 tb->cflags = 0;
bellardd4e81642003-05-25 16:46:15 +00001143 return tb;
1144}
1145
pbrook2e70f6e2008-06-29 01:03:05 +00001146void tb_free(TranslationBlock *tb)
1147{
thsbf20dc02008-06-30 17:22:19 +00001148 /* In practice this is mostly used for single use temporary TB
pbrook2e70f6e2008-06-29 01:03:05 +00001149 Ignore the hard cases and just back up if this TB happens to
1150 be the last one generated. */
1151 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1152 code_gen_ptr = tb->tc_ptr;
1153 nb_tbs--;
1154 }
1155}
1156
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done. */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    /* empty circular list of incoming jumps (tag 2 marks the head) */
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
1196
bellarda513fe12003-05-27 23:29:48 +00001197/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1198 tb[1].tc_ptr. Return NULL if not found */
1199TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1200{
1201 int m_min, m_max, m;
1202 unsigned long v;
1203 TranslationBlock *tb;
1204
1205 if (nb_tbs <= 0)
1206 return NULL;
1207 if (tc_ptr < (unsigned long)code_gen_buffer ||
1208 tc_ptr >= (unsigned long)code_gen_ptr)
1209 return NULL;
1210 /* binary search (cf Knuth) */
1211 m_min = 0;
1212 m_max = nb_tbs - 1;
1213 while (m_min <= m_max) {
1214 m = (m_min + m_max) >> 1;
1215 tb = &tbs[m];
1216 v = (unsigned long)tb->tc_ptr;
1217 if (v == tc_ptr)
1218 return tb;
1219 else if (tc_ptr < v) {
1220 m_max = m - 1;
1221 } else {
1222 m_min = m + 1;
1223 }
ths5fafdf22007-09-16 21:08:06 +00001224 }
bellarda513fe12003-05-27 23:29:48 +00001225 return &tbs[m_max];
1226}
bellard75012672003-06-21 13:11:07 +00001227
bellardea041c02003-06-25 16:16:50 +00001228static void tb_reset_jump_recursive(TranslationBlock *tb);
1229
/* Break the chained jump 'n' of 'tb': remove tb from the destination
   TB's circular list of incoming jumps, unpatch the branch, and then
   recursively unchain the destination TB as well.  List entries are
   tagged pointers (low 2 bits = jump slot, 2 = list head). */
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure now that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}
1268
1269static void tb_reset_jump_recursive(TranslationBlock *tb)
1270{
1271 tb_reset_jump_recursive2(tb, 0);
1272 tb_reset_jump_recursive2(tb, 1);
1273}
1274
#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
/* Invalidate the TB(s) containing guest 'pc' so the breakpoint check
   is regenerated on retranslation.  User mode: guest addresses map
   directly to the page tables. */
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
/* System mode: translate the virtual pc to a ram address first, then
   invalidate the TB(s) covering that byte. */
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    /* combine the page's ram offset with the offset of pc in the page */
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
#endif /* TARGET_HAS_ICE */
bellardd720b932004-04-25 17:57:43 +00001301
#if defined(CONFIG_USER_ONLY)
/* User-mode emulation has no data watchpoint support: removal is a
   no-op and insertion reports -ENOSYS. */
void cpu_watchpoint_remove_all(CPUState *env, int mask)

{
}

int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
pbrook6658ffb2007-03-16 23:58:11 +00001314/* Add a watchpoint. */
aliguoria1d1bb32008-11-18 20:07:32 +00001315int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1316 int flags, CPUWatchpoint **watchpoint)
pbrook6658ffb2007-03-16 23:58:11 +00001317{
aliguorib4051332008-11-18 20:14:20 +00001318 target_ulong len_mask = ~(len - 1);
aliguoric0ce9982008-11-25 22:13:57 +00001319 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001320
aliguorib4051332008-11-18 20:14:20 +00001321 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1322 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1323 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1324 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1325 return -EINVAL;
1326 }
aliguoria1d1bb32008-11-18 20:07:32 +00001327 wp = qemu_malloc(sizeof(*wp));
pbrook6658ffb2007-03-16 23:58:11 +00001328
aliguoria1d1bb32008-11-18 20:07:32 +00001329 wp->vaddr = addr;
aliguorib4051332008-11-18 20:14:20 +00001330 wp->len_mask = len_mask;
aliguoria1d1bb32008-11-18 20:07:32 +00001331 wp->flags = flags;
1332
aliguori2dc9f412008-11-18 20:56:59 +00001333 /* keep all GDB-injected watchpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001334 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001335 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001336 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001337 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001338
pbrook6658ffb2007-03-16 23:58:11 +00001339 tlb_flush_page(env, addr);
aliguoria1d1bb32008-11-18 20:07:32 +00001340
1341 if (watchpoint)
1342 *watchpoint = wp;
1343 return 0;
pbrook6658ffb2007-03-16 23:58:11 +00001344}
1345
aliguoria1d1bb32008-11-18 20:07:32 +00001346/* Remove a specific watchpoint. */
1347int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1348 int flags)
pbrook6658ffb2007-03-16 23:58:11 +00001349{
aliguorib4051332008-11-18 20:14:20 +00001350 target_ulong len_mask = ~(len - 1);
aliguoria1d1bb32008-11-18 20:07:32 +00001351 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001352
Blue Swirl72cf2d42009-09-12 07:36:22 +00001353 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001354 if (addr == wp->vaddr && len_mask == wp->len_mask
aliguori6e140f22008-11-18 20:37:55 +00001355 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
aliguoria1d1bb32008-11-18 20:07:32 +00001356 cpu_watchpoint_remove_by_ref(env, wp);
pbrook6658ffb2007-03-16 23:58:11 +00001357 return 0;
1358 }
1359 }
aliguoria1d1bb32008-11-18 20:07:32 +00001360 return -ENOENT;
pbrook6658ffb2007-03-16 23:58:11 +00001361}
1362
aliguoria1d1bb32008-11-18 20:07:32 +00001363/* Remove a specific watchpoint by reference. */
1364void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1365{
Blue Swirl72cf2d42009-09-12 07:36:22 +00001366 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
edgar_igl7d03f822008-05-17 18:58:29 +00001367
aliguoria1d1bb32008-11-18 20:07:32 +00001368 tlb_flush_page(env, watchpoint->vaddr);
1369
1370 qemu_free(watchpoint);
edgar_igl7d03f822008-05-17 18:58:29 +00001371}
1372
aliguoria1d1bb32008-11-18 20:07:32 +00001373/* Remove all matching watchpoints. */
1374void cpu_watchpoint_remove_all(CPUState *env, int mask)
1375{
aliguoric0ce9982008-11-25 22:13:57 +00001376 CPUWatchpoint *wp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001377
Blue Swirl72cf2d42009-09-12 07:36:22 +00001378 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001379 if (wp->flags & mask)
1380 cpu_watchpoint_remove_by_ref(env, wp);
aliguoric0ce9982008-11-25 22:13:57 +00001381 }
aliguoria1d1bb32008-11-18 20:07:32 +00001382}
Paul Brookc527ee82010-03-01 03:31:14 +00001383#endif
aliguoria1d1bb32008-11-18 20:07:32 +00001384
1385/* Add a breakpoint. */
1386int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1387 CPUBreakpoint **breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001388{
bellard1fddef42005-04-17 19:16:13 +00001389#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001390 CPUBreakpoint *bp;
ths3b46e622007-09-17 08:09:54 +00001391
aliguoria1d1bb32008-11-18 20:07:32 +00001392 bp = qemu_malloc(sizeof(*bp));
aliguoria1d1bb32008-11-18 20:07:32 +00001393
1394 bp->pc = pc;
1395 bp->flags = flags;
1396
aliguori2dc9f412008-11-18 20:56:59 +00001397 /* keep all GDB-injected breakpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001398 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001399 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001400 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001401 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001402
1403 breakpoint_invalidate(env, pc);
1404
1405 if (breakpoint)
1406 *breakpoint = bp;
1407 return 0;
1408#else
1409 return -ENOSYS;
1410#endif
1411}
1412
1413/* Remove a specific breakpoint. */
1414int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1415{
1416#if defined(TARGET_HAS_ICE)
1417 CPUBreakpoint *bp;
1418
Blue Swirl72cf2d42009-09-12 07:36:22 +00001419 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00001420 if (bp->pc == pc && bp->flags == flags) {
1421 cpu_breakpoint_remove_by_ref(env, bp);
bellard4c3a88a2003-07-26 12:06:08 +00001422 return 0;
aliguoria1d1bb32008-11-18 20:07:32 +00001423 }
bellard4c3a88a2003-07-26 12:06:08 +00001424 }
aliguoria1d1bb32008-11-18 20:07:32 +00001425 return -ENOENT;
bellard4c3a88a2003-07-26 12:06:08 +00001426#else
aliguoria1d1bb32008-11-18 20:07:32 +00001427 return -ENOSYS;
bellard4c3a88a2003-07-26 12:06:08 +00001428#endif
1429}
1430
aliguoria1d1bb32008-11-18 20:07:32 +00001431/* Remove a specific breakpoint by reference. */
1432void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001433{
bellard1fddef42005-04-17 19:16:13 +00001434#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001435 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
bellardd720b932004-04-25 17:57:43 +00001436
aliguoria1d1bb32008-11-18 20:07:32 +00001437 breakpoint_invalidate(env, breakpoint->pc);
1438
1439 qemu_free(breakpoint);
1440#endif
1441}
1442
1443/* Remove all matching breakpoints. */
1444void cpu_breakpoint_remove_all(CPUState *env, int mask)
1445{
1446#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001447 CPUBreakpoint *bp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001448
Blue Swirl72cf2d42009-09-12 07:36:22 +00001449 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001450 if (bp->flags & mask)
1451 cpu_breakpoint_remove_by_ref(env, bp);
aliguoric0ce9982008-11-25 22:13:57 +00001452 }
bellard4c3a88a2003-07-26 12:06:08 +00001453#endif
1454}
1455
bellardc33a3462003-07-29 20:50:33 +00001456/* enable or disable single step mode. EXCP_DEBUG is returned by the
1457 CPU loop after each instruction */
1458void cpu_single_step(CPUState *env, int enabled)
1459{
bellard1fddef42005-04-17 19:16:13 +00001460#if defined(TARGET_HAS_ICE)
bellardc33a3462003-07-29 20:50:33 +00001461 if (env->singlestep_enabled != enabled) {
1462 env->singlestep_enabled = enabled;
aliguorie22a25c2009-03-12 20:12:48 +00001463 if (kvm_enabled())
1464 kvm_update_guest_debug(env, 0);
1465 else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01001466 /* must flush all the translated code to avoid inconsistencies */
aliguorie22a25c2009-03-12 20:12:48 +00001467 /* XXX: only flush what is necessary */
1468 tb_flush(env);
1469 }
bellardc33a3462003-07-29 20:50:33 +00001470 }
1471#endif
1472}
1473
/* enable or disable low levels log */
/* Sets the global log mask; opens the log file on first enable and
   closes it when the mask drops to zero.  A failed open is fatal. */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        /* First open truncates ("w"); any later reopen appends ("a")
           because log_append is set below after a successful open. */
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            /* _exit, not exit: avoid running atexit handlers / flushing
               other streams from this low-level failure path. */
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif !defined(_WIN32)
        /* Win32 doesn't support line-buffering and requires size >= 2 */
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}
1501
1502void cpu_set_log_filename(const char *filename)
1503{
1504 logfilename = strdup(filename);
pbrooke735b912007-06-30 13:53:24 +00001505 if (logfile) {
1506 fclose(logfile);
1507 logfile = NULL;
1508 }
1509 cpu_set_log(loglevel);
bellard34865132003-10-05 14:28:56 +00001510}
bellardc33a3462003-07-29 20:50:33 +00001511
/* Detach the CPU from the translation block it is currently executing,
   forcing it back to the main loop at the next block boundary. */
static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    /* Serializes concurrent unlink attempts against each other only
       (see FIXME above: the executing CPU itself is not synchronized). */
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}
1531
/* mask must never be zero, except for A20 change call */
/* Raise the interrupt bits in 'mask' on env and make sure the CPU will
   notice them: either by kicking a halted CPU (iothread case), by
   forcing the icount decrementer to expire, or by unlinking the TB
   chain so execution returns to the main loop. */
void cpu_interrupt(CPUState *env, int mask)
{
    int old_mask;

    /* Remember the previous state so newly-raised bits can be told
       apart from re-raised ones below. */
    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

#ifndef CONFIG_USER_ONLY
    /*
     * If called from iothread context, wake the target cpu in
     * case its halted.
     */
    if (!qemu_cpu_self(env)) {
        qemu_cpu_kick(env);
        return;
    }
#endif

    if (use_icount) {
        /* Make the instruction-count decrementer expire immediately so
           the interrupt is noticed at the next icount check. */
        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
        /* With icount, raising a *new* interrupt outside of an I/O
           instruction would make execution non-deterministic. */
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
#endif
    } else {
        cpu_unlink_tb(env);
    }
}
1563
/* Clear the given pending-interrupt bits on env; no kick or TB
   unlinking is needed for lowering a request. */
void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}
1568
/* Request that the CPU leave the execution loop as soon as possible:
   set the exit flag and break any chained TBs it may be running. */
void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}
1574
/* Table of logging flags: mask bit, user-visible name, and help text.
   Consumed by cpu_str_to_log_mask(); terminated by a {0, NULL, NULL}
   sentinel entry. */
const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      /* Help string is assembled from adjacent literals; the extra
         clause only exists on x86 targets. */
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
1606
#ifndef CONFIG_USER_ONLY
/* List of observers interested in physical-memory layout changes
   (e.g. KVM / vhost style clients registered at run time). */
static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
    = QLIST_HEAD_INITIALIZER(memory_client_list);

/* Notify every registered client that [start_addr, start_addr+size)
   now maps to 'phys_offset'. */
static void cpu_notify_set_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        client->set_memory(client, start_addr, size, phys_offset);
    }
}
1620
1621static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
1622 target_phys_addr_t end)
1623{
1624 CPUPhysMemoryClient *client;
1625 QLIST_FOREACH(client, &memory_client_list, list) {
1626 int r = client->sync_dirty_bitmap(client, start, end);
1627 if (r < 0)
1628 return r;
1629 }
1630 return 0;
1631}
1632
1633static int cpu_notify_migration_log(int enable)
1634{
1635 CPUPhysMemoryClient *client;
1636 QLIST_FOREACH(client, &memory_client_list, list) {
1637 int r = client->migration_log(client, enable);
1638 if (r < 0)
1639 return r;
1640 }
1641 return 0;
1642}
1643
/* Replay every mapped page of one L1 slot of the physical page table
   to 'client', page by page.  Used when a client registers late so it
   sees the already-established memory layout. */
static void phys_page_for_each_in_l1_map(PhysPageDesc **phys_map,
                                         CPUPhysMemoryClient *client)
{
    PhysPageDesc *pd;
    int l1, l2;

    for (l1 = 0; l1 < L1_SIZE; ++l1) {
        pd = phys_map[l1];
        if (!pd) {
            continue;   /* unpopulated L2 table */
        }
        for (l2 = 0; l2 < L2_SIZE; ++l2) {
            if (pd[l2].phys_offset == IO_MEM_UNASSIGNED) {
                continue;
            }
            /* NOTE(review): region_offset is passed where callers of
               set_memory otherwise pass a guest-physical start address
               (cf. cpu_notify_set_memory) — looks suspicious; confirm
               against the CPUPhysMemoryClient contract. */
            client->set_memory(client, pd[l2].region_offset,
                               TARGET_PAGE_SIZE, pd[l2].phys_offset);
        }
    }
}
1664
/* Walk the whole physical page table and replay every mapped page to
   'client'.  The table has an extra indirection level when the
   physical address space exceeds 32 bits. */
static void phys_page_for_each(CPUPhysMemoryClient *client)
{
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    /* l1_phys_map is a table of pointers to L1 maps in this config. */
    void **phys_map = (void **)l1_phys_map;
    int l1;
    if (!l1_phys_map) {
        return;
    }
    for (l1 = 0; l1 < L1_SIZE; ++l1) {
        if (phys_map[l1]) {
            phys_page_for_each_in_l1_map(phys_map[l1], client);
        }
    }
#else
    if (!l1_phys_map) {
        return;
    }
    /* <= 32-bit physical space: l1_phys_map is itself the L1 map. */
    phys_page_for_each_in_l1_map(l1_phys_map, client);
#endif
}
1689
/* Register a new physical-memory client and immediately replay the
   current memory layout to it so it starts fully synchronized. */
void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_INSERT_HEAD(&memory_client_list, client, list);
    phys_page_for_each(client);
}
1695
/* Unlink a client from the notification list; the client's own storage
   is owned by the caller and is not freed here. */
void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_REMOVE(client, list);
}
#endif
1701
/* Return nonzero iff the n-byte token starting at s1 equals the whole
   NUL-terminated string s2. */
static int cmp1(const char *s1, int n, const char *s2)
{
    return strlen(s2) == (size_t)n && memcmp(s1, s2, n) == 0;
}
ths3b46e622007-09-17 08:09:54 +00001708
bellardf193c792004-03-21 17:06:25 +00001709/* takes a comma separated list of log masks. Return 0 if error. */
1710int cpu_str_to_log_mask(const char *str)
1711{
blueswir1c7cd6a32008-10-02 18:27:46 +00001712 const CPULogItem *item;
bellardf193c792004-03-21 17:06:25 +00001713 int mask;
1714 const char *p, *p1;
1715
1716 p = str;
1717 mask = 0;
1718 for(;;) {
1719 p1 = strchr(p, ',');
1720 if (!p1)
1721 p1 = p + strlen(p);
bellard8e3a9fd2004-10-09 17:32:58 +00001722 if(cmp1(p,p1-p,"all")) {
1723 for(item = cpu_log_items; item->mask != 0; item++) {
1724 mask |= item->mask;
1725 }
1726 } else {
bellardf193c792004-03-21 17:06:25 +00001727 for(item = cpu_log_items; item->mask != 0; item++) {
1728 if (cmp1(p, p1 - p, item->name))
1729 goto found;
1730 }
1731 return 0;
bellard8e3a9fd2004-10-09 17:32:58 +00001732 }
bellardf193c792004-03-21 17:06:25 +00001733 found:
1734 mask |= item->mask;
1735 if (*p1 != ',')
1736 break;
1737 p = p1 + 1;
1738 }
1739 return mask;
1740}
bellardea041c02003-06-25 16:16:50 +00001741
/* Print a fatal error message plus CPU state to stderr (and the log
   file, if enabled), then abort().  Never returns. */
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    /* The va_list is consumed twice (stderr and log file), so a copy
       is required — reusing 'ap' after vfprintf would be UB. */
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        /* Flush and close so the log survives the abort below. */
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        /* Restore the default SIGABRT handler so abort() really kills
           the process even if the guest installed its own handler. */
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
1781
thsc5be9f02007-02-28 20:20:53 +00001782CPUState *cpu_copy(CPUState *env)
1783{
ths01ba9812007-12-09 02:22:57 +00001784 CPUState *new_env = cpu_init(env->cpu_model_str);
thsc5be9f02007-02-28 20:20:53 +00001785 CPUState *next_cpu = new_env->next_cpu;
1786 int cpu_index = new_env->cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001787#if defined(TARGET_HAS_ICE)
1788 CPUBreakpoint *bp;
1789 CPUWatchpoint *wp;
1790#endif
1791
thsc5be9f02007-02-28 20:20:53 +00001792 memcpy(new_env, env, sizeof(CPUState));
aliguori5a38f082009-01-15 20:16:51 +00001793
1794 /* Preserve chaining and index. */
thsc5be9f02007-02-28 20:20:53 +00001795 new_env->next_cpu = next_cpu;
1796 new_env->cpu_index = cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001797
1798 /* Clone all break/watchpoints.
1799 Note: Once we support ptrace with hw-debug register access, make sure
1800 BP_CPU break/watchpoints are handled correctly on clone. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00001801 QTAILQ_INIT(&env->breakpoints);
1802 QTAILQ_INIT(&env->watchpoints);
aliguori5a38f082009-01-15 20:16:51 +00001803#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001804 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001805 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1806 }
Blue Swirl72cf2d42009-09-12 07:36:22 +00001807 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001808 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1809 wp->flags, NULL);
1810 }
1811#endif
1812
thsc5be9f02007-02-28 20:20:53 +00001813 return new_env;
1814}
1815
bellard01243112004-01-04 15:48:17 +00001816#if !defined(CONFIG_USER_ONLY)
1817
edgar_igl5c751e92008-05-06 08:44:21 +00001818static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1819{
1820 unsigned int i;
1821
1822 /* Discard jump cache entries for any tb which might potentially
1823 overlap the flushed page. */
1824 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1825 memset (&env->tb_jmp_cache[i], 0,
1826 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1827
1828 i = tb_jmp_cache_hash_page(addr);
1829 memset (&env->tb_jmp_cache[i], 0,
1830 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1831}
1832
/* Canonical "invalid" TLB entry: all compare fields are -1 so no guest
   address can match; used to wipe entries on flush. */
static CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read  = -1,
    .addr_write = -1,
    .addr_code  = -1,
    .addend     = -1,
};
1839
/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
/* Invalidate the entire software TLB (all slots, all MMU modes) and
   the TB jump cache of this CPU. */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
        }
    }

    /* Cached TB lookups may point at now-stale translations. */
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    tlb_flush_count++;      /* statistics only */
}
1864
bellard274da6b2004-05-20 21:56:27 +00001865static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
bellard61382a52003-10-27 21:22:23 +00001866{
ths5fafdf22007-09-16 21:08:06 +00001867 if (addr == (tlb_entry->addr_read &
bellard84b7b8e2005-11-28 21:19:04 +00001868 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00001869 addr == (tlb_entry->addr_write &
bellard84b7b8e2005-11-28 21:19:04 +00001870 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00001871 addr == (tlb_entry->addr_code &
bellard84b7b8e2005-11-28 21:19:04 +00001872 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
Igor Kovalenko08738982009-07-12 02:15:40 +04001873 *tlb_entry = s_cputlb_empty_entry;
bellard84b7b8e2005-11-28 21:19:04 +00001874 }
bellard61382a52003-10-27 21:22:23 +00001875}
1876
/* Invalidate the TLB slot for one guest virtual page in every MMU
   mode, plus the TB jump-cache entries that may reference it. */
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    /* Direct-mapped TLB: one slot per page index per MMU mode. */
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

    tlb_flush_jmp_cache(env, addr);
}
1896
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
/* Clearing CODE_DIRTY_FLAG forces writes to this RAM page through the
   slow path, where self-modifying code is caught. */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}
1905
/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
/* Sets CODE_DIRTY_FLAG directly; env/vaddr are unused here. */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}
1913
ths5fafdf22007-09-16 21:08:06 +00001914static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
bellard1ccde1c2004-02-06 19:46:14 +00001915 unsigned long start, unsigned long length)
1916{
1917 unsigned long addr;
bellard84b7b8e2005-11-28 21:19:04 +00001918 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1919 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
bellard1ccde1c2004-02-06 19:46:14 +00001920 if ((addr - start) < length) {
pbrook0f459d12008-06-09 00:20:13 +00001921 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
bellard1ccde1c2004-02-06 19:46:14 +00001922 }
1923 }
1924}
1925
/* Note: start and end must be within the same ram block.  */
/* Clear the given dirty flags for RAM range [start, end) and patch
   every CPU's TLB so subsequent writes to the range go through the
   slow path (re-dirtying the pages). */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    /* Clear the requested flag bits in the per-page dirty byte array. */
    len = length >> TARGET_PAGE_BITS;
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_get_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
        != (end - 1) - start) {
        abort();
    }

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for(i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
        }
    }
}
1966
aliguori74576192008-10-06 14:02:03 +00001967int cpu_physical_memory_set_dirty_tracking(int enable)
1968{
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001969 int ret = 0;
aliguori74576192008-10-06 14:02:03 +00001970 in_migration = enable;
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001971 ret = cpu_notify_migration_log(!!enable);
1972 return ret;
aliguori74576192008-10-06 14:02:03 +00001973}
1974
/* Return nonzero if dirty-page tracking is currently enabled. */
int cpu_physical_memory_get_dirty_tracking(void)
{
    return in_migration;
}
1979
Anthony Liguoric227f092009-10-01 16:12:16 -05001980int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
1981 target_phys_addr_t end_addr)
aliguori2bec46d2008-11-24 20:21:41 +00001982{
Michael S. Tsirkin7b8f3b72010-01-27 22:07:21 +02001983 int ret;
Jan Kiszka151f7742009-05-01 20:52:47 +02001984
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001985 ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
Jan Kiszka151f7742009-05-01 20:52:47 +02001986 return ret;
aliguori2bec46d2008-11-24 20:21:41 +00001987}
1988
bellard3a7d9292005-08-21 09:26:42 +00001989static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1990{
Anthony Liguoric227f092009-10-01 16:12:16 -05001991 ram_addr_t ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00001992 void *p;
bellard3a7d9292005-08-21 09:26:42 +00001993
bellard84b7b8e2005-11-28 21:19:04 +00001994 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
pbrook5579c7f2009-04-11 14:47:08 +00001995 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
1996 + tlb_entry->addend);
1997 ram_addr = qemu_ram_addr_from_host(p);
bellard3a7d9292005-08-21 09:26:42 +00001998 if (!cpu_physical_memory_is_dirty(ram_addr)) {
pbrook0f459d12008-06-09 00:20:13 +00001999 tlb_entry->addr_write |= TLB_NOTDIRTY;
bellard3a7d9292005-08-21 09:26:42 +00002000 }
2001 }
2002}
2003
2004/* update the TLB according to the current state of the dirty bits */
2005void cpu_tlb_update_dirty(CPUState *env)
2006{
2007 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002008 int mmu_idx;
2009 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2010 for(i = 0; i < CPU_TLB_SIZE; i++)
2011 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2012 }
bellard3a7d9292005-08-21 09:26:42 +00002013}
2014
/* If this entry is the NOTDIRTY mapping for 'vaddr', restore the plain
   (fast-path) write address now that the page is dirty again. */
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}
2020
pbrook0f459d12008-06-09 00:20:13 +00002021/* update the TLB corresponding to virtual page vaddr
2022 so that it is no longer dirty */
2023static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002024{
bellard1ccde1c2004-02-06 19:46:14 +00002025 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002026 int mmu_idx;
bellard1ccde1c2004-02-06 19:46:14 +00002027
pbrook0f459d12008-06-09 00:20:13 +00002028 vaddr &= TARGET_PAGE_MASK;
bellard1ccde1c2004-02-06 19:46:14 +00002029 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002030 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2031 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
bellard9fa3e852004-01-04 18:06:42 +00002032}
2033
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    target_phys_addr_t iotlb;

    /* Look up the physical page descriptor; unmapped pages are treated
       as unassigned I/O. */
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    ret = 0;
    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM.  */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        if (p) {
            iotlb += p->region_offset;
        } else {
            iotlb += paddr;
        }
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            iotlb = io_mem_watch + paddr;
            /* TODO: The memory case can be optimized by not trapping
               reads of pages with a write breakpoint.  */
            address |= TLB_MMIO;
        }
    }

    /* Fill the direct-mapped TLB slot for this page/MMU mode.  The
       stored values are pre-biased by vaddr so the fast path can use
       simple additions. */
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;     /* -1 never matches a page address */
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            /* Clean RAM: writes must take the slow path to set dirty bits. */
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
    return ret;
}
2136
#else

/* User-mode emulation (#else branch of !CONFIG_USER_ONLY): there is no
   software TLB, so the flush operations are no-ops. */
void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}
2146
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 * Returns 0, or the first non-zero value returned by 'fn'
 * (which stops the walk early).
 */
int walk_memory_regions(void *priv,
    int (*fn)(void *, unsigned long, unsigned long, unsigned long))
{
    unsigned long start, end;
    PageDesc *p = NULL;
    int i, j, prot, prot1;
    int rc = 0;

    /* start == -1 means "no open region". */
    start = end = -1;
    prot = 0;

    /* Note: i runs up to and *including* L1_SIZE; the extra iteration
       uses p == NULL (prot1 == 0) as a sentinel so a region that ends
       at the top of the address space is still flushed to 'fn'. */
    for (i = 0; i <= L1_SIZE; i++) {
        p = (i < L1_SIZE) ? l1_map[i] : NULL;
        for (j = 0; j < L2_SIZE; j++) {
            prot1 = (p == NULL) ? 0 : p[j].flags;
            /*
             * "region" is one continuous chunk of memory
             * that has same protection flags set.
             */
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    rc = (*fn)(priv, start, end, prot);
                    /* callback can stop iteration by returning != 0 */
                    if (rc != 0)
                        return (rc);
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (p == NULL)
                break;      /* whole L2 table absent: one pass suffices */
        }
    }
    return (rc);
}
2190
2191static int dump_region(void *priv, unsigned long start,
2192 unsigned long end, unsigned long prot)
2193{
2194 FILE *f = (FILE *)priv;
2195
2196 (void) fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2197 start, end, end - start,
2198 ((prot & PAGE_READ) ? 'r' : '-'),
2199 ((prot & PAGE_WRITE) ? 'w' : '-'),
2200 ((prot & PAGE_EXEC) ? 'x' : '-'));
2201
2202 return (0);
2203}
2204
/* dump memory mappings */
/* Print a /proc/self/maps-style listing of guest regions to 'f',
   one line per region via the dump_region callback. */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}
2212
pbrook53a59602006-03-25 19:31:22 +00002213int page_get_flags(target_ulong address)
bellard33417e72003-08-10 21:47:01 +00002214{
bellard9fa3e852004-01-04 18:06:42 +00002215 PageDesc *p;
2216
2217 p = page_find(address >> TARGET_PAGE_BITS);
bellard33417e72003-08-10 21:47:01 +00002218 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00002219 return 0;
2220 return p->flags;
bellard33417e72003-08-10 21:47:01 +00002221}
2222
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    /* mmap_lock should already be held. */
    /* Work on whole target pages: round 'start' down, 'end' up. */
    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    /* PAGE_WRITE_ORG remembers that the page is logically writable even
       if PAGE_WRITE is later dropped to trap writes to translated code
       (see page_unprotect()). */
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* We may be called for host regions that are outside guest
           address space. */
        if (!p)
            return;
        /* if the write protection is set, then we invalidate the code
           inside */
        /* i.e. the page is becoming writable while it still holds
           translation blocks (p->first_tb), so those TBs are stale. */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}
2252
ths3d97b402007-11-02 19:02:07 +00002253int page_check_range(target_ulong start, target_ulong len, int flags)
2254{
2255 PageDesc *p;
2256 target_ulong end;
2257 target_ulong addr;
2258
balrog55f280c2008-10-28 10:24:11 +00002259 if (start + len < start)
2260 /* we've wrapped around */
2261 return -1;
2262
ths3d97b402007-11-02 19:02:07 +00002263 end = TARGET_PAGE_ALIGN(start+len); /* must do before we loose bits in the next step */
2264 start = start & TARGET_PAGE_MASK;
2265
ths3d97b402007-11-02 19:02:07 +00002266 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2267 p = page_find(addr >> TARGET_PAGE_BITS);
2268 if( !p )
2269 return -1;
2270 if( !(p->flags & PAGE_VALID) )
2271 return -1;
2272
bellarddae32702007-11-14 10:51:00 +00002273 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
ths3d97b402007-11-02 19:02:07 +00002274 return -1;
bellarddae32702007-11-14 10:51:00 +00002275 if (flags & PAGE_WRITE) {
2276 if (!(p->flags & PAGE_WRITE_ORG))
2277 return -1;
2278 /* unprotect the page if it was put read-only because it
2279 contains translated code */
2280 if (!(p->flags & PAGE_WRITE)) {
2281 if (!page_unprotect(addr, 0, NULL))
2282 return -1;
2283 }
2284 return 0;
2285 }
ths3d97b402007-11-02 19:02:07 +00002286 }
2287 return 0;
2288}
2289
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok. */
    mmap_lock();

    /* One host page may span several target pages; operate on the whole
       host page containing 'address'. */
    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1) {
        mmap_unlock();
        return 0;
    }
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    /* OR together the flags of every target page in this host page;
       mprotect() below can only act at host-page granularity. */
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            mmap_unlock();
            return 1;
        }
    }
    /* Not a write-protect fault we can handle (page never writable, or
       already writable): let the caller deliver the fault. */
    mmap_unlock();
    return 0;
}
2338
/* No-op stub: in the CONFIG_USER_ONLY build (see the #endif just below)
   there is no softmmu TLB state to mark dirty. */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
bellard9fa3e852004-01-04 18:06:42 +00002343#endif /* defined(CONFIG_USER_ONLY) */
2344
pbrooke2eef172008-06-08 01:09:01 +00002345#if !defined(CONFIG_USER_ONLY)
pbrook8da3ff12008-12-01 18:59:50 +00002346
Paul Brookc04b2b72010-03-01 03:31:14 +00002347#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2348typedef struct subpage_t {
2349 target_phys_addr_t base;
2350 CPUReadMemoryFunc * const *mem_read[TARGET_PAGE_SIZE][4];
2351 CPUWriteMemoryFunc * const *mem_write[TARGET_PAGE_SIZE][4];
2352 void *opaque[TARGET_PAGE_SIZE][2][4];
2353 ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
2354} subpage_t;
2355
Anthony Liguoric227f092009-10-01 16:12:16 -05002356static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2357 ram_addr_t memory, ram_addr_t region_offset);
2358static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2359 ram_addr_t orig_memory, ram_addr_t region_offset);
/* Intersect the region [start_addr, start_addr + orig_size) with the
   target page containing 'addr': start_addr2/end_addr2 receive the first
   and last byte offsets of the region inside that page, and need_subpage
   is set to 1 when the region covers only part of the page.
   NOTE(review): the macro implicitly reads 'orig_size' from the caller's
   scope even though it is not a parameter -- confirm before reusing it
   outside cpu_register_physical_memory_offset(). */
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
2379
/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ.  */
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
                                         ram_addr_t size,
                                         ram_addr_t phys_offset,
                                         ram_addr_t region_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;  /* CHECK_SUBPAGE below reads this by name */
    void *subpage;

    /* Let listeners (e.g. KVM) know about the mapping change first. */
    cpu_notify_set_memory(start_addr, size, phys_offset);

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    /* Round the size up to whole target pages. */
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            /* Page already mapped: may need to overlay a subpage on top
               of the existing mapping when the new region is not
               page-aligned or the device needs sub-width access. */
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    /* Convert the existing full-page mapping into a
                       subpage container first. */
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    /* Page is already a subpage container: reuse it. */
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                /* RAM/ROM pages advance their backing offset per page. */
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            /* Previously unassigned page: allocate a descriptor. */
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    /* Partial-page IO region over unassigned memory. */
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
2468
bellardba863452006-09-24 18:41:10 +00002469/* XXX: temporary until new memory mapping API */
Anthony Liguoric227f092009-10-01 16:12:16 -05002470ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
bellardba863452006-09-24 18:41:10 +00002471{
2472 PhysPageDesc *p;
2473
2474 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2475 if (!p)
2476 return IO_MEM_UNASSIGNED;
2477 return p->phys_offset;
2478}
2479
Anthony Liguoric227f092009-10-01 16:12:16 -05002480void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002481{
2482 if (kvm_enabled())
2483 kvm_coalesce_mmio_region(addr, size);
2484}
2485
Anthony Liguoric227f092009-10-01 16:12:16 -05002486void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002487{
2488 if (kvm_enabled())
2489 kvm_uncoalesce_mmio_region(addr, size);
2490}
2491
/* Flush any pending coalesced MMIO in the KVM kernel buffer; no-op
   when KVM is not enabled. */
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}
2497
Marcelo Tosattic9027602010-03-01 20:25:08 -03002498#if defined(__linux__) && !defined(TARGET_S390X)
2499
#include <sys/vfs.h>

#define HUGETLBFS_MAGIC 0x958458f6

/* Return the filesystem block size for 'path' (the huge page size on
   hugetlbfs), or 0 when statfs() fails.  A warning is printed when the
   path is not on hugetlbfs, but its block size is still returned. */
static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int err;

    /* Retry statfs() while it is interrupted by a signal. */
    for (;;) {
        err = statfs(path, &fs);
        if (err == 0 || errno != EINTR) {
            break;
        }
    }

    if (err != 0) {
        perror("statfs");
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC) {
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
    }

    return fs.f_bsize;
}
2523
/* Allocate 'memory' bytes of guest RAM backed by a (deleted) temporary
   file under 'path' -- normally a hugetlbfs mount, so the guest gets
   huge pages.  Returns the mapped area, or NULL on any failure (callers
   then fall back or abort). */
static void *file_ram_alloc(ram_addr_t memory, const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    /* Smaller-than-one-huge-page allocations are not worth backing here. */
    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("mkstemp");
        free(filename);
        return NULL;
    }
    /* Unlink immediately: the file lives only as long as fd/mapping. */
    unlink(filename);
    free(filename);

    /* Round the size up to a whole number of huge pages. */
    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }
    /* NOTE(review): 'fd' is never closed on the success path.  The
       mapping keeps the pages alive regardless, so this leaks only the
       descriptor -- confirm whether keeping it open is intentional. */
    return area;
}
2589#endif
2590
/* Allocate a new block of guest RAM of 'size' bytes (rounded up to a
   whole target page), link it into the global ram_blocks list, extend
   the dirty-page bitmap, and return the block's ram_addr_t offset. */
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = qemu_malloc(sizeof(*new_block));

    if (mem_path) {
        /* -mem-path given: back the RAM with a file (hugetlbfs). */
#if defined (__linux__) && !defined(TARGET_S390X)
        new_block->host = file_ram_alloc(size, mem_path);
        if (!new_block->host)
            exit(1);
#else
        fprintf(stderr, "-mem-path option unsupported\n");
        exit(1);
#endif
    } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
        /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
        /* NOTE(review): this mmap() result is not checked against
           MAP_FAILED -- confirm failure handling. */
        new_block->host = mmap((void*)0x1000000, size,
                               PROT_EXEC|PROT_READ|PROT_WRITE,
                               MAP_SHARED | MAP_ANONYMOUS, -1, 0);
#else
        new_block->host = qemu_vmalloc(size);
#endif
#ifdef MADV_MERGEABLE
        /* Allow KSM to merge identical pages; best effort. */
        madvise(new_block->host, size, MADV_MERGEABLE);
#endif
    }
    new_block->offset = last_ram_offset;
    new_block->length = size;

    /* Push onto the head of the global RAM block list. */
    new_block->next = ram_blocks;
    ram_blocks = new_block;

    /* Grow the dirty bitmap and mark the new range fully dirty (0xff). */
    phys_ram_dirty = qemu_realloc(phys_ram_dirty,
        (last_ram_offset + size) >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    last_ram_offset += size;

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}
bellarde9a1ab12007-02-08 23:08:38 +00002638
/* Free a RAM block previously returned by qemu_ram_alloc().
   Currently a stub: the block and its host memory are leaked. */
void qemu_ram_free(ram_addr_t addr)
{
    /* TODO: implement this.  */
}
2643
pbrookdc828ca2009-04-09 22:21:07 +00002644/* Return a host pointer to ram allocated with qemu_ram_alloc.
pbrook5579c7f2009-04-11 14:47:08 +00002645 With the exception of the softmmu code in this file, this should
2646 only be used for local memory (e.g. video ram) that the device owns,
2647 and knows it isn't going to access beyond the end of the block.
2648
2649 It should not be used for general purpose DMA.
2650 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2651 */
Anthony Liguoric227f092009-10-01 16:12:16 -05002652void *qemu_get_ram_ptr(ram_addr_t addr)
pbrookdc828ca2009-04-09 22:21:07 +00002653{
pbrook94a6b542009-04-11 17:15:54 +00002654 RAMBlock *prev;
2655 RAMBlock **prevp;
2656 RAMBlock *block;
2657
pbrook94a6b542009-04-11 17:15:54 +00002658 prev = NULL;
2659 prevp = &ram_blocks;
2660 block = ram_blocks;
2661 while (block && (block->offset > addr
2662 || block->offset + block->length <= addr)) {
2663 if (prev)
2664 prevp = &prev->next;
2665 prev = block;
2666 block = block->next;
2667 }
2668 if (!block) {
2669 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2670 abort();
2671 }
2672 /* Move this entry to to start of the list. */
2673 if (prev) {
2674 prev->next = block->next;
2675 block->next = *prevp;
2676 *prevp = block;
2677 }
2678 return block->host + (addr - block->offset);
pbrookdc828ca2009-04-09 22:21:07 +00002679}
2680
pbrook5579c7f2009-04-11 14:47:08 +00002681/* Some of the softmmu routines need to translate from a host pointer
2682 (typically a TLB entry) back to a ram offset. */
Anthony Liguoric227f092009-10-01 16:12:16 -05002683ram_addr_t qemu_ram_addr_from_host(void *ptr)
pbrook5579c7f2009-04-11 14:47:08 +00002684{
pbrook94a6b542009-04-11 17:15:54 +00002685 RAMBlock *prev;
pbrook94a6b542009-04-11 17:15:54 +00002686 RAMBlock *block;
2687 uint8_t *host = ptr;
2688
pbrook94a6b542009-04-11 17:15:54 +00002689 prev = NULL;
pbrook94a6b542009-04-11 17:15:54 +00002690 block = ram_blocks;
2691 while (block && (block->host > host
2692 || block->host + block->length <= host)) {
pbrook94a6b542009-04-11 17:15:54 +00002693 prev = block;
2694 block = block->next;
2695 }
2696 if (!block) {
2697 fprintf(stderr, "Bad ram pointer %p\n", ptr);
2698 abort();
2699 }
2700 return block->offset + (host - block->host);
pbrook5579c7f2009-04-11 14:47:08 +00002701}
2702
/* Read handler for unassigned physical memory: optionally logs the
   access (DEBUG_UNASSIGNED) and, on targets that model the fault
   (SPARC, MicroBlaze), raises it via do_unassigned_access(); reads
   always return 0.  Size argument to do_unassigned_access is 1 byte. */
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 1);
#endif
    return 0;
}

/* As unassigned_mem_readb, for 2-byte reads. */
static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 2);
#endif
    return 0;
}

/* As unassigned_mem_readb, for 4-byte reads. */
static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 4);
#endif
    return 0;
}

/* Write handler for unassigned physical memory: the value is discarded;
   logging/fault behavior mirrors the read handlers (second argument 1
   marks a write).  1-byte variant. */
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}

/* As unassigned_mem_writeb, for 2-byte writes. */
static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}

/* As unassigned_mem_writeb, for 4-byte writes. */
static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}

/* Dispatch table for unassigned-memory reads, in order byte/word/long. */
static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};

/* Dispatch table for unassigned-memory writes, in order byte/word/long. */
static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};
2777
/* 1-byte write handler for RAM pages whose dirty bits are not all set.
   If the page still holds translated code (CODE_DIRTY_FLAG clear), the
   affected TBs are invalidated BEFORE the write; the byte is then
   stored into host RAM and the page's dirty bits are set.  Once every
   dirty bit is set (0xff) the page no longer needs this slow path, so
   its TLB entry is redirected via tlb_set_dirty(). */
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        /* re-read: invalidation may have updated the dirty flags */
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

/* As notdirty_mem_writeb, for 2-byte writes. */
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

/* As notdirty_mem_writeb, for 4-byte writes. */
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

/* Placeholder read table paired with notdirty_mem_write: the notdirty
   path is only installed for writes, so these slots are never called. */
static CPUReadMemoryFunc * const error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

/* Dispatch table for notdirty writes, in order byte/word/long. */
static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
2849
/* Generate a debug exception if a watchpoint has been hit.
   'offset' is the in-page offset of the access (combined with
   env->mem_io_vaddr to rebuild the guest virtual address), 'len_mask'
   encodes the access width, and 'flags' selects BP_MEM_READ/WRITE. */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        /* Match if either end of the access range falls inside the
           watchpoint, and the access kind (read/write) is watched. */
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                /* Recover the guest PC from the host PC of the access,
                   then invalidate the TB so execution can be re-entered
                   precisely at the faulting instruction. */
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    /* Regenerate a single-instruction TB so the debug
                       interrupt fires right after this access. */
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                /* Does not return: restarts execution from the signal. */
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
2894
pbrook6658ffb2007-03-16 23:58:11 +00002895/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2896 so these check for a hit then pass through to the normal out-of-line
2897 phys routines. */
Anthony Liguoric227f092009-10-01 16:12:16 -05002898static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
pbrook6658ffb2007-03-16 23:58:11 +00002899{
aliguorib4051332008-11-18 20:14:20 +00002900 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
pbrook6658ffb2007-03-16 23:58:11 +00002901 return ldub_phys(addr);
2902}
2903
Anthony Liguoric227f092009-10-01 16:12:16 -05002904static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
pbrook6658ffb2007-03-16 23:58:11 +00002905{
aliguorib4051332008-11-18 20:14:20 +00002906 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
pbrook6658ffb2007-03-16 23:58:11 +00002907 return lduw_phys(addr);
2908}
2909
Anthony Liguoric227f092009-10-01 16:12:16 -05002910static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
pbrook6658ffb2007-03-16 23:58:11 +00002911{
aliguorib4051332008-11-18 20:14:20 +00002912 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
pbrook6658ffb2007-03-16 23:58:11 +00002913 return ldl_phys(addr);
2914}
2915
Anthony Liguoric227f092009-10-01 16:12:16 -05002916static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
pbrook6658ffb2007-03-16 23:58:11 +00002917 uint32_t val)
2918{
aliguorib4051332008-11-18 20:14:20 +00002919 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
pbrook6658ffb2007-03-16 23:58:11 +00002920 stb_phys(addr, val);
2921}
2922
Anthony Liguoric227f092009-10-01 16:12:16 -05002923static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
pbrook6658ffb2007-03-16 23:58:11 +00002924 uint32_t val)
2925{
aliguorib4051332008-11-18 20:14:20 +00002926 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
pbrook6658ffb2007-03-16 23:58:11 +00002927 stw_phys(addr, val);
2928}
2929
Anthony Liguoric227f092009-10-01 16:12:16 -05002930static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
pbrook6658ffb2007-03-16 23:58:11 +00002931 uint32_t val)
2932{
aliguorib4051332008-11-18 20:14:20 +00002933 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
pbrook6658ffb2007-03-16 23:58:11 +00002934 stl_phys(addr, val);
2935}
2936
Blue Swirld60efc62009-08-25 18:29:31 +00002937static CPUReadMemoryFunc * const watch_mem_read[3] = {
pbrook6658ffb2007-03-16 23:58:11 +00002938 watch_mem_readb,
2939 watch_mem_readw,
2940 watch_mem_readl,
2941};
2942
Blue Swirld60efc62009-08-25 18:29:31 +00002943static CPUWriteMemoryFunc * const watch_mem_write[3] = {
pbrook6658ffb2007-03-16 23:58:11 +00002944 watch_mem_writeb,
2945 watch_mem_writew,
2946 watch_mem_writel,
2947};
pbrook6658ffb2007-03-16 23:58:11 +00002948
Anthony Liguoric227f092009-10-01 16:12:16 -05002949static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
blueswir1db7b5422007-05-26 17:36:03 +00002950 unsigned int len)
2951{
blueswir1db7b5422007-05-26 17:36:03 +00002952 uint32_t ret;
2953 unsigned int idx;
2954
pbrook8da3ff12008-12-01 18:59:50 +00002955 idx = SUBPAGE_IDX(addr);
blueswir1db7b5422007-05-26 17:36:03 +00002956#if defined(DEBUG_SUBPAGE)
2957 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2958 mmio, len, addr, idx);
2959#endif
pbrook8da3ff12008-12-01 18:59:50 +00002960 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2961 addr + mmio->region_offset[idx][0][len]);
blueswir1db7b5422007-05-26 17:36:03 +00002962
2963 return ret;
2964}
2965
Anthony Liguoric227f092009-10-01 16:12:16 -05002966static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
blueswir1db7b5422007-05-26 17:36:03 +00002967 uint32_t value, unsigned int len)
2968{
blueswir1db7b5422007-05-26 17:36:03 +00002969 unsigned int idx;
2970
pbrook8da3ff12008-12-01 18:59:50 +00002971 idx = SUBPAGE_IDX(addr);
blueswir1db7b5422007-05-26 17:36:03 +00002972#if defined(DEBUG_SUBPAGE)
2973 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2974 mmio, len, addr, idx, value);
2975#endif
pbrook8da3ff12008-12-01 18:59:50 +00002976 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2977 addr + mmio->region_offset[idx][1][len],
2978 value);
blueswir1db7b5422007-05-26 17:36:03 +00002979}
2980
Anthony Liguoric227f092009-10-01 16:12:16 -05002981static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
blueswir1db7b5422007-05-26 17:36:03 +00002982{
2983#if defined(DEBUG_SUBPAGE)
2984 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2985#endif
2986
2987 return subpage_readlen(opaque, addr, 0);
2988}
2989
Anthony Liguoric227f092009-10-01 16:12:16 -05002990static void subpage_writeb (void *opaque, target_phys_addr_t addr,
blueswir1db7b5422007-05-26 17:36:03 +00002991 uint32_t value)
2992{
2993#if defined(DEBUG_SUBPAGE)
2994 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2995#endif
2996 subpage_writelen(opaque, addr, value, 0);
2997}
2998
Anthony Liguoric227f092009-10-01 16:12:16 -05002999static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
blueswir1db7b5422007-05-26 17:36:03 +00003000{
3001#if defined(DEBUG_SUBPAGE)
3002 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3003#endif
3004
3005 return subpage_readlen(opaque, addr, 1);
3006}
3007
Anthony Liguoric227f092009-10-01 16:12:16 -05003008static void subpage_writew (void *opaque, target_phys_addr_t addr,
blueswir1db7b5422007-05-26 17:36:03 +00003009 uint32_t value)
3010{
3011#if defined(DEBUG_SUBPAGE)
3012 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3013#endif
3014 subpage_writelen(opaque, addr, value, 1);
3015}
3016
Anthony Liguoric227f092009-10-01 16:12:16 -05003017static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
blueswir1db7b5422007-05-26 17:36:03 +00003018{
3019#if defined(DEBUG_SUBPAGE)
3020 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3021#endif
3022
3023 return subpage_readlen(opaque, addr, 2);
3024}
3025
3026static void subpage_writel (void *opaque,
Anthony Liguoric227f092009-10-01 16:12:16 -05003027 target_phys_addr_t addr, uint32_t value)
blueswir1db7b5422007-05-26 17:36:03 +00003028{
3029#if defined(DEBUG_SUBPAGE)
3030 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3031#endif
3032 subpage_writelen(opaque, addr, value, 2);
3033}
3034
Blue Swirld60efc62009-08-25 18:29:31 +00003035static CPUReadMemoryFunc * const subpage_read[] = {
blueswir1db7b5422007-05-26 17:36:03 +00003036 &subpage_readb,
3037 &subpage_readw,
3038 &subpage_readl,
3039};
3040
Blue Swirld60efc62009-08-25 18:29:31 +00003041static CPUWriteMemoryFunc * const subpage_write[] = {
blueswir1db7b5422007-05-26 17:36:03 +00003042 &subpage_writeb,
3043 &subpage_writew,
3044 &subpage_writel,
3045};
3046
Anthony Liguoric227f092009-10-01 16:12:16 -05003047static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3048 ram_addr_t memory, ram_addr_t region_offset)
blueswir1db7b5422007-05-26 17:36:03 +00003049{
3050 int idx, eidx;
blueswir14254fab2008-01-01 16:57:19 +00003051 unsigned int i;
blueswir1db7b5422007-05-26 17:36:03 +00003052
3053 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3054 return -1;
3055 idx = SUBPAGE_IDX(start);
3056 eidx = SUBPAGE_IDX(end);
3057#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00003058 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
blueswir1db7b5422007-05-26 17:36:03 +00003059 mmio, start, end, idx, eidx, memory);
3060#endif
3061 memory >>= IO_MEM_SHIFT;
3062 for (; idx <= eidx; idx++) {
blueswir14254fab2008-01-01 16:57:19 +00003063 for (i = 0; i < 4; i++) {
blueswir13ee89922008-01-02 19:45:26 +00003064 if (io_mem_read[memory][i]) {
3065 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
3066 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
pbrook8da3ff12008-12-01 18:59:50 +00003067 mmio->region_offset[idx][0][i] = region_offset;
blueswir13ee89922008-01-02 19:45:26 +00003068 }
3069 if (io_mem_write[memory][i]) {
3070 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
3071 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
pbrook8da3ff12008-12-01 18:59:50 +00003072 mmio->region_offset[idx][1][i] = region_offset;
blueswir13ee89922008-01-02 19:45:26 +00003073 }
blueswir14254fab2008-01-01 16:57:19 +00003074 }
blueswir1db7b5422007-05-26 17:36:03 +00003075 }
3076
3077 return 0;
3078}
3079
Anthony Liguoric227f092009-10-01 16:12:16 -05003080static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3081 ram_addr_t orig_memory, ram_addr_t region_offset)
blueswir1db7b5422007-05-26 17:36:03 +00003082{
Anthony Liguoric227f092009-10-01 16:12:16 -05003083 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00003084 int subpage_memory;
3085
Anthony Liguoric227f092009-10-01 16:12:16 -05003086 mmio = qemu_mallocz(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00003087
3088 mmio->base = base;
Avi Kivity1eed09c2009-06-14 11:38:51 +03003089 subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
blueswir1db7b5422007-05-26 17:36:03 +00003090#if defined(DEBUG_SUBPAGE)
aliguori1eec6142009-02-05 22:06:18 +00003091 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3092 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
blueswir1db7b5422007-05-26 17:36:03 +00003093#endif
aliguori1eec6142009-02-05 22:06:18 +00003094 *phys = subpage_memory | IO_MEM_SUBPAGE;
3095 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
pbrook8da3ff12008-12-01 18:59:50 +00003096 region_offset);
blueswir1db7b5422007-05-26 17:36:03 +00003097
3098 return mmio;
3099}
3100
aliguori88715652009-02-11 15:20:58 +00003101static int get_free_io_mem_idx(void)
3102{
3103 int i;
3104
3105 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3106 if (!io_mem_used[i]) {
3107 io_mem_used[i] = 1;
3108 return i;
3109 }
Riku Voipioc6703b42009-12-03 15:56:05 +02003110 fprintf(stderr, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
aliguori88715652009-02-11 15:20:58 +00003111 return -1;
3112}
3113
bellard33417e72003-08-10 21:47:01 +00003114/* mem_read and mem_write are arrays of functions containing the
3115 function to access byte (index 0), word (index 1) and dword (index
Paul Brook0b4e6e32009-04-30 18:37:55 +01003116 2). Functions can be omitted with a NULL function pointer.
blueswir13ee89922008-01-02 19:45:26 +00003117 If io_index is non zero, the corresponding io zone is
blueswir14254fab2008-01-01 16:57:19 +00003118 modified. If it is zero, a new io zone is allocated. The return
3119 value can be used with cpu_register_physical_memory(). (-1) is
3120 returned if error. */
Avi Kivity1eed09c2009-06-14 11:38:51 +03003121static int cpu_register_io_memory_fixed(int io_index,
Blue Swirld60efc62009-08-25 18:29:31 +00003122 CPUReadMemoryFunc * const *mem_read,
3123 CPUWriteMemoryFunc * const *mem_write,
Avi Kivity1eed09c2009-06-14 11:38:51 +03003124 void *opaque)
bellard33417e72003-08-10 21:47:01 +00003125{
blueswir14254fab2008-01-01 16:57:19 +00003126 int i, subwidth = 0;
bellard33417e72003-08-10 21:47:01 +00003127
3128 if (io_index <= 0) {
aliguori88715652009-02-11 15:20:58 +00003129 io_index = get_free_io_mem_idx();
3130 if (io_index == -1)
3131 return io_index;
bellard33417e72003-08-10 21:47:01 +00003132 } else {
Avi Kivity1eed09c2009-06-14 11:38:51 +03003133 io_index >>= IO_MEM_SHIFT;
bellard33417e72003-08-10 21:47:01 +00003134 if (io_index >= IO_MEM_NB_ENTRIES)
3135 return -1;
3136 }
bellardb5ff1b32005-11-26 10:38:39 +00003137
bellard33417e72003-08-10 21:47:01 +00003138 for(i = 0;i < 3; i++) {
blueswir14254fab2008-01-01 16:57:19 +00003139 if (!mem_read[i] || !mem_write[i])
3140 subwidth = IO_MEM_SUBWIDTH;
bellard33417e72003-08-10 21:47:01 +00003141 io_mem_read[io_index][i] = mem_read[i];
3142 io_mem_write[io_index][i] = mem_write[i];
3143 }
bellarda4193c82004-06-03 14:01:43 +00003144 io_mem_opaque[io_index] = opaque;
blueswir14254fab2008-01-01 16:57:19 +00003145 return (io_index << IO_MEM_SHIFT) | subwidth;
bellard33417e72003-08-10 21:47:01 +00003146}
bellard61382a52003-10-27 21:22:23 +00003147
Blue Swirld60efc62009-08-25 18:29:31 +00003148int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3149 CPUWriteMemoryFunc * const *mem_write,
Avi Kivity1eed09c2009-06-14 11:38:51 +03003150 void *opaque)
3151{
3152 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
3153}
3154
aliguori88715652009-02-11 15:20:58 +00003155void cpu_unregister_io_memory(int io_table_address)
3156{
3157 int i;
3158 int io_index = io_table_address >> IO_MEM_SHIFT;
3159
3160 for (i=0;i < 3; i++) {
3161 io_mem_read[io_index][i] = unassigned_mem_read[i];
3162 io_mem_write[io_index][i] = unassigned_mem_write[i];
3163 }
3164 io_mem_opaque[io_index] = NULL;
3165 io_mem_used[io_index] = 0;
3166}
3167
Avi Kivitye9179ce2009-06-14 11:38:52 +03003168static void io_mem_init(void)
3169{
3170 int i;
3171
3172 cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
3173 cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
3174 cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
3175 for (i=0; i<5; i++)
3176 io_mem_used[i] = 1;
3177
3178 io_mem_watch = cpu_register_io_memory(watch_mem_read,
3179 watch_mem_write, NULL);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003180}
3181
pbrooke2eef172008-06-08 01:09:01 +00003182#endif /* !defined(CONFIG_USER_ONLY) */
3183
bellard13eb76e2004-01-24 15:23:36 +00003184/* physical memory access (slow version, mainly for debug) */
3185#if defined(CONFIG_USER_ONLY)
Paul Brooka68fe892010-03-01 00:08:59 +00003186int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3187 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003188{
3189 int l, flags;
3190 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00003191 void * p;
bellard13eb76e2004-01-24 15:23:36 +00003192
3193 while (len > 0) {
3194 page = addr & TARGET_PAGE_MASK;
3195 l = (page + TARGET_PAGE_SIZE) - addr;
3196 if (l > len)
3197 l = len;
3198 flags = page_get_flags(page);
3199 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00003200 return -1;
bellard13eb76e2004-01-24 15:23:36 +00003201 if (is_write) {
3202 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00003203 return -1;
bellard579a97f2007-11-11 14:26:47 +00003204 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003205 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00003206 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003207 memcpy(p, buf, l);
3208 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00003209 } else {
3210 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00003211 return -1;
bellard579a97f2007-11-11 14:26:47 +00003212 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003213 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00003214 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003215 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00003216 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00003217 }
3218 len -= l;
3219 buf += l;
3220 addr += l;
3221 }
Paul Brooka68fe892010-03-01 00:08:59 +00003222 return 0;
bellard13eb76e2004-01-24 15:23:36 +00003223}
bellard8df1cd02005-01-28 22:37:22 +00003224
bellard13eb76e2004-01-24 15:23:36 +00003225#else
Anthony Liguoric227f092009-10-01 16:12:16 -05003226void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
bellard13eb76e2004-01-24 15:23:36 +00003227 int len, int is_write)
3228{
3229 int l, io_index;
3230 uint8_t *ptr;
3231 uint32_t val;
Anthony Liguoric227f092009-10-01 16:12:16 -05003232 target_phys_addr_t page;
bellard2e126692004-04-25 21:28:44 +00003233 unsigned long pd;
bellard92e873b2004-05-21 14:52:29 +00003234 PhysPageDesc *p;
ths3b46e622007-09-17 08:09:54 +00003235
bellard13eb76e2004-01-24 15:23:36 +00003236 while (len > 0) {
3237 page = addr & TARGET_PAGE_MASK;
3238 l = (page + TARGET_PAGE_SIZE) - addr;
3239 if (l > len)
3240 l = len;
bellard92e873b2004-05-21 14:52:29 +00003241 p = phys_page_find(page >> TARGET_PAGE_BITS);
bellard13eb76e2004-01-24 15:23:36 +00003242 if (!p) {
3243 pd = IO_MEM_UNASSIGNED;
3244 } else {
3245 pd = p->phys_offset;
3246 }
ths3b46e622007-09-17 08:09:54 +00003247
bellard13eb76e2004-01-24 15:23:36 +00003248 if (is_write) {
bellard3a7d9292005-08-21 09:26:42 +00003249 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
Anthony Liguoric227f092009-10-01 16:12:16 -05003250 target_phys_addr_t addr1 = addr;
bellard13eb76e2004-01-24 15:23:36 +00003251 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00003252 if (p)
aurel326c2934d2009-02-18 21:37:17 +00003253 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bellard6a00d602005-11-21 23:25:50 +00003254 /* XXX: could force cpu_single_env to NULL to avoid
3255 potential bugs */
aurel326c2934d2009-02-18 21:37:17 +00003256 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003257 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003258 val = ldl_p(buf);
aurel326c2934d2009-02-18 21:37:17 +00003259 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
bellard13eb76e2004-01-24 15:23:36 +00003260 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003261 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003262 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003263 val = lduw_p(buf);
aurel326c2934d2009-02-18 21:37:17 +00003264 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
bellard13eb76e2004-01-24 15:23:36 +00003265 l = 2;
3266 } else {
bellard1c213d12005-09-03 10:49:04 +00003267 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003268 val = ldub_p(buf);
aurel326c2934d2009-02-18 21:37:17 +00003269 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
bellard13eb76e2004-01-24 15:23:36 +00003270 l = 1;
3271 }
3272 } else {
bellardb448f2f2004-02-25 23:24:04 +00003273 unsigned long addr1;
3274 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
bellard13eb76e2004-01-24 15:23:36 +00003275 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003276 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00003277 memcpy(ptr, buf, l);
bellard3a7d9292005-08-21 09:26:42 +00003278 if (!cpu_physical_memory_is_dirty(addr1)) {
3279 /* invalidate code */
3280 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3281 /* set dirty bit */
ths5fafdf22007-09-16 21:08:06 +00003282 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
bellardf23db162005-08-21 19:12:28 +00003283 (0xff & ~CODE_DIRTY_FLAG);
bellard3a7d9292005-08-21 09:26:42 +00003284 }
bellard13eb76e2004-01-24 15:23:36 +00003285 }
3286 } else {
ths5fafdf22007-09-16 21:08:06 +00003287 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
bellard2a4188a2006-06-25 21:54:59 +00003288 !(pd & IO_MEM_ROMD)) {
Anthony Liguoric227f092009-10-01 16:12:16 -05003289 target_phys_addr_t addr1 = addr;
bellard13eb76e2004-01-24 15:23:36 +00003290 /* I/O case */
3291 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00003292 if (p)
aurel326c2934d2009-02-18 21:37:17 +00003293 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3294 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003295 /* 32 bit read access */
aurel326c2934d2009-02-18 21:37:17 +00003296 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
bellardc27004e2005-01-03 23:35:10 +00003297 stl_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003298 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003299 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003300 /* 16 bit read access */
aurel326c2934d2009-02-18 21:37:17 +00003301 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
bellardc27004e2005-01-03 23:35:10 +00003302 stw_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003303 l = 2;
3304 } else {
bellard1c213d12005-09-03 10:49:04 +00003305 /* 8 bit read access */
aurel326c2934d2009-02-18 21:37:17 +00003306 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
bellardc27004e2005-01-03 23:35:10 +00003307 stb_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003308 l = 1;
3309 }
3310 } else {
3311 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003312 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bellard13eb76e2004-01-24 15:23:36 +00003313 (addr & ~TARGET_PAGE_MASK);
3314 memcpy(buf, ptr, l);
3315 }
3316 }
3317 len -= l;
3318 buf += l;
3319 addr += l;
3320 }
3321}
bellard8df1cd02005-01-28 22:37:22 +00003322
bellardd0ecd2a2006-04-23 17:14:48 +00003323/* used for ROM loading : can write in RAM and ROM */
Anthony Liguoric227f092009-10-01 16:12:16 -05003324void cpu_physical_memory_write_rom(target_phys_addr_t addr,
bellardd0ecd2a2006-04-23 17:14:48 +00003325 const uint8_t *buf, int len)
3326{
3327 int l;
3328 uint8_t *ptr;
Anthony Liguoric227f092009-10-01 16:12:16 -05003329 target_phys_addr_t page;
bellardd0ecd2a2006-04-23 17:14:48 +00003330 unsigned long pd;
3331 PhysPageDesc *p;
ths3b46e622007-09-17 08:09:54 +00003332
bellardd0ecd2a2006-04-23 17:14:48 +00003333 while (len > 0) {
3334 page = addr & TARGET_PAGE_MASK;
3335 l = (page + TARGET_PAGE_SIZE) - addr;
3336 if (l > len)
3337 l = len;
3338 p = phys_page_find(page >> TARGET_PAGE_BITS);
3339 if (!p) {
3340 pd = IO_MEM_UNASSIGNED;
3341 } else {
3342 pd = p->phys_offset;
3343 }
ths3b46e622007-09-17 08:09:54 +00003344
bellardd0ecd2a2006-04-23 17:14:48 +00003345 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
bellard2a4188a2006-06-25 21:54:59 +00003346 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3347 !(pd & IO_MEM_ROMD)) {
bellardd0ecd2a2006-04-23 17:14:48 +00003348 /* do nothing */
3349 } else {
3350 unsigned long addr1;
3351 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3352 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003353 ptr = qemu_get_ram_ptr(addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00003354 memcpy(ptr, buf, l);
3355 }
3356 len -= l;
3357 buf += l;
3358 addr += l;
3359 }
3360}
3361
aliguori6d16c2f2009-01-22 16:59:11 +00003362typedef struct {
3363 void *buffer;
Anthony Liguoric227f092009-10-01 16:12:16 -05003364 target_phys_addr_t addr;
3365 target_phys_addr_t len;
aliguori6d16c2f2009-01-22 16:59:11 +00003366} BounceBuffer;
3367
3368static BounceBuffer bounce;
3369
aliguoriba223c22009-01-22 16:59:16 +00003370typedef struct MapClient {
3371 void *opaque;
3372 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00003373 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00003374} MapClient;
3375
Blue Swirl72cf2d42009-09-12 07:36:22 +00003376static QLIST_HEAD(map_client_list, MapClient) map_client_list
3377 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003378
3379void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3380{
3381 MapClient *client = qemu_malloc(sizeof(*client));
3382
3383 client->opaque = opaque;
3384 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003385 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00003386 return client;
3387}
3388
3389void cpu_unregister_map_client(void *_client)
3390{
3391 MapClient *client = (MapClient *)_client;
3392
Blue Swirl72cf2d42009-09-12 07:36:22 +00003393 QLIST_REMOVE(client, link);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09003394 qemu_free(client);
aliguoriba223c22009-01-22 16:59:16 +00003395}
3396
3397static void cpu_notify_map_clients(void)
3398{
3399 MapClient *client;
3400
Blue Swirl72cf2d42009-09-12 07:36:22 +00003401 while (!QLIST_EMPTY(&map_client_list)) {
3402 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003403 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09003404 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00003405 }
3406}
3407
aliguori6d16c2f2009-01-22 16:59:11 +00003408/* Map a physical memory region into a host virtual address.
3409 * May map a subset of the requested range, given by and returned in *plen.
3410 * May return NULL if resources needed to perform the mapping are exhausted.
3411 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00003412 * Use cpu_register_map_client() to know when retrying the map operation is
3413 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00003414 */
Anthony Liguoric227f092009-10-01 16:12:16 -05003415void *cpu_physical_memory_map(target_phys_addr_t addr,
3416 target_phys_addr_t *plen,
aliguori6d16c2f2009-01-22 16:59:11 +00003417 int is_write)
3418{
Anthony Liguoric227f092009-10-01 16:12:16 -05003419 target_phys_addr_t len = *plen;
3420 target_phys_addr_t done = 0;
aliguori6d16c2f2009-01-22 16:59:11 +00003421 int l;
3422 uint8_t *ret = NULL;
3423 uint8_t *ptr;
Anthony Liguoric227f092009-10-01 16:12:16 -05003424 target_phys_addr_t page;
aliguori6d16c2f2009-01-22 16:59:11 +00003425 unsigned long pd;
3426 PhysPageDesc *p;
3427 unsigned long addr1;
3428
3429 while (len > 0) {
3430 page = addr & TARGET_PAGE_MASK;
3431 l = (page + TARGET_PAGE_SIZE) - addr;
3432 if (l > len)
3433 l = len;
3434 p = phys_page_find(page >> TARGET_PAGE_BITS);
3435 if (!p) {
3436 pd = IO_MEM_UNASSIGNED;
3437 } else {
3438 pd = p->phys_offset;
3439 }
3440
3441 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3442 if (done || bounce.buffer) {
3443 break;
3444 }
3445 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3446 bounce.addr = addr;
3447 bounce.len = l;
3448 if (!is_write) {
3449 cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3450 }
3451 ptr = bounce.buffer;
3452 } else {
3453 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
pbrook5579c7f2009-04-11 14:47:08 +00003454 ptr = qemu_get_ram_ptr(addr1);
aliguori6d16c2f2009-01-22 16:59:11 +00003455 }
3456 if (!done) {
3457 ret = ptr;
3458 } else if (ret + done != ptr) {
3459 break;
3460 }
3461
3462 len -= l;
3463 addr += l;
3464 done += l;
3465 }
3466 *plen = done;
3467 return ret;
3468}
3469
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        /* Direct RAM mapping: update dirty state page by page. */
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    /* Bounce-buffer mapping: flush writes back to guest memory, then
       release the buffer and wake any waiting map clients. */
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
bellardd0ecd2a2006-04-23 17:14:48 +00003505
bellard8df1cd02005-01-28 22:37:22 +00003506/* warning: addr must be aligned */
Anthony Liguoric227f092009-10-01 16:12:16 -05003507uint32_t ldl_phys(target_phys_addr_t addr)
bellard8df1cd02005-01-28 22:37:22 +00003508{
3509 int io_index;
3510 uint8_t *ptr;
3511 uint32_t val;
3512 unsigned long pd;
3513 PhysPageDesc *p;
3514
3515 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3516 if (!p) {
3517 pd = IO_MEM_UNASSIGNED;
3518 } else {
3519 pd = p->phys_offset;
3520 }
ths3b46e622007-09-17 08:09:54 +00003521
ths5fafdf22007-09-16 21:08:06 +00003522 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
bellard2a4188a2006-06-25 21:54:59 +00003523 !(pd & IO_MEM_ROMD)) {
bellard8df1cd02005-01-28 22:37:22 +00003524 /* I/O case */
3525 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00003526 if (p)
3527 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bellard8df1cd02005-01-28 22:37:22 +00003528 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3529 } else {
3530 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003531 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bellard8df1cd02005-01-28 22:37:22 +00003532 (addr & ~TARGET_PAGE_MASK);
3533 val = ldl_p(ptr);
3534 }
3535 return val;
3536}
3537
bellard84b7b8e2005-11-28 21:19:04 +00003538/* warning: addr must be aligned */
Anthony Liguoric227f092009-10-01 16:12:16 -05003539uint64_t ldq_phys(target_phys_addr_t addr)
bellard84b7b8e2005-11-28 21:19:04 +00003540{
3541 int io_index;
3542 uint8_t *ptr;
3543 uint64_t val;
3544 unsigned long pd;
3545 PhysPageDesc *p;
3546
3547 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3548 if (!p) {
3549 pd = IO_MEM_UNASSIGNED;
3550 } else {
3551 pd = p->phys_offset;
3552 }
ths3b46e622007-09-17 08:09:54 +00003553
bellard2a4188a2006-06-25 21:54:59 +00003554 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3555 !(pd & IO_MEM_ROMD)) {
bellard84b7b8e2005-11-28 21:19:04 +00003556 /* I/O case */
3557 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00003558 if (p)
3559 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bellard84b7b8e2005-11-28 21:19:04 +00003560#ifdef TARGET_WORDS_BIGENDIAN
3561 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3562 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3563#else
3564 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3565 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3566#endif
3567 } else {
3568 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003569 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bellard84b7b8e2005-11-28 21:19:04 +00003570 (addr & ~TARGET_PAGE_MASK);
3571 val = ldq_p(ptr);
3572 }
3573 return val;
3574}
3575
bellardaab33092005-10-30 20:48:42 +00003576/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05003577uint32_t ldub_phys(target_phys_addr_t addr)
bellardaab33092005-10-30 20:48:42 +00003578{
3579 uint8_t val;
3580 cpu_physical_memory_read(addr, &val, 1);
3581 return val;
3582}
3583
3584/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05003585uint32_t lduw_phys(target_phys_addr_t addr)
bellardaab33092005-10-30 20:48:42 +00003586{
3587 uint16_t val;
3588 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3589 return tswap16(val);
3590}
3591
bellard8df1cd02005-01-28 22:37:22 +00003592/* warning: addr must be aligned. The ram page is not masked as dirty
3593 and the code inside is not invalidated. It is useful if the dirty
3594 bits are used to track modified PTEs */
Anthony Liguoric227f092009-10-01 16:12:16 -05003595void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
bellard8df1cd02005-01-28 22:37:22 +00003596{
3597 int io_index;
3598 uint8_t *ptr;
3599 unsigned long pd;
3600 PhysPageDesc *p;
3601
3602 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3603 if (!p) {
3604 pd = IO_MEM_UNASSIGNED;
3605 } else {
3606 pd = p->phys_offset;
3607 }
ths3b46e622007-09-17 08:09:54 +00003608
bellard3a7d9292005-08-21 09:26:42 +00003609 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
bellard8df1cd02005-01-28 22:37:22 +00003610 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00003611 if (p)
3612 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bellard8df1cd02005-01-28 22:37:22 +00003613 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3614 } else {
aliguori74576192008-10-06 14:02:03 +00003615 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
pbrook5579c7f2009-04-11 14:47:08 +00003616 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00003617 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00003618
3619 if (unlikely(in_migration)) {
3620 if (!cpu_physical_memory_is_dirty(addr1)) {
3621 /* invalidate code */
3622 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3623 /* set dirty bit */
3624 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3625 (0xff & ~CODE_DIRTY_FLAG);
3626 }
3627 }
bellard8df1cd02005-01-28 22:37:22 +00003628 }
3629}
3630
Anthony Liguoric227f092009-10-01 16:12:16 -05003631void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
j_mayerbc98a7e2007-04-04 07:55:12 +00003632{
3633 int io_index;
3634 uint8_t *ptr;
3635 unsigned long pd;
3636 PhysPageDesc *p;
3637
3638 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3639 if (!p) {
3640 pd = IO_MEM_UNASSIGNED;
3641 } else {
3642 pd = p->phys_offset;
3643 }
ths3b46e622007-09-17 08:09:54 +00003644
j_mayerbc98a7e2007-04-04 07:55:12 +00003645 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3646 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00003647 if (p)
3648 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
j_mayerbc98a7e2007-04-04 07:55:12 +00003649#ifdef TARGET_WORDS_BIGENDIAN
3650 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3651 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3652#else
3653 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3654 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3655#endif
3656 } else {
pbrook5579c7f2009-04-11 14:47:08 +00003657 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
j_mayerbc98a7e2007-04-04 07:55:12 +00003658 (addr & ~TARGET_PAGE_MASK);
3659 stq_p(ptr, val);
3660 }
3661}
3662
bellard8df1cd02005-01-28 22:37:22 +00003663/* warning: addr must be aligned */
Anthony Liguoric227f092009-10-01 16:12:16 -05003664void stl_phys(target_phys_addr_t addr, uint32_t val)
bellard8df1cd02005-01-28 22:37:22 +00003665{
3666 int io_index;
3667 uint8_t *ptr;
3668 unsigned long pd;
3669 PhysPageDesc *p;
3670
3671 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3672 if (!p) {
3673 pd = IO_MEM_UNASSIGNED;
3674 } else {
3675 pd = p->phys_offset;
3676 }
ths3b46e622007-09-17 08:09:54 +00003677
bellard3a7d9292005-08-21 09:26:42 +00003678 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
bellard8df1cd02005-01-28 22:37:22 +00003679 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00003680 if (p)
3681 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bellard8df1cd02005-01-28 22:37:22 +00003682 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3683 } else {
3684 unsigned long addr1;
3685 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3686 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003687 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00003688 stl_p(ptr, val);
bellard3a7d9292005-08-21 09:26:42 +00003689 if (!cpu_physical_memory_is_dirty(addr1)) {
3690 /* invalidate code */
3691 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3692 /* set dirty bit */
bellardf23db162005-08-21 19:12:28 +00003693 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3694 (0xff & ~CODE_DIRTY_FLAG);
bellard3a7d9292005-08-21 09:26:42 +00003695 }
bellard8df1cd02005-01-28 22:37:22 +00003696 }
3697}
3698
bellardaab33092005-10-30 20:48:42 +00003699/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05003700void stb_phys(target_phys_addr_t addr, uint32_t val)
bellardaab33092005-10-30 20:48:42 +00003701{
3702 uint8_t v = val;
3703 cpu_physical_memory_write(addr, &v, 1);
3704}
3705
3706/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05003707void stw_phys(target_phys_addr_t addr, uint32_t val)
bellardaab33092005-10-30 20:48:42 +00003708{
3709 uint16_t v = tswap16(val);
3710 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3711}
3712
3713/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05003714void stq_phys(target_phys_addr_t addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00003715{
3716 val = tswap64(val);
3717 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3718}
3719
aliguori5e2972f2009-03-28 17:51:36 +00003720/* virtual memory access for debug (includes writing to ROM) */
ths5fafdf22007-09-16 21:08:06 +00003721int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00003722 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003723{
3724 int l;
Anthony Liguoric227f092009-10-01 16:12:16 -05003725 target_phys_addr_t phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00003726 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00003727
3728 while (len > 0) {
3729 page = addr & TARGET_PAGE_MASK;
3730 phys_addr = cpu_get_phys_page_debug(env, page);
3731 /* if no physical page mapped, return an error */
3732 if (phys_addr == -1)
3733 return -1;
3734 l = (page + TARGET_PAGE_SIZE) - addr;
3735 if (l > len)
3736 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00003737 phys_addr += (addr & ~TARGET_PAGE_MASK);
aliguori5e2972f2009-03-28 17:51:36 +00003738 if (is_write)
3739 cpu_physical_memory_write_rom(phys_addr, buf, l);
3740 else
aliguori5e2972f2009-03-28 17:51:36 +00003741 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
bellard13eb76e2004-01-24 15:23:36 +00003742 len -= l;
3743 buf += l;
3744 addr += l;
3745 }
3746 return 0;
3747}
Paul Brooka68fe892010-03-01 00:08:59 +00003748#endif
bellard13eb76e2004-01-24 15:23:36 +00003749
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    /* Find the TB that contains the host return address we faulted in. */
    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    /* n = icount budget at TB entry (remaining count plus this TB's
       instruction count), before restoring CPU state. */
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred. */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn. */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch. */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        /* Rewind to the branch and refund one instruction of icount. */
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        /* SH4 instructions are 2 bytes; step back over the branch. */
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen: the instruction count must fit in the
       cflags count field.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    /* Retranslate the same guest code, forcing the I/O instruction to
       be the last one in the block (CF_LAST_IO). */
    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB. */
    cpu_resume_from_signal(env, NULL);
}
3808
bellarde3db7222005-01-26 22:00:47 +00003809void dump_exec_info(FILE *f,
3810 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3811{
3812 int i, target_code_size, max_target_code_size;
3813 int direct_jmp_count, direct_jmp2_count, cross_page;
3814 TranslationBlock *tb;
ths3b46e622007-09-17 08:09:54 +00003815
bellarde3db7222005-01-26 22:00:47 +00003816 target_code_size = 0;
3817 max_target_code_size = 0;
3818 cross_page = 0;
3819 direct_jmp_count = 0;
3820 direct_jmp2_count = 0;
3821 for(i = 0; i < nb_tbs; i++) {
3822 tb = &tbs[i];
3823 target_code_size += tb->size;
3824 if (tb->size > max_target_code_size)
3825 max_target_code_size = tb->size;
3826 if (tb->page_addr[1] != -1)
3827 cross_page++;
3828 if (tb->tb_next_offset[0] != 0xffff) {
3829 direct_jmp_count++;
3830 if (tb->tb_next_offset[1] != 0xffff) {
3831 direct_jmp2_count++;
3832 }
3833 }
3834 }
3835 /* XXX: avoid using doubles ? */
bellard57fec1f2008-02-01 10:50:11 +00003836 cpu_fprintf(f, "Translation buffer state:\n");
bellard26a5f132008-05-28 12:30:31 +00003837 cpu_fprintf(f, "gen code size %ld/%ld\n",
3838 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3839 cpu_fprintf(f, "TB count %d/%d\n",
3840 nb_tbs, code_gen_max_blocks);
ths5fafdf22007-09-16 21:08:06 +00003841 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
bellarde3db7222005-01-26 22:00:47 +00003842 nb_tbs ? target_code_size / nb_tbs : 0,
3843 max_target_code_size);
ths5fafdf22007-09-16 21:08:06 +00003844 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
bellarde3db7222005-01-26 22:00:47 +00003845 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3846 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
ths5fafdf22007-09-16 21:08:06 +00003847 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3848 cross_page,
bellarde3db7222005-01-26 22:00:47 +00003849 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3850 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
ths5fafdf22007-09-16 21:08:06 +00003851 direct_jmp_count,
bellarde3db7222005-01-26 22:00:47 +00003852 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3853 direct_jmp2_count,
3854 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
bellard57fec1f2008-02-01 10:50:11 +00003855 cpu_fprintf(f, "\nStatistics:\n");
bellarde3db7222005-01-26 22:00:47 +00003856 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3857 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3858 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
bellardb67d9a52008-05-23 09:57:34 +00003859 tcg_dump_info(f, cpu_fprintf);
bellarde3db7222005-01-26 22:00:47 +00003860}
3861
ths5fafdf22007-09-16 21:08:06 +00003862#if !defined(CONFIG_USER_ONLY)
bellard61382a52003-10-27 21:22:23 +00003863
3864#define MMUSUFFIX _cmmu
3865#define GETPC() NULL
3866#define env cpu_single_env
bellardb769d8f2004-10-03 15:07:13 +00003867#define SOFTMMU_CODE_ACCESS
bellard61382a52003-10-27 21:22:23 +00003868
3869#define SHIFT 0
3870#include "softmmu_template.h"
3871
3872#define SHIFT 1
3873#include "softmmu_template.h"
3874
3875#define SHIFT 2
3876#include "softmmu_template.h"
3877
3878#define SHIFT 3
3879#include "softmmu_template.h"
3880
3881#undef env
3882
3883#endif