/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "tcg.h"
#include "hw/hw.h"
#include "osdep.h"
#include "kvm.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(CONFIG_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(CONFIG_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

static TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section \
    __attribute__((__section__(".gen_code"))) \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section \
    __attribute__((aligned (16)))
#else
#define code_gen_section \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
uint8_t *phys_ram_dirty;
static int in_migration;

typedef struct RAMBlock {
    uint8_t *host;
    ram_addr_t offset;
    ram_addr_t length;
    struct RAMBlock *next;
} RAMBlock;

static RAMBlock *ram_blocks;
/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
   then we can no longer assume contiguous ram offsets, and external uses
   of this variable will break.  */
ram_addr_t last_ram_offset;
#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
int64_t qemu_icount;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
static PhysPageDesc **l1_phys_map;

#if !defined(CONFIG_USER_ONLY)
static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

/* log support */
static const char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
} subpage_t;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
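
/* Usage sketch (illustrative only; 'buf' is a hypothetical buffer name):
   map_exec() makes the host pages backing a buffer readable, writable and
   executable, e.g.

       static uint8_t buf[4096];
       map_exec(buf, sizeof(buf));
*/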

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        mmap_lock();
        last_brk = (unsigned long)sbrk(0);
        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                  (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
        mmap_unlock();
    }
#endif
}
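
/* Worked example (illustrative): with a 4 KB host page and a 4 KB
   TARGET_PAGE_SIZE, page_init() leaves qemu_host_page_size == 4096,
   qemu_host_page_bits == 12 and qemu_host_page_mask == ~0xfff, so
   'addr & qemu_host_page_mask' rounds an address down to its host page. */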

static inline PageDesc **page_l1_map(target_ulong index)
{
#if TARGET_LONG_BITS > 32
    /* Host memory outside guest VM.  For 32-bit targets we have already
       excluded high addresses.  */
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
        return NULL;
#endif
    return &l1_map[index >> L2_BITS];
}

static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p) {
        /* allocate if not found */
#if defined(CONFIG_USER_ONLY)
        size_t len = sizeof(PageDesc) * L2_SIZE;
        /* Don't use qemu_malloc because it may recurse.  */
        p = mmap(0, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        *lp = p;
        if (h2g_valid(p)) {
            unsigned long addr = h2g(p);
            page_set_flags(addr & TARGET_PAGE_MASK,
                           TARGET_PAGE_ALIGN(addr + len),
                           PAGE_RESERVED);
        }
#else
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
        *lp = p;
#endif
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}
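
/* Lookup sketch (illustrative): with L2_BITS == 10, a target page index such
   as 0x12345 selects l1_map slot 0x48 (index >> L2_BITS) and entry 0x345
   (index & (L2_SIZE - 1)) in the PageDesc array that slot points to.
   page_find() returns NULL when either level has not been allocated yet. */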

static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode.  It will change when a dedicated libc is used.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__DragonFly__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
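
/* Sizing note: code_gen_buffer_max_size reserves room for one maximum-sized
   block at the end of the buffer, and code_gen_max_blocks is simply the
   buffer size divided by CODE_GEN_AVG_BLOCK_SIZE, so the 'tbs' array grows
   linearly with the translation buffer (32 MB by default in user mode). */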

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
}
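
/* Usage sketch (illustrative): passing 0 selects the default translation
   buffer size, e.g.

       cpu_exec_init_all(0);

   while a non-zero value requests an explicit buffer size in bytes. */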

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

#define CPU_COMMON_SAVE_VERSION 1

static void cpu_common_save(QEMUFile *f, void *opaque)
{
    CPUState *env = opaque;

    cpu_synchronize_state(env, 0);

    qemu_put_be32s(f, &env->halted);
    qemu_put_be32s(f, &env->interrupt_request);
}

static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
{
    CPUState *env = opaque;

    if (version_id != CPU_COMMON_SAVE_VERSION)
        return -EINVAL;

    qemu_get_be32s(f, &env->halted);
    qemu_get_be32s(f, &env->interrupt_request);
    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);
    cpu_synchronize_state(env, 1);

    return 0;
}
#endif

CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}
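
/* Usage sketch (illustrative):

       CPUState *env = qemu_get_cpu(0);

   returns the CPU whose cpu_index is 0, or NULL if no such CPU exists. */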

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    TAILQ_INIT(&env->breakpoints);
    TAILQ_INIT(&env->watchpoints);
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
                    cpu_common_save, cpu_common_load, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
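
/* Note: tb_flush() frees no memory; it only resets nb_tbs and code_gen_ptr
   and clears the per-CPU tb_jmp_cache and the physical hash table, so all
   translations are regenerated on demand afterwards. */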

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

static void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
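
/* Worked example (illustrative): set_bits(tab, 3, 7) marks bits 3..9: the
   first byte is OR-ed with 0xf8 (bits 3-7) and the second byte with 0x03
   (bits 8-9), i.e. the "start and end in different bytes" branch above. */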

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
    return tb;
}
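
/* Note: tb_gen_code() may trigger at most one tb_flush() when tb_alloc()
   fails, and phys_page2 is computed only when the translated guest code
   crosses a target page boundary, so a TB is linked to at most two
   physical pages. */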

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    target_ulong tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
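
/* Invalidation sketch (illustrative): a device-initiated write to
   guest-physical addresses [0x1000, 0x1800), all within one target page that
   is known to contain translated code, would typically be followed by

       tb_invalidate_phys_page_range(0x1000, 0x1800, 0);

   with is_cpu_write_access == 0 because the write does not originate from
   the currently executing virtual CPU. */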

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
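
/* Bitmap sketch (illustrative): for a 4-byte write at page offset 0x120 the
   code above reads code_bitmap[0x24], shifts it right by (0x120 & 7) == 0 and
   tests the low 4 bits; if any is set, translated code overlaps the write and
   the slow tb_invalidate_phys_page_range() path is taken. */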

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
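
/* Note: tb_find_pc() relies on TBs being allocated from 'tbs' in the same
   order as their code is emitted into code_gen_buffer, so tc_ptr values are
   sorted by TB index and a binary search over nb_tbs entries suffices. */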
bellard75012672003-06-21 13:11:07 +00001270
bellardea041c02003-06-25 16:16:50 +00001271static void tb_reset_jump_recursive(TranslationBlock *tb);
1272
1273static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1274{
1275 TranslationBlock *tb1, *tb_next, **ptb;
1276 unsigned int n1;
1277
1278 tb1 = tb->jmp_next[n];
1279 if (tb1 != NULL) {
1280 /* find head of list */
1281 for(;;) {
1282 n1 = (long)tb1 & 3;
1283 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1284 if (n1 == 2)
1285 break;
1286 tb1 = tb1->jmp_next[n1];
1287 }
1288 /* we are now sure now that tb jumps to tb1 */
1289 tb_next = tb1;
1290
1291 /* remove tb from the jmp_first list */
1292 ptb = &tb_next->jmp_first;
1293 for(;;) {
1294 tb1 = *ptb;
1295 n1 = (long)tb1 & 3;
1296 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1297 if (n1 == n && tb1 == tb)
1298 break;
1299 ptb = &tb1->jmp_next[n1];
1300 }
1301 *ptb = tb->jmp_next[n];
1302 tb->jmp_next[n] = NULL;
ths3b46e622007-09-17 08:09:54 +00001303
bellardea041c02003-06-25 16:16:50 +00001304 /* suppress the jump to next tb in generated code */
1305 tb_reset_jump(tb, n);
1306
bellard01243112004-01-04 15:48:17 +00001307 /* suppress jumps in the tb on which we could have jumped */
bellardea041c02003-06-25 16:16:50 +00001308 tb_reset_jump_recursive(tb_next);
1309 }
1310}
1311
1312static void tb_reset_jump_recursive(TranslationBlock *tb)
1313{
1314 tb_reset_jump_recursive2(tb, 0);
1315 tb_reset_jump_recursive2(tb, 1);
1316}
1317
bellard1fddef42005-04-17 19:16:13 +00001318#if defined(TARGET_HAS_ICE)
bellardd720b932004-04-25 17:57:43 +00001319static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1320{
j_mayer9b3c35e2007-04-07 11:21:28 +00001321 target_phys_addr_t addr;
1322 target_ulong pd;
pbrookc2f07f82006-04-08 17:14:56 +00001323 ram_addr_t ram_addr;
1324 PhysPageDesc *p;
bellardd720b932004-04-25 17:57:43 +00001325
pbrookc2f07f82006-04-08 17:14:56 +00001326 addr = cpu_get_phys_page_debug(env, pc);
1327 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1328 if (!p) {
1329 pd = IO_MEM_UNASSIGNED;
1330 } else {
1331 pd = p->phys_offset;
1332 }
1333 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
pbrook706cd4b2006-04-08 17:36:21 +00001334 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
bellardd720b932004-04-25 17:57:43 +00001335}
bellardc27004e2005-01-03 23:35:10 +00001336#endif
bellardd720b932004-04-25 17:57:43 +00001337
pbrook6658ffb2007-03-16 23:58:11 +00001338/* Add a watchpoint. */
aliguoria1d1bb32008-11-18 20:07:32 +00001339int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1340 int flags, CPUWatchpoint **watchpoint)
pbrook6658ffb2007-03-16 23:58:11 +00001341{
aliguorib4051332008-11-18 20:14:20 +00001342 target_ulong len_mask = ~(len - 1);
aliguoric0ce9982008-11-25 22:13:57 +00001343 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001344
aliguorib4051332008-11-18 20:14:20 +00001345 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
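    /* (len_mask is only a contiguous mask for power-of-2 lengths, and addr
       must be aligned to that length) */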
1346 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1347 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1348 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1349 return -EINVAL;
1350 }
aliguoria1d1bb32008-11-18 20:07:32 +00001351 wp = qemu_malloc(sizeof(*wp));
pbrook6658ffb2007-03-16 23:58:11 +00001352
aliguoria1d1bb32008-11-18 20:07:32 +00001353 wp->vaddr = addr;
aliguorib4051332008-11-18 20:14:20 +00001354 wp->len_mask = len_mask;
aliguoria1d1bb32008-11-18 20:07:32 +00001355 wp->flags = flags;
1356
aliguori2dc9f412008-11-18 20:56:59 +00001357 /* keep all GDB-injected watchpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001358 if (flags & BP_GDB)
1359 TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1360 else
1361 TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001362
pbrook6658ffb2007-03-16 23:58:11 +00001363 tlb_flush_page(env, addr);
aliguoria1d1bb32008-11-18 20:07:32 +00001364
1365 if (watchpoint)
1366 *watchpoint = wp;
1367 return 0;
pbrook6658ffb2007-03-16 23:58:11 +00001368}
1369
aliguoria1d1bb32008-11-18 20:07:32 +00001370/* Remove a specific watchpoint. */
1371int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1372 int flags)
pbrook6658ffb2007-03-16 23:58:11 +00001373{
aliguorib4051332008-11-18 20:14:20 +00001374 target_ulong len_mask = ~(len - 1);
aliguoria1d1bb32008-11-18 20:07:32 +00001375 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001376
aliguoric0ce9982008-11-25 22:13:57 +00001377 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001378 if (addr == wp->vaddr && len_mask == wp->len_mask
aliguori6e140f22008-11-18 20:37:55 +00001379 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
aliguoria1d1bb32008-11-18 20:07:32 +00001380 cpu_watchpoint_remove_by_ref(env, wp);
pbrook6658ffb2007-03-16 23:58:11 +00001381 return 0;
1382 }
1383 }
aliguoria1d1bb32008-11-18 20:07:32 +00001384 return -ENOENT;
pbrook6658ffb2007-03-16 23:58:11 +00001385}
1386
aliguoria1d1bb32008-11-18 20:07:32 +00001387/* Remove a specific watchpoint by reference. */
1388void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1389{
aliguoric0ce9982008-11-25 22:13:57 +00001390 TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
edgar_igl7d03f822008-05-17 18:58:29 +00001391
aliguoria1d1bb32008-11-18 20:07:32 +00001392 tlb_flush_page(env, watchpoint->vaddr);
1393
1394 qemu_free(watchpoint);
edgar_igl7d03f822008-05-17 18:58:29 +00001395}
1396
aliguoria1d1bb32008-11-18 20:07:32 +00001397/* Remove all matching watchpoints. */
1398void cpu_watchpoint_remove_all(CPUState *env, int mask)
1399{
aliguoric0ce9982008-11-25 22:13:57 +00001400 CPUWatchpoint *wp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001401
aliguoric0ce9982008-11-25 22:13:57 +00001402 TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001403 if (wp->flags & mask)
1404 cpu_watchpoint_remove_by_ref(env, wp);
aliguoric0ce9982008-11-25 22:13:57 +00001405 }
aliguoria1d1bb32008-11-18 20:07:32 +00001406}
1407
1408/* Add a breakpoint. */
1409int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1410 CPUBreakpoint **breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001411{
bellard1fddef42005-04-17 19:16:13 +00001412#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001413 CPUBreakpoint *bp;
ths3b46e622007-09-17 08:09:54 +00001414
aliguoria1d1bb32008-11-18 20:07:32 +00001415 bp = qemu_malloc(sizeof(*bp));
aliguoria1d1bb32008-11-18 20:07:32 +00001416
1417 bp->pc = pc;
1418 bp->flags = flags;
1419
aliguori2dc9f412008-11-18 20:56:59 +00001420 /* keep all GDB-injected breakpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001421 if (flags & BP_GDB)
1422 TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1423 else
1424 TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001425
1426 breakpoint_invalidate(env, pc);
1427
1428 if (breakpoint)
1429 *breakpoint = bp;
1430 return 0;
1431#else
1432 return -ENOSYS;
1433#endif
1434}
1435
1436/* Remove a specific breakpoint. */
1437int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1438{
1439#if defined(TARGET_HAS_ICE)
1440 CPUBreakpoint *bp;
1441
aliguoric0ce9982008-11-25 22:13:57 +00001442 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00001443 if (bp->pc == pc && bp->flags == flags) {
1444 cpu_breakpoint_remove_by_ref(env, bp);
bellard4c3a88a2003-07-26 12:06:08 +00001445 return 0;
aliguoria1d1bb32008-11-18 20:07:32 +00001446 }
bellard4c3a88a2003-07-26 12:06:08 +00001447 }
aliguoria1d1bb32008-11-18 20:07:32 +00001448 return -ENOENT;
bellard4c3a88a2003-07-26 12:06:08 +00001449#else
aliguoria1d1bb32008-11-18 20:07:32 +00001450 return -ENOSYS;
bellard4c3a88a2003-07-26 12:06:08 +00001451#endif
1452}
1453
aliguoria1d1bb32008-11-18 20:07:32 +00001454/* Remove a specific breakpoint by reference. */
1455void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001456{
bellard1fddef42005-04-17 19:16:13 +00001457#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001458 TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
bellardd720b932004-04-25 17:57:43 +00001459
aliguoria1d1bb32008-11-18 20:07:32 +00001460 breakpoint_invalidate(env, breakpoint->pc);
1461
1462 qemu_free(breakpoint);
1463#endif
1464}
1465
1466/* Remove all matching breakpoints. */
1467void cpu_breakpoint_remove_all(CPUState *env, int mask)
1468{
1469#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001470 CPUBreakpoint *bp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001471
aliguoric0ce9982008-11-25 22:13:57 +00001472 TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001473 if (bp->flags & mask)
1474 cpu_breakpoint_remove_by_ref(env, bp);
aliguoric0ce9982008-11-25 22:13:57 +00001475 }
bellard4c3a88a2003-07-26 12:06:08 +00001476#endif
1477}
1478
bellardc33a3462003-07-29 20:50:33 +00001479/* enable or disable single step mode. EXCP_DEBUG is returned by the
1480 CPU loop after each instruction */
1481void cpu_single_step(CPUState *env, int enabled)
1482{
bellard1fddef42005-04-17 19:16:13 +00001483#if defined(TARGET_HAS_ICE)
bellardc33a3462003-07-29 20:50:33 +00001484 if (env->singlestep_enabled != enabled) {
1485 env->singlestep_enabled = enabled;
aliguorie22a25c2009-03-12 20:12:48 +00001486 if (kvm_enabled())
1487 kvm_update_guest_debug(env, 0);
1488 else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01001489 /* must flush all the translated code to avoid inconsistencies */
aliguorie22a25c2009-03-12 20:12:48 +00001490 /* XXX: only flush what is necessary */
1491 tb_flush(env);
1492 }
bellardc33a3462003-07-29 20:50:33 +00001493 }
1494#endif
1495}
1496
bellard34865132003-10-05 14:28:56 +00001497/* enable or disable low levels log */
1498void cpu_set_log(int log_flags)
1499{
1500 loglevel = log_flags;
1501 if (loglevel && !logfile) {
pbrook11fcfab2007-07-01 18:21:11 +00001502 logfile = fopen(logfilename, log_append ? "a" : "w");
bellard34865132003-10-05 14:28:56 +00001503 if (!logfile) {
1504 perror(logfilename);
1505 _exit(1);
1506 }
bellard9fa3e852004-01-04 18:06:42 +00001507#if !defined(CONFIG_SOFTMMU)
1508 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1509        /* avoid glibc mmap()ing the stdio buffer by providing one "by hand" */
blueswir1b55266b2008-09-20 08:07:15 +00001510 static char logfile_buf[4096];
bellard9fa3e852004-01-04 18:06:42 +00001511 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1512 }
1513#else
bellard34865132003-10-05 14:28:56 +00001514 setvbuf(logfile, NULL, _IOLBF, 0);
bellard9fa3e852004-01-04 18:06:42 +00001515#endif
pbrooke735b912007-06-30 13:53:24 +00001516 log_append = 1;
1517 }
1518 if (!loglevel && logfile) {
1519 fclose(logfile);
1520 logfile = NULL;
bellard34865132003-10-05 14:28:56 +00001521 }
1522}
1523
1524void cpu_set_log_filename(const char *filename)
1525{
1526 logfilename = strdup(filename);
pbrooke735b912007-06-30 13:53:24 +00001527 if (logfile) {
1528 fclose(logfile);
1529 logfile = NULL;
1530 }
1531 cpu_set_log(loglevel);
bellard34865132003-10-05 14:28:56 +00001532}
bellardc33a3462003-07-29 20:50:33 +00001533
aurel323098dba2009-03-07 21:28:24 +00001534static void cpu_unlink_tb(CPUState *env)
bellardea041c02003-06-25 16:16:50 +00001535{
pbrookd5975362008-06-07 20:50:51 +00001536#if defined(USE_NPTL)
1537 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1538 problem and hope the cpu will stop of its own accord. For userspace
1539 emulation this often isn't actually as bad as it sounds. Often
1540 signals are used primarily to interrupt blocking syscalls. */
1541#else
aurel323098dba2009-03-07 21:28:24 +00001542 TranslationBlock *tb;
1543 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1544
1545 tb = env->current_tb;
1546 /* if the cpu is currently executing code, we must unlink it and
1547       all the potentially executing TBs */
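    /* if another context already holds interrupt_lock, it is presumably
       doing the same unchaining, so the work is simply skipped here */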
1548 if (tb && !testandset(&interrupt_lock)) {
1549 env->current_tb = NULL;
1550 tb_reset_jump_recursive(tb);
1551 resetlock(&interrupt_lock);
1552 }
1553#endif
1554}
1555
1556/* mask must never be zero, except for A20 change call */
1557void cpu_interrupt(CPUState *env, int mask)
1558{
1559 int old_mask;
1560
1561 old_mask = env->interrupt_request;
1562 env->interrupt_request |= mask;
1563
aliguori8edac962009-04-24 18:03:45 +00001564#ifndef CONFIG_USER_ONLY
1565 /*
1566 * If called from iothread context, wake the target cpu in
1567     * case it's halted.
1568 */
1569 if (!qemu_cpu_self(env)) {
1570 qemu_cpu_kick(env);
1571 return;
1572 }
1573#endif
1574
pbrook2e70f6e2008-06-29 01:03:05 +00001575 if (use_icount) {
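        /* make the 32-bit icount_decr value negative so that the counter
           check in the generated code exits the CPU loop at its next test */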
pbrook266910c2008-07-09 15:31:50 +00001576 env->icount_decr.u16.high = 0xffff;
pbrook2e70f6e2008-06-29 01:03:05 +00001577#ifndef CONFIG_USER_ONLY
pbrook2e70f6e2008-06-29 01:03:05 +00001578 if (!can_do_io(env)
aurel32be214e62009-03-06 21:48:00 +00001579 && (mask & ~old_mask) != 0) {
pbrook2e70f6e2008-06-29 01:03:05 +00001580 cpu_abort(env, "Raised interrupt while not in I/O function");
1581 }
1582#endif
1583 } else {
aurel323098dba2009-03-07 21:28:24 +00001584 cpu_unlink_tb(env);
bellardea041c02003-06-25 16:16:50 +00001585 }
1586}
1587
bellardb54ad042004-05-20 13:42:52 +00001588void cpu_reset_interrupt(CPUState *env, int mask)
1589{
1590 env->interrupt_request &= ~mask;
1591}
1592
aurel323098dba2009-03-07 21:28:24 +00001593void cpu_exit(CPUState *env)
1594{
1595 env->exit_request = 1;
1596 cpu_unlink_tb(env);
1597}
1598
blueswir1c7cd6a32008-10-02 18:27:46 +00001599const CPULogItem cpu_log_items[] = {
ths5fafdf22007-09-16 21:08:06 +00001600 { CPU_LOG_TB_OUT_ASM, "out_asm",
bellardf193c792004-03-21 17:06:25 +00001601 "show generated host assembly code for each compiled TB" },
1602 { CPU_LOG_TB_IN_ASM, "in_asm",
1603 "show target assembly code for each compiled TB" },
ths5fafdf22007-09-16 21:08:06 +00001604 { CPU_LOG_TB_OP, "op",
bellard57fec1f2008-02-01 10:50:11 +00001605 "show micro ops for each compiled TB" },
bellardf193c792004-03-21 17:06:25 +00001606 { CPU_LOG_TB_OP_OPT, "op_opt",
blueswir1e01a1152008-03-14 17:37:11 +00001607 "show micro ops "
1608#ifdef TARGET_I386
1609 "before eflags optimization and "
bellardf193c792004-03-21 17:06:25 +00001610#endif
blueswir1e01a1152008-03-14 17:37:11 +00001611 "after liveness analysis" },
bellardf193c792004-03-21 17:06:25 +00001612 { CPU_LOG_INT, "int",
1613 "show interrupts/exceptions in short format" },
1614 { CPU_LOG_EXEC, "exec",
1615 "show trace before each executed TB (lots of logs)" },
bellard9fddaa02004-05-21 12:59:32 +00001616 { CPU_LOG_TB_CPU, "cpu",
thse91c8a72007-06-03 13:35:16 +00001617 "show CPU state before block translation" },
bellardf193c792004-03-21 17:06:25 +00001618#ifdef TARGET_I386
1619 { CPU_LOG_PCALL, "pcall",
1620 "show protected mode far calls/returns/exceptions" },
aliguorieca1bdf2009-01-26 19:54:31 +00001621 { CPU_LOG_RESET, "cpu_reset",
1622 "show CPU state before CPU resets" },
bellardf193c792004-03-21 17:06:25 +00001623#endif
bellard8e3a9fd2004-10-09 17:32:58 +00001624#ifdef DEBUG_IOPORT
bellardfd872592004-05-12 19:11:15 +00001625 { CPU_LOG_IOPORT, "ioport",
1626 "show all i/o ports accesses" },
bellard8e3a9fd2004-10-09 17:32:58 +00001627#endif
bellardf193c792004-03-21 17:06:25 +00001628 { 0, NULL, NULL },
1629};
1630
1631static int cmp1(const char *s1, int n, const char *s2)
1632{
1633 if (strlen(s2) != n)
1634 return 0;
1635 return memcmp(s1, s2, n) == 0;
1636}
ths3b46e622007-09-17 08:09:54 +00001637
bellardf193c792004-03-21 17:06:25 +00001638/* takes a comma separated list of log masks. Return 0 if error. */
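/* e.g. "in_asm,cpu" returns CPU_LOG_TB_IN_ASM | CPU_LOG_TB_CPU, and "all"
   selects every entry of cpu_log_items */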
1639int cpu_str_to_log_mask(const char *str)
1640{
blueswir1c7cd6a32008-10-02 18:27:46 +00001641 const CPULogItem *item;
bellardf193c792004-03-21 17:06:25 +00001642 int mask;
1643 const char *p, *p1;
1644
1645 p = str;
1646 mask = 0;
1647 for(;;) {
1648 p1 = strchr(p, ',');
1649 if (!p1)
1650 p1 = p + strlen(p);
bellard8e3a9fd2004-10-09 17:32:58 +00001651        if (cmp1(p, p1 - p, "all")) {
1652 for(item = cpu_log_items; item->mask != 0; item++) {
1653 mask |= item->mask;
1654 }
1655 } else {
bellardf193c792004-03-21 17:06:25 +00001656 for(item = cpu_log_items; item->mask != 0; item++) {
1657 if (cmp1(p, p1 - p, item->name))
1658 goto found;
1659 }
1660 return 0;
bellard8e3a9fd2004-10-09 17:32:58 +00001661 }
bellardf193c792004-03-21 17:06:25 +00001662 found:
1663 mask |= item->mask;
1664 if (*p1 != ',')
1665 break;
1666 p = p1 + 1;
1667 }
1668 return mask;
1669}
bellardea041c02003-06-25 16:16:50 +00001670
bellard75012672003-06-21 13:11:07 +00001671void cpu_abort(CPUState *env, const char *fmt, ...)
1672{
1673 va_list ap;
pbrook493ae1f2007-11-23 16:53:59 +00001674 va_list ap2;
bellard75012672003-06-21 13:11:07 +00001675
1676 va_start(ap, fmt);
pbrook493ae1f2007-11-23 16:53:59 +00001677 va_copy(ap2, ap);
bellard75012672003-06-21 13:11:07 +00001678 fprintf(stderr, "qemu: fatal: ");
1679 vfprintf(stderr, fmt, ap);
1680 fprintf(stderr, "\n");
1681#ifdef TARGET_I386
bellard7fe48482004-10-09 18:08:01 +00001682 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1683#else
1684 cpu_dump_state(env, stderr, fprintf, 0);
bellard75012672003-06-21 13:11:07 +00001685#endif
aliguori93fcfe32009-01-15 22:34:14 +00001686 if (qemu_log_enabled()) {
1687 qemu_log("qemu: fatal: ");
1688 qemu_log_vprintf(fmt, ap2);
1689 qemu_log("\n");
j_mayerf9373292007-09-29 12:18:20 +00001690#ifdef TARGET_I386
aliguori93fcfe32009-01-15 22:34:14 +00001691 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
j_mayerf9373292007-09-29 12:18:20 +00001692#else
aliguori93fcfe32009-01-15 22:34:14 +00001693 log_cpu_state(env, 0);
j_mayerf9373292007-09-29 12:18:20 +00001694#endif
aliguori31b1a7b2009-01-15 22:35:09 +00001695 qemu_log_flush();
aliguori93fcfe32009-01-15 22:34:14 +00001696 qemu_log_close();
balrog924edca2007-06-10 14:07:13 +00001697 }
pbrook493ae1f2007-11-23 16:53:59 +00001698 va_end(ap2);
j_mayerf9373292007-09-29 12:18:20 +00001699 va_end(ap);
bellard75012672003-06-21 13:11:07 +00001700 abort();
1701}
1702
thsc5be9f02007-02-28 20:20:53 +00001703CPUState *cpu_copy(CPUState *env)
1704{
ths01ba9812007-12-09 02:22:57 +00001705 CPUState *new_env = cpu_init(env->cpu_model_str);
thsc5be9f02007-02-28 20:20:53 +00001706 CPUState *next_cpu = new_env->next_cpu;
1707 int cpu_index = new_env->cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001708#if defined(TARGET_HAS_ICE)
1709 CPUBreakpoint *bp;
1710 CPUWatchpoint *wp;
1711#endif
1712
thsc5be9f02007-02-28 20:20:53 +00001713 memcpy(new_env, env, sizeof(CPUState));
aliguori5a38f082009-01-15 20:16:51 +00001714
1715 /* Preserve chaining and index. */
thsc5be9f02007-02-28 20:20:53 +00001716 new_env->next_cpu = next_cpu;
1717 new_env->cpu_index = cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001718
1719 /* Clone all break/watchpoints.
1720 Note: Once we support ptrace with hw-debug register access, make sure
1721 BP_CPU break/watchpoints are handled correctly on clone. */
1722    TAILQ_INIT(&new_env->breakpoints);
1723    TAILQ_INIT(&new_env->watchpoints);
1724#if defined(TARGET_HAS_ICE)
1725 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1726 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1727 }
1728 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1729 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1730 wp->flags, NULL);
1731 }
1732#endif
1733
thsc5be9f02007-02-28 20:20:53 +00001734 return new_env;
1735}
1736
bellard01243112004-01-04 15:48:17 +00001737#if !defined(CONFIG_USER_ONLY)
1738
edgar_igl5c751e92008-05-06 08:44:21 +00001739static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1740{
1741 unsigned int i;
1742
1743 /* Discard jump cache entries for any tb which might potentially
1744 overlap the flushed page. */
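    /* a TB may span two pages, so entries hashed under the preceding page
       can reach into the flushed one as well; clear both hash ranges */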
1745 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1746 memset (&env->tb_jmp_cache[i], 0,
1747 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1748
1749 i = tb_jmp_cache_hash_page(addr);
1750 memset (&env->tb_jmp_cache[i], 0,
1751 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1752}
1753
Igor Kovalenko08738982009-07-12 02:15:40 +04001754static CPUTLBEntry s_cputlb_empty_entry = {
1755 .addr_read = -1,
1756 .addr_write = -1,
1757 .addr_code = -1,
1758 .addend = -1,
1759};
1760
bellardee8b7022004-02-03 23:35:10 +00001761/* NOTE: if flush_global is true, also flush global entries (not
1762 implemented yet) */
1763void tlb_flush(CPUState *env, int flush_global)
bellard33417e72003-08-10 21:47:01 +00001764{
bellard33417e72003-08-10 21:47:01 +00001765 int i;
bellard01243112004-01-04 15:48:17 +00001766
bellard9fa3e852004-01-04 18:06:42 +00001767#if defined(DEBUG_TLB)
1768 printf("tlb_flush:\n");
1769#endif
bellard01243112004-01-04 15:48:17 +00001770 /* must reset current TB so that interrupts cannot modify the
1771 links while we are modifying them */
1772 env->current_tb = NULL;
1773
bellard33417e72003-08-10 21:47:01 +00001774 for(i = 0; i < CPU_TLB_SIZE; i++) {
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001775 int mmu_idx;
1776 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
Igor Kovalenko08738982009-07-12 02:15:40 +04001777 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001778 }
bellard33417e72003-08-10 21:47:01 +00001779 }
bellard9fa3e852004-01-04 18:06:42 +00001780
bellard8a40a182005-11-20 10:35:40 +00001781 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
bellard9fa3e852004-01-04 18:06:42 +00001782
blueswir1640f42e2009-04-19 10:18:01 +00001783#ifdef CONFIG_KQEMU
bellard0a962c02005-02-10 22:00:27 +00001784 if (env->kqemu_enabled) {
1785 kqemu_flush(env, flush_global);
1786 }
1787#endif
bellarde3db7222005-01-26 22:00:47 +00001788 tlb_flush_count++;
bellard33417e72003-08-10 21:47:01 +00001789}
1790
bellard274da6b2004-05-20 21:56:27 +00001791static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
bellard61382a52003-10-27 21:22:23 +00001792{
ths5fafdf22007-09-16 21:08:06 +00001793 if (addr == (tlb_entry->addr_read &
bellard84b7b8e2005-11-28 21:19:04 +00001794 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00001795 addr == (tlb_entry->addr_write &
bellard84b7b8e2005-11-28 21:19:04 +00001796 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00001797 addr == (tlb_entry->addr_code &
bellard84b7b8e2005-11-28 21:19:04 +00001798 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
Igor Kovalenko08738982009-07-12 02:15:40 +04001799 *tlb_entry = s_cputlb_empty_entry;
bellard84b7b8e2005-11-28 21:19:04 +00001800 }
bellard61382a52003-10-27 21:22:23 +00001801}
1802
bellard2e126692004-04-25 21:28:44 +00001803void tlb_flush_page(CPUState *env, target_ulong addr)
bellard33417e72003-08-10 21:47:01 +00001804{
bellard8a40a182005-11-20 10:35:40 +00001805 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001806 int mmu_idx;
bellard01243112004-01-04 15:48:17 +00001807
bellard9fa3e852004-01-04 18:06:42 +00001808#if defined(DEBUG_TLB)
bellard108c49b2005-07-24 12:55:09 +00001809 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
bellard9fa3e852004-01-04 18:06:42 +00001810#endif
bellard01243112004-01-04 15:48:17 +00001811 /* must reset current TB so that interrupts cannot modify the
1812 links while we are modifying them */
1813 env->current_tb = NULL;
bellard33417e72003-08-10 21:47:01 +00001814
bellard61382a52003-10-27 21:22:23 +00001815 addr &= TARGET_PAGE_MASK;
bellard33417e72003-08-10 21:47:01 +00001816 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001817 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1818 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
bellard01243112004-01-04 15:48:17 +00001819
edgar_igl5c751e92008-05-06 08:44:21 +00001820 tlb_flush_jmp_cache(env, addr);
bellard9fa3e852004-01-04 18:06:42 +00001821
blueswir1640f42e2009-04-19 10:18:01 +00001822#ifdef CONFIG_KQEMU
bellard0a962c02005-02-10 22:00:27 +00001823 if (env->kqemu_enabled) {
1824 kqemu_flush_page(env, addr);
1825 }
1826#endif
bellard9fa3e852004-01-04 18:06:42 +00001827}
1828
bellard9fa3e852004-01-04 18:06:42 +00001829/* update the TLBs so that writes to code in the virtual page 'addr'
1830 can be detected */
bellard6a00d602005-11-21 23:25:50 +00001831static void tlb_protect_code(ram_addr_t ram_addr)
bellard61382a52003-10-27 21:22:23 +00001832{
ths5fafdf22007-09-16 21:08:06 +00001833 cpu_physical_memory_reset_dirty(ram_addr,
bellard6a00d602005-11-21 23:25:50 +00001834 ram_addr + TARGET_PAGE_SIZE,
1835 CODE_DIRTY_FLAG);
bellard9fa3e852004-01-04 18:06:42 +00001836}
1837
bellard9fa3e852004-01-04 18:06:42 +00001838/* update the TLB so that writes in physical page 'phys_addr' are no longer
bellard3a7d9292005-08-21 09:26:42 +00001839 tested for self modifying code */
ths5fafdf22007-09-16 21:08:06 +00001840static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
bellard3a7d9292005-08-21 09:26:42 +00001841 target_ulong vaddr)
bellard9fa3e852004-01-04 18:06:42 +00001842{
bellard3a7d9292005-08-21 09:26:42 +00001843 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
bellard1ccde1c2004-02-06 19:46:14 +00001844}
1845
ths5fafdf22007-09-16 21:08:06 +00001846static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
bellard1ccde1c2004-02-06 19:46:14 +00001847 unsigned long start, unsigned long length)
1848{
1849 unsigned long addr;
bellard84b7b8e2005-11-28 21:19:04 +00001850 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1851 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
bellard1ccde1c2004-02-06 19:46:14 +00001852 if ((addr - start) < length) {
pbrook0f459d12008-06-09 00:20:13 +00001853 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
bellard1ccde1c2004-02-06 19:46:14 +00001854 }
1855 }
1856}
1857
pbrook5579c7f2009-04-11 14:47:08 +00001858/* Note: start and end must be within the same ram block. */
bellard3a7d9292005-08-21 09:26:42 +00001859void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
bellard0a962c02005-02-10 22:00:27 +00001860 int dirty_flags)
bellard1ccde1c2004-02-06 19:46:14 +00001861{
1862 CPUState *env;
bellard4f2ac232004-04-26 19:44:02 +00001863 unsigned long length, start1;
bellard0a962c02005-02-10 22:00:27 +00001864 int i, mask, len;
1865 uint8_t *p;
bellard1ccde1c2004-02-06 19:46:14 +00001866
1867 start &= TARGET_PAGE_MASK;
1868 end = TARGET_PAGE_ALIGN(end);
1869
1870 length = end - start;
1871 if (length == 0)
1872 return;
bellard0a962c02005-02-10 22:00:27 +00001873 len = length >> TARGET_PAGE_BITS;
blueswir1640f42e2009-04-19 10:18:01 +00001874#ifdef CONFIG_KQEMU
bellard6a00d602005-11-21 23:25:50 +00001875 /* XXX: should not depend on cpu context */
1876 env = first_cpu;
bellard3a7d9292005-08-21 09:26:42 +00001877 if (env->kqemu_enabled) {
bellardf23db162005-08-21 19:12:28 +00001878 ram_addr_t addr;
1879 addr = start;
1880 for(i = 0; i < len; i++) {
1881 kqemu_set_notdirty(env, addr);
1882 addr += TARGET_PAGE_SIZE;
1883 }
bellard3a7d9292005-08-21 09:26:42 +00001884 }
1885#endif
bellardf23db162005-08-21 19:12:28 +00001886 mask = ~dirty_flags;
1887 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1888 for(i = 0; i < len; i++)
1889 p[i] &= mask;
1890
bellard1ccde1c2004-02-06 19:46:14 +00001891 /* we modify the TLB cache so that the dirty bit will be set again
1892 when accessing the range */
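    /* TLB entries hold host addresses (vaddr + addend), so convert the ram
       range into host pointers before the comparisons done in
       tlb_reset_dirty_range() */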
pbrook5579c7f2009-04-11 14:47:08 +00001893 start1 = (unsigned long)qemu_get_ram_ptr(start);
1894    /* Check that we don't span multiple blocks - this breaks the
1895 address comparisons below. */
1896 if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
1897 != (end - 1) - start) {
1898 abort();
1899 }
1900
bellard6a00d602005-11-21 23:25:50 +00001901 for(env = first_cpu; env != NULL; env = env->next_cpu) {
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001902 int mmu_idx;
1903 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1904 for(i = 0; i < CPU_TLB_SIZE; i++)
1905 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
1906 start1, length);
1907 }
bellard6a00d602005-11-21 23:25:50 +00001908 }
bellard1ccde1c2004-02-06 19:46:14 +00001909}
1910
aliguori74576192008-10-06 14:02:03 +00001911int cpu_physical_memory_set_dirty_tracking(int enable)
1912{
1913 in_migration = enable;
Jan Kiszkab0a46a32009-05-02 00:22:51 +02001914 if (kvm_enabled()) {
1915 return kvm_set_migration_log(enable);
1916 }
aliguori74576192008-10-06 14:02:03 +00001917 return 0;
1918}
1919
1920int cpu_physical_memory_get_dirty_tracking(void)
1921{
1922 return in_migration;
1923}
1924
Jan Kiszka151f7742009-05-01 20:52:47 +02001925int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
1926 target_phys_addr_t end_addr)
aliguori2bec46d2008-11-24 20:21:41 +00001927{
Jan Kiszka151f7742009-05-01 20:52:47 +02001928 int ret = 0;
1929
aliguori2bec46d2008-11-24 20:21:41 +00001930 if (kvm_enabled())
Jan Kiszka151f7742009-05-01 20:52:47 +02001931 ret = kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1932 return ret;
aliguori2bec46d2008-11-24 20:21:41 +00001933}
1934
bellard3a7d9292005-08-21 09:26:42 +00001935static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1936{
1937 ram_addr_t ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00001938 void *p;
bellard3a7d9292005-08-21 09:26:42 +00001939
bellard84b7b8e2005-11-28 21:19:04 +00001940 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
pbrook5579c7f2009-04-11 14:47:08 +00001941 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
1942 + tlb_entry->addend);
1943 ram_addr = qemu_ram_addr_from_host(p);
bellard3a7d9292005-08-21 09:26:42 +00001944 if (!cpu_physical_memory_is_dirty(ram_addr)) {
pbrook0f459d12008-06-09 00:20:13 +00001945 tlb_entry->addr_write |= TLB_NOTDIRTY;
bellard3a7d9292005-08-21 09:26:42 +00001946 }
1947 }
1948}
1949
1950/* update the TLB according to the current state of the dirty bits */
1951void cpu_tlb_update_dirty(CPUState *env)
1952{
1953 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001954 int mmu_idx;
1955 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1956 for(i = 0; i < CPU_TLB_SIZE; i++)
1957 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
1958 }
bellard3a7d9292005-08-21 09:26:42 +00001959}
1960
pbrook0f459d12008-06-09 00:20:13 +00001961static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00001962{
pbrook0f459d12008-06-09 00:20:13 +00001963 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1964 tlb_entry->addr_write = vaddr;
bellard1ccde1c2004-02-06 19:46:14 +00001965}
1966
pbrook0f459d12008-06-09 00:20:13 +00001967/* update the TLB corresponding to virtual page vaddr
1968 so that it is no longer dirty */
1969static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00001970{
bellard1ccde1c2004-02-06 19:46:14 +00001971 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001972 int mmu_idx;
bellard1ccde1c2004-02-06 19:46:14 +00001973
pbrook0f459d12008-06-09 00:20:13 +00001974 vaddr &= TARGET_PAGE_MASK;
bellard1ccde1c2004-02-06 19:46:14 +00001975 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001976 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1977 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
bellard9fa3e852004-01-04 18:06:42 +00001978}
1979
bellard59817cc2004-02-16 22:01:13 +00001980/* add a new TLB entry. At most one entry for a given virtual address
1981 is permitted. Return 0 if OK or 2 if the page could not be mapped
1982 (can only happen in non SOFTMMU mode for I/O pages or pages
1983 conflicting with the host address space). */
ths5fafdf22007-09-16 21:08:06 +00001984int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1985 target_phys_addr_t paddr, int prot,
j_mayer6ebbf392007-10-14 07:07:08 +00001986 int mmu_idx, int is_softmmu)
bellard9fa3e852004-01-04 18:06:42 +00001987{
bellard92e873b2004-05-21 14:52:29 +00001988 PhysPageDesc *p;
bellard4f2ac232004-04-26 19:44:02 +00001989 unsigned long pd;
bellard9fa3e852004-01-04 18:06:42 +00001990 unsigned int index;
bellard4f2ac232004-04-26 19:44:02 +00001991 target_ulong address;
pbrook0f459d12008-06-09 00:20:13 +00001992 target_ulong code_address;
bellard108c49b2005-07-24 12:55:09 +00001993 target_phys_addr_t addend;
bellard9fa3e852004-01-04 18:06:42 +00001994 int ret;
bellard84b7b8e2005-11-28 21:19:04 +00001995 CPUTLBEntry *te;
aliguoria1d1bb32008-11-18 20:07:32 +00001996 CPUWatchpoint *wp;
pbrook0f459d12008-06-09 00:20:13 +00001997 target_phys_addr_t iotlb;
bellard9fa3e852004-01-04 18:06:42 +00001998
bellard92e873b2004-05-21 14:52:29 +00001999 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
bellard9fa3e852004-01-04 18:06:42 +00002000 if (!p) {
2001 pd = IO_MEM_UNASSIGNED;
bellard9fa3e852004-01-04 18:06:42 +00002002 } else {
2003 pd = p->phys_offset;
bellard9fa3e852004-01-04 18:06:42 +00002004 }
2005#if defined(DEBUG_TLB)
j_mayer6ebbf392007-10-14 07:07:08 +00002006 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2007 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
bellard9fa3e852004-01-04 18:06:42 +00002008#endif
2009
2010 ret = 0;
pbrook0f459d12008-06-09 00:20:13 +00002011 address = vaddr;
2012 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2013 /* IO memory case (romd handled later) */
2014 address |= TLB_MMIO;
2015 }
pbrook5579c7f2009-04-11 14:47:08 +00002016 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
pbrook0f459d12008-06-09 00:20:13 +00002017 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2018 /* Normal RAM. */
2019 iotlb = pd & TARGET_PAGE_MASK;
2020 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2021 iotlb |= IO_MEM_NOTDIRTY;
2022 else
2023 iotlb |= IO_MEM_ROM;
2024 } else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002025 /* IO handlers are currently passed a physical address.
pbrook0f459d12008-06-09 00:20:13 +00002026 It would be nice to pass an offset from the base address
2027 of that region. This would avoid having to special case RAM,
2028 and avoid full address decoding in every device.
2029 We can't use the high bits of pd for this because
2030 IO_MEM_ROMD uses these as a ram address. */
pbrook8da3ff12008-12-01 18:59:50 +00002031 iotlb = (pd & ~TARGET_PAGE_MASK);
2032 if (p) {
pbrook8da3ff12008-12-01 18:59:50 +00002033 iotlb += p->region_offset;
2034 } else {
2035 iotlb += paddr;
2036 }
pbrook0f459d12008-06-09 00:20:13 +00002037 }
pbrook6658ffb2007-03-16 23:58:11 +00002038
pbrook0f459d12008-06-09 00:20:13 +00002039 code_address = address;
2040 /* Make accesses to pages with watchpoints go via the
2041 watchpoint trap routines. */
aliguoric0ce9982008-11-25 22:13:57 +00002042 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00002043 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
pbrook0f459d12008-06-09 00:20:13 +00002044 iotlb = io_mem_watch + paddr;
2045 /* TODO: The memory case can be optimized by not trapping
2046 reads of pages with a write breakpoint. */
2047 address |= TLB_MMIO;
pbrook6658ffb2007-03-16 23:58:11 +00002048 }
pbrook0f459d12008-06-09 00:20:13 +00002049 }
balrogd79acba2007-06-26 20:01:13 +00002050
pbrook0f459d12008-06-09 00:20:13 +00002051 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2052 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2053 te = &env->tlb_table[mmu_idx][index];
2054 te->addend = addend - vaddr;
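    /* addend is chosen so that host_ptr == guest_vaddr + te->addend for any
       address within this page */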
2055 if (prot & PAGE_READ) {
2056 te->addr_read = address;
2057 } else {
2058 te->addr_read = -1;
2059 }
edgar_igl5c751e92008-05-06 08:44:21 +00002060
pbrook0f459d12008-06-09 00:20:13 +00002061 if (prot & PAGE_EXEC) {
2062 te->addr_code = code_address;
2063 } else {
2064 te->addr_code = -1;
2065 }
2066 if (prot & PAGE_WRITE) {
2067 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2068 (pd & IO_MEM_ROMD)) {
2069 /* Write access calls the I/O callback. */
2070 te->addr_write = address | TLB_MMIO;
2071 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2072 !cpu_physical_memory_is_dirty(pd)) {
2073 te->addr_write = address | TLB_NOTDIRTY;
bellard84b7b8e2005-11-28 21:19:04 +00002074 } else {
pbrook0f459d12008-06-09 00:20:13 +00002075 te->addr_write = address;
bellard9fa3e852004-01-04 18:06:42 +00002076 }
pbrook0f459d12008-06-09 00:20:13 +00002077 } else {
2078 te->addr_write = -1;
bellard9fa3e852004-01-04 18:06:42 +00002079 }
bellard9fa3e852004-01-04 18:06:42 +00002080 return ret;
2081}
2082
bellard01243112004-01-04 15:48:17 +00002083#else
2084
bellardee8b7022004-02-03 23:35:10 +00002085void tlb_flush(CPUState *env, int flush_global)
bellard01243112004-01-04 15:48:17 +00002086{
2087}
2088
bellard2e126692004-04-25 21:28:44 +00002089void tlb_flush_page(CPUState *env, target_ulong addr)
bellard01243112004-01-04 15:48:17 +00002090{
2091}
2092
ths5fafdf22007-09-16 21:08:06 +00002093int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2094 target_phys_addr_t paddr, int prot,
j_mayer6ebbf392007-10-14 07:07:08 +00002095 int mmu_idx, int is_softmmu)
bellard33417e72003-08-10 21:47:01 +00002096{
bellard9fa3e852004-01-04 18:06:42 +00002097 return 0;
2098}
bellard33417e72003-08-10 21:47:01 +00002099
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002100/*
2101 * Walks guest process memory "regions" one by one
2102 * and calls callback function 'fn' for each region.
2103 */
2104int walk_memory_regions(void *priv,
2105 int (*fn)(void *, unsigned long, unsigned long, unsigned long))
bellard9fa3e852004-01-04 18:06:42 +00002106{
2107 unsigned long start, end;
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002108 PageDesc *p = NULL;
bellard9fa3e852004-01-04 18:06:42 +00002109 int i, j, prot, prot1;
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002110 int rc = 0;
bellard9fa3e852004-01-04 18:06:42 +00002111
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002112 start = end = -1;
bellard9fa3e852004-01-04 18:06:42 +00002113 prot = 0;
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002114
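    /* the loop runs one extra iteration (i == L1_SIZE) with p == NULL so
       that a region still open at the end of the address space is reported
       before breaking out */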
2115 for (i = 0; i <= L1_SIZE; i++) {
2116 p = (i < L1_SIZE) ? l1_map[i] : NULL;
2117 for (j = 0; j < L2_SIZE; j++) {
2118 prot1 = (p == NULL) ? 0 : p[j].flags;
2119 /*
2120             * "region" is one contiguous chunk of memory
2121             * that has the same protection flags set.
2122 */
bellard9fa3e852004-01-04 18:06:42 +00002123 if (prot1 != prot) {
2124 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2125 if (start != -1) {
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002126 rc = (*fn)(priv, start, end, prot);
2127 /* callback can stop iteration by returning != 0 */
2128 if (rc != 0)
2129 return (rc);
bellard9fa3e852004-01-04 18:06:42 +00002130 }
2131 if (prot1 != 0)
2132 start = end;
2133 else
2134 start = -1;
2135 prot = prot1;
2136 }
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002137 if (p == NULL)
bellard9fa3e852004-01-04 18:06:42 +00002138 break;
2139 }
bellard33417e72003-08-10 21:47:01 +00002140 }
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002141 return (rc);
2142}
2143
2144static int dump_region(void *priv, unsigned long start,
2145 unsigned long end, unsigned long prot)
2146{
2147 FILE *f = (FILE *)priv;
2148
2149 (void) fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2150 start, end, end - start,
2151 ((prot & PAGE_READ) ? 'r' : '-'),
2152 ((prot & PAGE_WRITE) ? 'w' : '-'),
2153 ((prot & PAGE_EXEC) ? 'x' : '-'));
2154
2155 return (0);
2156}
2157
2158/* dump memory mappings */
2159void page_dump(FILE *f)
2160{
2161 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2162 "start", "end", "size", "prot");
2163 walk_memory_regions(f, dump_region);
bellard33417e72003-08-10 21:47:01 +00002164}
2165
pbrook53a59602006-03-25 19:31:22 +00002166int page_get_flags(target_ulong address)
bellard33417e72003-08-10 21:47:01 +00002167{
bellard9fa3e852004-01-04 18:06:42 +00002168 PageDesc *p;
2169
2170 p = page_find(address >> TARGET_PAGE_BITS);
bellard33417e72003-08-10 21:47:01 +00002171 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00002172 return 0;
2173 return p->flags;
bellard33417e72003-08-10 21:47:01 +00002174}
2175
bellard9fa3e852004-01-04 18:06:42 +00002176/* modify the flags of a page and invalidate the code if
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002177   necessary. The flag PAGE_WRITE_ORG is set automatically
bellard9fa3e852004-01-04 18:06:42 +00002178 depending on PAGE_WRITE */
pbrook53a59602006-03-25 19:31:22 +00002179void page_set_flags(target_ulong start, target_ulong end, int flags)
bellard9fa3e852004-01-04 18:06:42 +00002180{
2181 PageDesc *p;
pbrook53a59602006-03-25 19:31:22 +00002182 target_ulong addr;
bellard9fa3e852004-01-04 18:06:42 +00002183
pbrookc8a706f2008-06-02 16:16:42 +00002184 /* mmap_lock should already be held. */
bellard9fa3e852004-01-04 18:06:42 +00002185 start = start & TARGET_PAGE_MASK;
2186 end = TARGET_PAGE_ALIGN(end);
2187 if (flags & PAGE_WRITE)
2188 flags |= PAGE_WRITE_ORG;
bellard9fa3e852004-01-04 18:06:42 +00002189 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2190 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
pbrook17e23772008-06-09 13:47:45 +00002191 /* We may be called for host regions that are outside guest
2192 address space. */
2193 if (!p)
2194 return;
bellard9fa3e852004-01-04 18:06:42 +00002195 /* if the write protection is set, then we invalidate the code
2196 inside */
ths5fafdf22007-09-16 21:08:06 +00002197 if (!(p->flags & PAGE_WRITE) &&
bellard9fa3e852004-01-04 18:06:42 +00002198 (flags & PAGE_WRITE) &&
2199 p->first_tb) {
bellardd720b932004-04-25 17:57:43 +00002200 tb_invalidate_phys_page(addr, 0, NULL);
bellard9fa3e852004-01-04 18:06:42 +00002201 }
2202 p->flags = flags;
2203 }
bellard9fa3e852004-01-04 18:06:42 +00002204}
2205
ths3d97b402007-11-02 19:02:07 +00002206int page_check_range(target_ulong start, target_ulong len, int flags)
2207{
2208 PageDesc *p;
2209 target_ulong end;
2210 target_ulong addr;
2211
balrog55f280c2008-10-28 10:24:11 +00002212 if (start + len < start)
2213 /* we've wrapped around */
2214 return -1;
2215
ths3d97b402007-11-02 19:02:07 +00002216    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2217 start = start & TARGET_PAGE_MASK;
2218
ths3d97b402007-11-02 19:02:07 +00002219 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2220 p = page_find(addr >> TARGET_PAGE_BITS);
2221        if (!p)
2222            return -1;
2223        if (!(p->flags & PAGE_VALID))
2224            return -1;
2225
bellarddae32702007-11-14 10:51:00 +00002226 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
ths3d97b402007-11-02 19:02:07 +00002227 return -1;
bellarddae32702007-11-14 10:51:00 +00002228 if (flags & PAGE_WRITE) {
2229 if (!(p->flags & PAGE_WRITE_ORG))
2230 return -1;
2231 /* unprotect the page if it was put read-only because it
2232 contains translated code */
2233 if (!(p->flags & PAGE_WRITE)) {
2234 if (!page_unprotect(addr, 0, NULL))
2235 return -1;
2236 }
2237 return 0;
2238 }
ths3d97b402007-11-02 19:02:07 +00002239 }
2240 return 0;
2241}
2242
bellard9fa3e852004-01-04 18:06:42 +00002243/* called from signal handler: invalidate the code and unprotect the
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002244 page. Return TRUE if the fault was successfully handled. */
pbrook53a59602006-03-25 19:31:22 +00002245int page_unprotect(target_ulong address, unsigned long pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00002246{
2247 unsigned int page_index, prot, pindex;
2248 PageDesc *p, *p1;
pbrook53a59602006-03-25 19:31:22 +00002249 target_ulong host_start, host_end, addr;
bellard9fa3e852004-01-04 18:06:42 +00002250
pbrookc8a706f2008-06-02 16:16:42 +00002251 /* Technically this isn't safe inside a signal handler. However we
2252 know this only ever happens in a synchronous SEGV handler, so in
2253 practice it seems to be ok. */
2254 mmap_lock();
2255
bellard83fb7ad2004-07-05 21:25:26 +00002256 host_start = address & qemu_host_page_mask;
bellard9fa3e852004-01-04 18:06:42 +00002257 page_index = host_start >> TARGET_PAGE_BITS;
2258 p1 = page_find(page_index);
pbrookc8a706f2008-06-02 16:16:42 +00002259 if (!p1) {
2260 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002261 return 0;
pbrookc8a706f2008-06-02 16:16:42 +00002262 }
bellard83fb7ad2004-07-05 21:25:26 +00002263 host_end = host_start + qemu_host_page_size;
bellard9fa3e852004-01-04 18:06:42 +00002264 p = p1;
2265 prot = 0;
2266 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2267 prot |= p->flags;
2268 p++;
2269 }
2270 /* if the page was really writable, then we change its
2271 protection back to writable */
2272 if (prot & PAGE_WRITE_ORG) {
2273 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2274 if (!(p1[pindex].flags & PAGE_WRITE)) {
ths5fafdf22007-09-16 21:08:06 +00002275 mprotect((void *)g2h(host_start), qemu_host_page_size,
bellard9fa3e852004-01-04 18:06:42 +00002276 (prot & PAGE_BITS) | PAGE_WRITE);
2277 p1[pindex].flags |= PAGE_WRITE;
2278 /* and since the content will be modified, we must invalidate
2279 the corresponding translated code. */
bellardd720b932004-04-25 17:57:43 +00002280 tb_invalidate_phys_page(address, pc, puc);
bellard9fa3e852004-01-04 18:06:42 +00002281#ifdef DEBUG_TB_CHECK
2282 tb_invalidate_check(address);
2283#endif
pbrookc8a706f2008-06-02 16:16:42 +00002284 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002285 return 1;
2286 }
2287 }
pbrookc8a706f2008-06-02 16:16:42 +00002288 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002289 return 0;
2290}
2291
bellard6a00d602005-11-21 23:25:50 +00002292static inline void tlb_set_dirty(CPUState *env,
2293 unsigned long addr, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002294{
2295}
bellard9fa3e852004-01-04 18:06:42 +00002296#endif /* defined(CONFIG_USER_ONLY) */
2297
pbrooke2eef172008-06-08 01:09:01 +00002298#if !defined(CONFIG_USER_ONLY)
pbrook8da3ff12008-12-01 18:59:50 +00002299
blueswir1db7b5422007-05-26 17:36:03 +00002300static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
pbrook8da3ff12008-12-01 18:59:50 +00002301 ram_addr_t memory, ram_addr_t region_offset);
aurel3200f82b82008-04-27 21:12:55 +00002302static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
pbrook8da3ff12008-12-01 18:59:50 +00002303 ram_addr_t orig_memory, ram_addr_t region_offset);
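/* Compute the sub-page range [start_addr2, end_addr2] that the registration
   [start_addr, start_addr + orig_size) covers inside the target page holding
   'addr', and set need_subpage when that range is only part of the page. */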
blueswir1db7b5422007-05-26 17:36:03 +00002304#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2305 need_subpage) \
2306 do { \
2307 if (addr > start_addr) \
2308 start_addr2 = 0; \
2309 else { \
2310 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2311 if (start_addr2 > 0) \
2312 need_subpage = 1; \
2313 } \
2314 \
blueswir149e9fba2007-05-30 17:25:06 +00002315 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
blueswir1db7b5422007-05-26 17:36:03 +00002316 end_addr2 = TARGET_PAGE_SIZE - 1; \
2317 else { \
2318 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2319 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2320 need_subpage = 1; \
2321 } \
2322 } while (0)
2323
bellard33417e72003-08-10 21:47:01 +00002324/* register physical memory. 'size' must be a multiple of the target
2325 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
pbrook8da3ff12008-12-01 18:59:50 +00002326 io memory page. The address used when calling the IO function is
2327 the offset from the start of the region, plus region_offset. Both
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002328 start_addr and region_offset are rounded down to a page boundary
pbrook8da3ff12008-12-01 18:59:50 +00002329 before calculating this offset. This should not be a problem unless
2330 the low bits of start_addr and region_offset differ. */
2331void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2332 ram_addr_t size,
2333 ram_addr_t phys_offset,
2334 ram_addr_t region_offset)
bellard33417e72003-08-10 21:47:01 +00002335{
bellard108c49b2005-07-24 12:55:09 +00002336 target_phys_addr_t addr, end_addr;
bellard92e873b2004-05-21 14:52:29 +00002337 PhysPageDesc *p;
bellard9d420372006-06-25 22:25:22 +00002338 CPUState *env;
aurel3200f82b82008-04-27 21:12:55 +00002339 ram_addr_t orig_size = size;
blueswir1db7b5422007-05-26 17:36:03 +00002340 void *subpage;
bellard33417e72003-08-10 21:47:01 +00002341
blueswir1640f42e2009-04-19 10:18:01 +00002342#ifdef CONFIG_KQEMU
bellardda260242008-05-30 20:48:25 +00002343 /* XXX: should not depend on cpu context */
2344 env = first_cpu;
2345 if (env->kqemu_enabled) {
2346 kqemu_set_phys_mem(start_addr, size, phys_offset);
2347 }
2348#endif
aliguori7ba1e612008-11-05 16:04:33 +00002349 if (kvm_enabled())
2350 kvm_set_phys_mem(start_addr, size, phys_offset);
2351
pbrook67c4d232009-02-23 13:16:07 +00002352 if (phys_offset == IO_MEM_UNASSIGNED) {
2353 region_offset = start_addr;
2354 }
pbrook8da3ff12008-12-01 18:59:50 +00002355 region_offset &= TARGET_PAGE_MASK;
bellard5fd386f2004-05-23 21:11:22 +00002356 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
blueswir149e9fba2007-05-30 17:25:06 +00002357 end_addr = start_addr + (target_phys_addr_t)size;
2358 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
blueswir1db7b5422007-05-26 17:36:03 +00002359 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2360 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
aurel3200f82b82008-04-27 21:12:55 +00002361 ram_addr_t orig_memory = p->phys_offset;
blueswir1db7b5422007-05-26 17:36:03 +00002362 target_phys_addr_t start_addr2, end_addr2;
2363 int need_subpage = 0;
2364
2365 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2366 need_subpage);
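            /* a registration that covers only part of this page, or an I/O
               region flagged IO_MEM_SUBWIDTH, is routed through a subpage_t
               so that different ranges of one page can use different
               handlers */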
blueswir14254fab2008-01-01 16:57:19 +00002367 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
blueswir1db7b5422007-05-26 17:36:03 +00002368 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2369 subpage = subpage_init((addr & TARGET_PAGE_MASK),
pbrook8da3ff12008-12-01 18:59:50 +00002370 &p->phys_offset, orig_memory,
2371 p->region_offset);
blueswir1db7b5422007-05-26 17:36:03 +00002372 } else {
2373 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2374 >> IO_MEM_SHIFT];
2375 }
pbrook8da3ff12008-12-01 18:59:50 +00002376 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2377 region_offset);
2378 p->region_offset = 0;
blueswir1db7b5422007-05-26 17:36:03 +00002379 } else {
2380 p->phys_offset = phys_offset;
2381 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2382 (phys_offset & IO_MEM_ROMD))
2383 phys_offset += TARGET_PAGE_SIZE;
2384 }
2385 } else {
2386 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2387 p->phys_offset = phys_offset;
pbrook8da3ff12008-12-01 18:59:50 +00002388 p->region_offset = region_offset;
blueswir1db7b5422007-05-26 17:36:03 +00002389 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
pbrook8da3ff12008-12-01 18:59:50 +00002390 (phys_offset & IO_MEM_ROMD)) {
blueswir1db7b5422007-05-26 17:36:03 +00002391 phys_offset += TARGET_PAGE_SIZE;
pbrook0e8f0962008-12-02 09:02:15 +00002392 } else {
blueswir1db7b5422007-05-26 17:36:03 +00002393 target_phys_addr_t start_addr2, end_addr2;
2394 int need_subpage = 0;
2395
2396 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2397 end_addr2, need_subpage);
2398
blueswir14254fab2008-01-01 16:57:19 +00002399 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
blueswir1db7b5422007-05-26 17:36:03 +00002400 subpage = subpage_init((addr & TARGET_PAGE_MASK),
pbrook8da3ff12008-12-01 18:59:50 +00002401 &p->phys_offset, IO_MEM_UNASSIGNED,
pbrook67c4d232009-02-23 13:16:07 +00002402 addr & TARGET_PAGE_MASK);
blueswir1db7b5422007-05-26 17:36:03 +00002403 subpage_register(subpage, start_addr2, end_addr2,
pbrook8da3ff12008-12-01 18:59:50 +00002404 phys_offset, region_offset);
2405 p->region_offset = 0;
blueswir1db7b5422007-05-26 17:36:03 +00002406 }
2407 }
2408 }
pbrook8da3ff12008-12-01 18:59:50 +00002409 region_offset += TARGET_PAGE_SIZE;
bellard33417e72003-08-10 21:47:01 +00002410 }
ths3b46e622007-09-17 08:09:54 +00002411
bellard9d420372006-06-25 22:25:22 +00002412 /* since each CPU stores ram addresses in its TLB cache, we must
2413 reset the modified entries */
2414 /* XXX: slow ! */
2415 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2416 tlb_flush(env, 1);
2417 }
bellard33417e72003-08-10 21:47:01 +00002418}
2419
bellardba863452006-09-24 18:41:10 +00002420/* XXX: temporary until new memory mapping API */
aurel3200f82b82008-04-27 21:12:55 +00002421ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
bellardba863452006-09-24 18:41:10 +00002422{
2423 PhysPageDesc *p;
2424
2425 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2426 if (!p)
2427 return IO_MEM_UNASSIGNED;
2428 return p->phys_offset;
2429}
2430
aliguorif65ed4c2008-12-09 20:09:57 +00002431void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2432{
2433 if (kvm_enabled())
2434 kvm_coalesce_mmio_region(addr, size);
2435}
2436
2437void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2438{
2439 if (kvm_enabled())
2440 kvm_uncoalesce_mmio_region(addr, size);
2441}
2442
blueswir1640f42e2009-04-19 10:18:01 +00002443#ifdef CONFIG_KQEMU
bellarde9a1ab12007-02-08 23:08:38 +00002444/* XXX: better than nothing */
pbrook94a6b542009-04-11 17:15:54 +00002445static ram_addr_t kqemu_ram_alloc(ram_addr_t size)
bellarde9a1ab12007-02-08 23:08:38 +00002446{
2447 ram_addr_t addr;
pbrook94a6b542009-04-11 17:15:54 +00002448 if ((last_ram_offset + size) > kqemu_phys_ram_size) {
ths012a7042008-10-02 17:34:21 +00002449 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
pbrook94a6b542009-04-11 17:15:54 +00002450 (uint64_t)size, (uint64_t)kqemu_phys_ram_size);
bellarde9a1ab12007-02-08 23:08:38 +00002451 abort();
2452 }
pbrook94a6b542009-04-11 17:15:54 +00002453 addr = last_ram_offset;
2454 last_ram_offset = TARGET_PAGE_ALIGN(last_ram_offset + size);
bellarde9a1ab12007-02-08 23:08:38 +00002455 return addr;
2456}
pbrook94a6b542009-04-11 17:15:54 +00002457#endif
2458
2459ram_addr_t qemu_ram_alloc(ram_addr_t size)
2460{
2461 RAMBlock *new_block;
2462
blueswir1640f42e2009-04-19 10:18:01 +00002463#ifdef CONFIG_KQEMU
pbrook94a6b542009-04-11 17:15:54 +00002464 if (kqemu_phys_ram_base) {
2465 return kqemu_ram_alloc(size);
2466 }
2467#endif
2468
2469 size = TARGET_PAGE_ALIGN(size);
2470 new_block = qemu_malloc(sizeof(*new_block));
2471
2472 new_block->host = qemu_vmalloc(size);
2473 new_block->offset = last_ram_offset;
2474 new_block->length = size;
2475
2476 new_block->next = ram_blocks;
2477 ram_blocks = new_block;
2478
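    /* grow the dirty bitmap (one flag byte per target page) and mark the new
       pages with all dirty flags set so their initial contents count as
       dirty */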
2479 phys_ram_dirty = qemu_realloc(phys_ram_dirty,
2480 (last_ram_offset + size) >> TARGET_PAGE_BITS);
2481 memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
2482 0xff, size >> TARGET_PAGE_BITS);
2483
2484 last_ram_offset += size;
2485
Jan Kiszka6f0437e2009-04-26 18:03:40 +02002486 if (kvm_enabled())
2487 kvm_setup_guest_memory(new_block->host, size);
2488
pbrook94a6b542009-04-11 17:15:54 +00002489 return new_block->offset;
2490}
bellarde9a1ab12007-02-08 23:08:38 +00002491
2492void qemu_ram_free(ram_addr_t addr)
2493{
pbrook94a6b542009-04-11 17:15:54 +00002494 /* TODO: implement this. */
bellarde9a1ab12007-02-08 23:08:38 +00002495}
2496
pbrookdc828ca2009-04-09 22:21:07 +00002497/* Return a host pointer to ram allocated with qemu_ram_alloc.
pbrook5579c7f2009-04-11 14:47:08 +00002498 With the exception of the softmmu code in this file, this should
2499 only be used for local memory (e.g. video ram) that the device owns,
2500 and knows it isn't going to access beyond the end of the block.
2501
2502 It should not be used for general purpose DMA.
2503 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2504 */
pbrookdc828ca2009-04-09 22:21:07 +00002505void *qemu_get_ram_ptr(ram_addr_t addr)
2506{
pbrook94a6b542009-04-11 17:15:54 +00002507 RAMBlock *prev;
2508 RAMBlock **prevp;
2509 RAMBlock *block;
2510
blueswir1640f42e2009-04-19 10:18:01 +00002511#ifdef CONFIG_KQEMU
pbrook94a6b542009-04-11 17:15:54 +00002512 if (kqemu_phys_ram_base) {
2513 return kqemu_phys_ram_base + addr;
2514 }
2515#endif
2516
2517 prev = NULL;
2518 prevp = &ram_blocks;
2519 block = ram_blocks;
2520 while (block && (block->offset > addr
2521 || block->offset + block->length <= addr)) {
2522 if (prev)
2523 prevp = &prev->next;
2524 prev = block;
2525 block = block->next;
2526 }
2527 if (!block) {
2528 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2529 abort();
2530 }
2531    /* Move this entry to the start of the list. */
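    /* (a simple most-recently-used heuristic: consecutive lookups usually
       hit the same RAM block, so keeping it at the head keeps the scan
       short) */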
2532 if (prev) {
2533 prev->next = block->next;
2534 block->next = *prevp;
2535 *prevp = block;
2536 }
2537 return block->host + (addr - block->offset);
pbrookdc828ca2009-04-09 22:21:07 +00002538}
2539
pbrook5579c7f2009-04-11 14:47:08 +00002540/* Some of the softmmu routines need to translate from a host pointer
2541 (typically a TLB entry) back to a ram offset. */
2542ram_addr_t qemu_ram_addr_from_host(void *ptr)
2543{
pbrook94a6b542009-04-11 17:15:54 +00002544 RAMBlock *prev;
2545 RAMBlock **prevp;
2546 RAMBlock *block;
2547 uint8_t *host = ptr;
2548
blueswir1640f42e2009-04-19 10:18:01 +00002549#ifdef CONFIG_KQEMU
pbrook94a6b542009-04-11 17:15:54 +00002550 if (kqemu_phys_ram_base) {
2551 return host - kqemu_phys_ram_base;
2552 }
2553#endif
2554
2555 prev = NULL;
2556 prevp = &ram_blocks;
2557 block = ram_blocks;
2558 while (block && (block->host > host
2559 || block->host + block->length <= host)) {
2560 if (prev)
2561 prevp = &prev->next;
2562 prev = block;
2563 block = block->next;
2564 }
2565 if (!block) {
2566 fprintf(stderr, "Bad ram pointer %p\n", ptr);
2567 abort();
2568 }
2569 return block->offset + (host - block->host);
pbrook5579c7f2009-04-11 14:47:08 +00002570}
2571
bellarda4193c82004-06-03 14:01:43 +00002572static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
bellard33417e72003-08-10 21:47:01 +00002573{
pbrook67d3b952006-12-18 05:03:52 +00002574#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00002575 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
pbrook67d3b952006-12-18 05:03:52 +00002576#endif
edgar_igl0a6f8a62008-12-29 14:39:57 +00002577#if defined(TARGET_SPARC)
blueswir1e18231a2008-10-06 18:46:28 +00002578 do_unassigned_access(addr, 0, 0, 0, 1);
2579#endif
2580 return 0;
2581}
2582
2583static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2584{
2585#ifdef DEBUG_UNASSIGNED
2586 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2587#endif
edgar_igl0a6f8a62008-12-29 14:39:57 +00002588#if defined(TARGET_SPARC)
blueswir1e18231a2008-10-06 18:46:28 +00002589 do_unassigned_access(addr, 0, 0, 0, 2);
2590#endif
2591 return 0;
2592}
2593
2594static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2595{
2596#ifdef DEBUG_UNASSIGNED
2597 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2598#endif
edgar_igl0a6f8a62008-12-29 14:39:57 +00002599#if defined(TARGET_SPARC)
blueswir1e18231a2008-10-06 18:46:28 +00002600 do_unassigned_access(addr, 0, 0, 0, 4);
blueswir1b4f0a312007-05-06 17:59:24 +00002601#endif
bellard33417e72003-08-10 21:47:01 +00002602 return 0;
2603}
2604
bellarda4193c82004-06-03 14:01:43 +00002605static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
bellard33417e72003-08-10 21:47:01 +00002606{
pbrook67d3b952006-12-18 05:03:52 +00002607#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00002608 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
pbrook67d3b952006-12-18 05:03:52 +00002609#endif
edgar_igl0a6f8a62008-12-29 14:39:57 +00002610#if defined(TARGET_SPARC)
blueswir1e18231a2008-10-06 18:46:28 +00002611 do_unassigned_access(addr, 1, 0, 0, 1);
2612#endif
2613}
2614
2615static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2616{
2617#ifdef DEBUG_UNASSIGNED
2618 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2619#endif
edgar_igl0a6f8a62008-12-29 14:39:57 +00002620#if defined(TARGET_SPARC)
blueswir1e18231a2008-10-06 18:46:28 +00002621 do_unassigned_access(addr, 1, 0, 0, 2);
2622#endif
2623}
2624
2625static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2626{
2627#ifdef DEBUG_UNASSIGNED
2628 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2629#endif
edgar_igl0a6f8a62008-12-29 14:39:57 +00002630#if defined(TARGET_SPARC)
blueswir1e18231a2008-10-06 18:46:28 +00002631 do_unassigned_access(addr, 1, 0, 0, 4);
blueswir1b4f0a312007-05-06 17:59:24 +00002632#endif
bellard33417e72003-08-10 21:47:01 +00002633}
2634
2635static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2636 unassigned_mem_readb,
blueswir1e18231a2008-10-06 18:46:28 +00002637 unassigned_mem_readw,
2638 unassigned_mem_readl,
bellard33417e72003-08-10 21:47:01 +00002639};
2640
2641static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2642 unassigned_mem_writeb,
blueswir1e18231a2008-10-06 18:46:28 +00002643 unassigned_mem_writew,
2644 unassigned_mem_writel,
bellard33417e72003-08-10 21:47:01 +00002645};
2646
pbrook0f459d12008-06-09 00:20:13 +00002647static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2648 uint32_t val)
bellard1ccde1c2004-02-06 19:46:14 +00002649{
bellard3a7d9292005-08-21 09:26:42 +00002650 int dirty_flags;
bellard3a7d9292005-08-21 09:26:42 +00002651 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2652 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2653#if !defined(CONFIG_USER_ONLY)
2654 tb_invalidate_phys_page_fast(ram_addr, 1);
2655 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2656#endif
2657 }
pbrook5579c7f2009-04-11 14:47:08 +00002658 stb_p(qemu_get_ram_ptr(ram_addr), val);
blueswir1640f42e2009-04-19 10:18:01 +00002659#ifdef CONFIG_KQEMU
bellardf32fc642006-02-08 22:43:39 +00002660 if (cpu_single_env->kqemu_enabled &&
2661 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2662 kqemu_modify_page(cpu_single_env, ram_addr);
2663#endif
bellardf23db162005-08-21 19:12:28 +00002664 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2665 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2666 /* we remove the notdirty callback only if the code has been
2667 flushed */
2668 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00002669 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00002670}
2671
pbrook0f459d12008-06-09 00:20:13 +00002672static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2673 uint32_t val)
bellard1ccde1c2004-02-06 19:46:14 +00002674{
bellard3a7d9292005-08-21 09:26:42 +00002675 int dirty_flags;
bellard3a7d9292005-08-21 09:26:42 +00002676 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2677 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2678#if !defined(CONFIG_USER_ONLY)
2679 tb_invalidate_phys_page_fast(ram_addr, 2);
2680 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2681#endif
2682 }
pbrook5579c7f2009-04-11 14:47:08 +00002683 stw_p(qemu_get_ram_ptr(ram_addr), val);
blueswir1640f42e2009-04-19 10:18:01 +00002684#ifdef CONFIG_KQEMU
bellardf32fc642006-02-08 22:43:39 +00002685 if (cpu_single_env->kqemu_enabled &&
2686 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2687 kqemu_modify_page(cpu_single_env, ram_addr);
2688#endif
bellardf23db162005-08-21 19:12:28 +00002689 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2690 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2691 /* we remove the notdirty callback only if the code has been
2692 flushed */
2693 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00002694 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00002695}
2696
pbrook0f459d12008-06-09 00:20:13 +00002697static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2698 uint32_t val)
bellard1ccde1c2004-02-06 19:46:14 +00002699{
bellard3a7d9292005-08-21 09:26:42 +00002700 int dirty_flags;
bellard3a7d9292005-08-21 09:26:42 +00002701 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2702 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2703#if !defined(CONFIG_USER_ONLY)
2704 tb_invalidate_phys_page_fast(ram_addr, 4);
2705 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2706#endif
2707 }
pbrook5579c7f2009-04-11 14:47:08 +00002708 stl_p(qemu_get_ram_ptr(ram_addr), val);
blueswir1640f42e2009-04-19 10:18:01 +00002709#ifdef CONFIG_KQEMU
bellardf32fc642006-02-08 22:43:39 +00002710 if (cpu_single_env->kqemu_enabled &&
2711 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2712 kqemu_modify_page(cpu_single_env, ram_addr);
2713#endif
bellardf23db162005-08-21 19:12:28 +00002714 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2715 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2716 /* we remove the notdirty callback only if the code has been
2717 flushed */
2718 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00002719 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00002720}
2721
bellard3a7d9292005-08-21 09:26:42 +00002722static CPUReadMemoryFunc *error_mem_read[3] = {
2723 NULL, /* never used */
2724 NULL, /* never used */
2725 NULL, /* never used */
2726};
2727
bellard1ccde1c2004-02-06 19:46:14 +00002728static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2729 notdirty_mem_writeb,
2730 notdirty_mem_writew,
2731 notdirty_mem_writel,
2732};
2733
pbrook0f459d12008-06-09 00:20:13 +00002734/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00002735static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00002736{
2737 CPUState *env = cpu_single_env;
aliguori06d55cc2008-11-18 20:24:06 +00002738 target_ulong pc, cs_base;
2739 TranslationBlock *tb;
pbrook0f459d12008-06-09 00:20:13 +00002740 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00002741 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00002742 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00002743
aliguori06d55cc2008-11-18 20:24:06 +00002744 if (env->watchpoint_hit) {
2745 /* We re-entered the check after replacing the TB. Now raise
 2746 * the debug interrupt so that it will trigger after the
2747 * current instruction. */
2748 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2749 return;
2750 }
pbrook2e70f6e2008-06-29 01:03:05 +00002751 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
aliguoric0ce9982008-11-25 22:13:57 +00002752 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00002753 if ((vaddr == (wp->vaddr & len_mask) ||
2754 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00002755 wp->flags |= BP_WATCHPOINT_HIT;
2756 if (!env->watchpoint_hit) {
2757 env->watchpoint_hit = wp;
2758 tb = tb_find_pc(env->mem_io_pc);
2759 if (!tb) {
2760 cpu_abort(env, "check_watchpoint: could not find TB for "
2761 "pc=%p", (void *)env->mem_io_pc);
2762 }
2763 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2764 tb_phys_invalidate(tb, -1);
2765 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2766 env->exception_index = EXCP_DEBUG;
2767 } else {
2768 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2769 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2770 }
2771 cpu_resume_from_signal(env, NULL);
aliguori06d55cc2008-11-18 20:24:06 +00002772 }
aliguori6e140f22008-11-18 20:37:55 +00002773 } else {
2774 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00002775 }
2776 }
2777}
2778
pbrook6658ffb2007-03-16 23:58:11 +00002779/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2780 so these check for a hit then pass through to the normal out-of-line
2781 phys routines. */
2782static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2783{
aliguorib4051332008-11-18 20:14:20 +00002784 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
pbrook6658ffb2007-03-16 23:58:11 +00002785 return ldub_phys(addr);
2786}
2787
2788static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2789{
aliguorib4051332008-11-18 20:14:20 +00002790 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
pbrook6658ffb2007-03-16 23:58:11 +00002791 return lduw_phys(addr);
2792}
2793
2794static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2795{
aliguorib4051332008-11-18 20:14:20 +00002796 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
pbrook6658ffb2007-03-16 23:58:11 +00002797 return ldl_phys(addr);
2798}
2799
pbrook6658ffb2007-03-16 23:58:11 +00002800static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2801 uint32_t val)
2802{
aliguorib4051332008-11-18 20:14:20 +00002803 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
pbrook6658ffb2007-03-16 23:58:11 +00002804 stb_phys(addr, val);
2805}
2806
2807static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2808 uint32_t val)
2809{
aliguorib4051332008-11-18 20:14:20 +00002810 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
pbrook6658ffb2007-03-16 23:58:11 +00002811 stw_phys(addr, val);
2812}
2813
2814static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2815 uint32_t val)
2816{
aliguorib4051332008-11-18 20:14:20 +00002817 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
pbrook6658ffb2007-03-16 23:58:11 +00002818 stl_phys(addr, val);
2819}
2820
2821static CPUReadMemoryFunc *watch_mem_read[3] = {
2822 watch_mem_readb,
2823 watch_mem_readw,
2824 watch_mem_readl,
2825};
2826
2827static CPUWriteMemoryFunc *watch_mem_write[3] = {
2828 watch_mem_writeb,
2829 watch_mem_writew,
2830 watch_mem_writel,
2831};
pbrook6658ffb2007-03-16 23:58:11 +00002832
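/* Illustrative sketch (not part of the original file): a watchpoint is armed
 * with cpu_watchpoint_insert(), declared in cpu-all.h in this tree (its exact
 * signature is an assumption here).  The TLB entry covering the watched page
 * is redirected to io_mem_watch, so every guest access to that page funnels
 * through the watch_mem_* handlers above, which call check_watchpoint()
 * before falling back to the normal physical access helpers. */
#if 0
static void example_arm_write_watchpoint(CPUState *env, target_ulong vaddr)
{
    CPUWatchpoint *wp;

    /* Watch 4-byte writes at vaddr; BP_GDB marks it as debugger-owned. */
    if (cpu_watchpoint_insert(env, vaddr, 4, BP_GDB | BP_MEM_WRITE, &wp) < 0)
        return;     /* insertion rejected */
}
#endif
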
blueswir1db7b5422007-05-26 17:36:03 +00002833static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2834 unsigned int len)
2835{
blueswir1db7b5422007-05-26 17:36:03 +00002836 uint32_t ret;
2837 unsigned int idx;
2838
pbrook8da3ff12008-12-01 18:59:50 +00002839 idx = SUBPAGE_IDX(addr);
blueswir1db7b5422007-05-26 17:36:03 +00002840#if defined(DEBUG_SUBPAGE)
2841 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2842 mmio, len, addr, idx);
2843#endif
pbrook8da3ff12008-12-01 18:59:50 +00002844 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2845 addr + mmio->region_offset[idx][0][len]);
blueswir1db7b5422007-05-26 17:36:03 +00002846
2847 return ret;
2848}
2849
2850static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2851 uint32_t value, unsigned int len)
2852{
blueswir1db7b5422007-05-26 17:36:03 +00002853 unsigned int idx;
2854
pbrook8da3ff12008-12-01 18:59:50 +00002855 idx = SUBPAGE_IDX(addr);
blueswir1db7b5422007-05-26 17:36:03 +00002856#if defined(DEBUG_SUBPAGE)
2857 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2858 mmio, len, addr, idx, value);
2859#endif
pbrook8da3ff12008-12-01 18:59:50 +00002860 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2861 addr + mmio->region_offset[idx][1][len],
2862 value);
blueswir1db7b5422007-05-26 17:36:03 +00002863}
2864
2865static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2866{
2867#if defined(DEBUG_SUBPAGE)
2868 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2869#endif
2870
2871 return subpage_readlen(opaque, addr, 0);
2872}
2873
2874static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2875 uint32_t value)
2876{
2877#if defined(DEBUG_SUBPAGE)
2878 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2879#endif
2880 subpage_writelen(opaque, addr, value, 0);
2881}
2882
2883static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2884{
2885#if defined(DEBUG_SUBPAGE)
2886 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2887#endif
2888
2889 return subpage_readlen(opaque, addr, 1);
2890}
2891
2892static void subpage_writew (void *opaque, target_phys_addr_t addr,
2893 uint32_t value)
2894{
2895#if defined(DEBUG_SUBPAGE)
2896 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2897#endif
2898 subpage_writelen(opaque, addr, value, 1);
2899}
2900
2901static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2902{
2903#if defined(DEBUG_SUBPAGE)
2904 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2905#endif
2906
2907 return subpage_readlen(opaque, addr, 2);
2908}
2909
2910static void subpage_writel (void *opaque,
2911 target_phys_addr_t addr, uint32_t value)
2912{
2913#if defined(DEBUG_SUBPAGE)
2914 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2915#endif
2916 subpage_writelen(opaque, addr, value, 2);
2917}
2918
2919static CPUReadMemoryFunc *subpage_read[] = {
2920 &subpage_readb,
2921 &subpage_readw,
2922 &subpage_readl,
2923};
2924
2925static CPUWriteMemoryFunc *subpage_write[] = {
2926 &subpage_writeb,
2927 &subpage_writew,
2928 &subpage_writel,
2929};
2930
2931static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
pbrook8da3ff12008-12-01 18:59:50 +00002932 ram_addr_t memory, ram_addr_t region_offset)
blueswir1db7b5422007-05-26 17:36:03 +00002933{
2934 int idx, eidx;
blueswir14254fab2008-01-01 16:57:19 +00002935 unsigned int i;
blueswir1db7b5422007-05-26 17:36:03 +00002936
2937 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2938 return -1;
2939 idx = SUBPAGE_IDX(start);
2940 eidx = SUBPAGE_IDX(end);
2941#if defined(DEBUG_SUBPAGE)
2942 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2943 mmio, start, end, idx, eidx, memory);
2944#endif
2945 memory >>= IO_MEM_SHIFT;
2946 for (; idx <= eidx; idx++) {
blueswir14254fab2008-01-01 16:57:19 +00002947 for (i = 0; i < 4; i++) {
blueswir13ee89922008-01-02 19:45:26 +00002948 if (io_mem_read[memory][i]) {
2949 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2950 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
pbrook8da3ff12008-12-01 18:59:50 +00002951 mmio->region_offset[idx][0][i] = region_offset;
blueswir13ee89922008-01-02 19:45:26 +00002952 }
2953 if (io_mem_write[memory][i]) {
2954 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2955 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
pbrook8da3ff12008-12-01 18:59:50 +00002956 mmio->region_offset[idx][1][i] = region_offset;
blueswir13ee89922008-01-02 19:45:26 +00002957 }
blueswir14254fab2008-01-01 16:57:19 +00002958 }
blueswir1db7b5422007-05-26 17:36:03 +00002959 }
2960
2961 return 0;
2962}
2963
aurel3200f82b82008-04-27 21:12:55 +00002964static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
pbrook8da3ff12008-12-01 18:59:50 +00002965 ram_addr_t orig_memory, ram_addr_t region_offset)
blueswir1db7b5422007-05-26 17:36:03 +00002966{
2967 subpage_t *mmio;
2968 int subpage_memory;
2969
2970 mmio = qemu_mallocz(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00002971
2972 mmio->base = base;
Avi Kivity1eed09c2009-06-14 11:38:51 +03002973 subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
blueswir1db7b5422007-05-26 17:36:03 +00002974#if defined(DEBUG_SUBPAGE)
aliguori1eec6142009-02-05 22:06:18 +00002975 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2976 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
blueswir1db7b5422007-05-26 17:36:03 +00002977#endif
aliguori1eec6142009-02-05 22:06:18 +00002978 *phys = subpage_memory | IO_MEM_SUBPAGE;
2979 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
pbrook8da3ff12008-12-01 18:59:50 +00002980 region_offset);
blueswir1db7b5422007-05-26 17:36:03 +00002981
2982 return mmio;
2983}
2984
aliguori88715652009-02-11 15:20:58 +00002985static int get_free_io_mem_idx(void)
2986{
2987 int i;
2988
2989 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
2990 if (!io_mem_used[i]) {
2991 io_mem_used[i] = 1;
2992 return i;
2993 }
2994
2995 return -1;
2996}
2997
bellard33417e72003-08-10 21:47:01 +00002998/* mem_read and mem_write are arrays of the functions used to access
 2999 byte (index 0), word (index 1) and dword (index
Paul Brook0b4e6e32009-04-30 18:37:55 +01003000 2). Functions can be omitted with a NULL function pointer.
blueswir13ee89922008-01-02 19:45:26 +00003001 If io_index is non-zero, the corresponding io zone is
blueswir14254fab2008-01-01 16:57:19 +00003002 modified. If it is zero, a new io zone is allocated. The return
 3003 value can be used with cpu_register_physical_memory(). -1 is
 3004 returned on error. */
Avi Kivity1eed09c2009-06-14 11:38:51 +03003005static int cpu_register_io_memory_fixed(int io_index,
3006 CPUReadMemoryFunc **mem_read,
3007 CPUWriteMemoryFunc **mem_write,
3008 void *opaque)
bellard33417e72003-08-10 21:47:01 +00003009{
blueswir14254fab2008-01-01 16:57:19 +00003010 int i, subwidth = 0;
bellard33417e72003-08-10 21:47:01 +00003011
3012 if (io_index <= 0) {
aliguori88715652009-02-11 15:20:58 +00003013 io_index = get_free_io_mem_idx();
3014 if (io_index == -1)
3015 return io_index;
bellard33417e72003-08-10 21:47:01 +00003016 } else {
Avi Kivity1eed09c2009-06-14 11:38:51 +03003017 io_index >>= IO_MEM_SHIFT;
bellard33417e72003-08-10 21:47:01 +00003018 if (io_index >= IO_MEM_NB_ENTRIES)
3019 return -1;
3020 }
bellardb5ff1b32005-11-26 10:38:39 +00003021
bellard33417e72003-08-10 21:47:01 +00003022 for(i = 0;i < 3; i++) {
blueswir14254fab2008-01-01 16:57:19 +00003023 if (!mem_read[i] || !mem_write[i])
3024 subwidth = IO_MEM_SUBWIDTH;
bellard33417e72003-08-10 21:47:01 +00003025 io_mem_read[io_index][i] = mem_read[i];
3026 io_mem_write[io_index][i] = mem_write[i];
3027 }
bellarda4193c82004-06-03 14:01:43 +00003028 io_mem_opaque[io_index] = opaque;
blueswir14254fab2008-01-01 16:57:19 +00003029 return (io_index << IO_MEM_SHIFT) | subwidth;
bellard33417e72003-08-10 21:47:01 +00003030}
bellard61382a52003-10-27 21:22:23 +00003031
Avi Kivity1eed09c2009-06-14 11:38:51 +03003032int cpu_register_io_memory(CPUReadMemoryFunc **mem_read,
3033 CPUWriteMemoryFunc **mem_write,
3034 void *opaque)
3035{
3036 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
3037}
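
/* Illustrative sketch (not part of the original file): registering a trivial
 * MMIO region.  All example_* names are hypothetical; the only function
 * assumed beyond this file is cpu_register_physical_memory(), mentioned in
 * the comment above.  Widths left NULL fall back to the IO_MEM_SUBWIDTH
 * handling selected by cpu_register_io_memory_fixed(). */
#if 0
static uint32_t example_mmio_readl(void *opaque, target_phys_addr_t addr)
{
    uint32_t *reg = opaque;
    return *reg;                    /* every offset reads the same register */
}

static void example_mmio_writel(void *opaque, target_phys_addr_t addr,
                                uint32_t val)
{
    uint32_t *reg = opaque;
    *reg = val;
}

static CPUReadMemoryFunc *example_mmio_read[3] = {
    NULL, NULL, example_mmio_readl,
};

static CPUWriteMemoryFunc *example_mmio_write[3] = {
    NULL, NULL, example_mmio_writel,
};

static void example_register_mmio(target_phys_addr_t base)
{
    static uint32_t example_reg;
    int io;

    io = cpu_register_io_memory(example_mmio_read, example_mmio_write,
                                &example_reg);
    /* Map one page of guest physical address space onto the new io zone. */
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io);
}
#endif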
3038
aliguori88715652009-02-11 15:20:58 +00003039void cpu_unregister_io_memory(int io_table_address)
3040{
3041 int i;
3042 int io_index = io_table_address >> IO_MEM_SHIFT;
3043
3044 for (i=0;i < 3; i++) {
3045 io_mem_read[io_index][i] = unassigned_mem_read[i];
3046 io_mem_write[io_index][i] = unassigned_mem_write[i];
3047 }
3048 io_mem_opaque[io_index] = NULL;
3049 io_mem_used[io_index] = 0;
3050}
3051
Avi Kivitye9179ce2009-06-14 11:38:52 +03003052static void io_mem_init(void)
3053{
3054 int i;
3055
3056 cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
3057 cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
3058 cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
3059 for (i=0; i<5; i++)
3060 io_mem_used[i] = 1;
3061
3062 io_mem_watch = cpu_register_io_memory(watch_mem_read,
3063 watch_mem_write, NULL);
3064#ifdef CONFIG_KQEMU
3065 if (kqemu_phys_ram_base) {
3066 /* alloc dirty bits array */
3067 phys_ram_dirty = qemu_vmalloc(kqemu_phys_ram_size >> TARGET_PAGE_BITS);
3068 memset(phys_ram_dirty, 0xff, kqemu_phys_ram_size >> TARGET_PAGE_BITS);
3069 }
3070#endif
3071}
3072
pbrooke2eef172008-06-08 01:09:01 +00003073#endif /* !defined(CONFIG_USER_ONLY) */
3074
bellard13eb76e2004-01-24 15:23:36 +00003075/* physical memory access (slow version, mainly for debug) */
3076#if defined(CONFIG_USER_ONLY)
ths5fafdf22007-09-16 21:08:06 +00003077void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
bellard13eb76e2004-01-24 15:23:36 +00003078 int len, int is_write)
3079{
3080 int l, flags;
3081 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00003082 void * p;
bellard13eb76e2004-01-24 15:23:36 +00003083
3084 while (len > 0) {
3085 page = addr & TARGET_PAGE_MASK;
3086 l = (page + TARGET_PAGE_SIZE) - addr;
3087 if (l > len)
3088 l = len;
3089 flags = page_get_flags(page);
3090 if (!(flags & PAGE_VALID))
3091 return;
3092 if (is_write) {
3093 if (!(flags & PAGE_WRITE))
3094 return;
bellard579a97f2007-11-11 14:26:47 +00003095 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003096 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
bellard579a97f2007-11-11 14:26:47 +00003097 /* FIXME - should this return an error rather than just fail? */
3098 return;
aurel3272fb7da2008-04-27 23:53:45 +00003099 memcpy(p, buf, l);
3100 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00003101 } else {
3102 if (!(flags & PAGE_READ))
3103 return;
bellard579a97f2007-11-11 14:26:47 +00003104 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003105 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
bellard579a97f2007-11-11 14:26:47 +00003106 /* FIXME - should this return an error rather than just fail? */
3107 return;
aurel3272fb7da2008-04-27 23:53:45 +00003108 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00003109 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00003110 }
3111 len -= l;
3112 buf += l;
3113 addr += l;
3114 }
3115}
bellard8df1cd02005-01-28 22:37:22 +00003116
bellard13eb76e2004-01-24 15:23:36 +00003117#else
ths5fafdf22007-09-16 21:08:06 +00003118void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
bellard13eb76e2004-01-24 15:23:36 +00003119 int len, int is_write)
3120{
3121 int l, io_index;
3122 uint8_t *ptr;
3123 uint32_t val;
bellard2e126692004-04-25 21:28:44 +00003124 target_phys_addr_t page;
3125 unsigned long pd;
bellard92e873b2004-05-21 14:52:29 +00003126 PhysPageDesc *p;
ths3b46e622007-09-17 08:09:54 +00003127
bellard13eb76e2004-01-24 15:23:36 +00003128 while (len > 0) {
3129 page = addr & TARGET_PAGE_MASK;
3130 l = (page + TARGET_PAGE_SIZE) - addr;
3131 if (l > len)
3132 l = len;
bellard92e873b2004-05-21 14:52:29 +00003133 p = phys_page_find(page >> TARGET_PAGE_BITS);
bellard13eb76e2004-01-24 15:23:36 +00003134 if (!p) {
3135 pd = IO_MEM_UNASSIGNED;
3136 } else {
3137 pd = p->phys_offset;
3138 }
ths3b46e622007-09-17 08:09:54 +00003139
bellard13eb76e2004-01-24 15:23:36 +00003140 if (is_write) {
bellard3a7d9292005-08-21 09:26:42 +00003141 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
aurel326c2934d2009-02-18 21:37:17 +00003142 target_phys_addr_t addr1 = addr;
bellard13eb76e2004-01-24 15:23:36 +00003143 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00003144 if (p)
aurel326c2934d2009-02-18 21:37:17 +00003145 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bellard6a00d602005-11-21 23:25:50 +00003146 /* XXX: could force cpu_single_env to NULL to avoid
3147 potential bugs */
aurel326c2934d2009-02-18 21:37:17 +00003148 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003149 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003150 val = ldl_p(buf);
aurel326c2934d2009-02-18 21:37:17 +00003151 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
bellard13eb76e2004-01-24 15:23:36 +00003152 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003153 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003154 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003155 val = lduw_p(buf);
aurel326c2934d2009-02-18 21:37:17 +00003156 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
bellard13eb76e2004-01-24 15:23:36 +00003157 l = 2;
3158 } else {
bellard1c213d12005-09-03 10:49:04 +00003159 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003160 val = ldub_p(buf);
aurel326c2934d2009-02-18 21:37:17 +00003161 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
bellard13eb76e2004-01-24 15:23:36 +00003162 l = 1;
3163 }
3164 } else {
bellardb448f2f2004-02-25 23:24:04 +00003165 unsigned long addr1;
3166 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
bellard13eb76e2004-01-24 15:23:36 +00003167 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003168 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00003169 memcpy(ptr, buf, l);
bellard3a7d9292005-08-21 09:26:42 +00003170 if (!cpu_physical_memory_is_dirty(addr1)) {
3171 /* invalidate code */
3172 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3173 /* set dirty bit */
ths5fafdf22007-09-16 21:08:06 +00003174 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
bellardf23db162005-08-21 19:12:28 +00003175 (0xff & ~CODE_DIRTY_FLAG);
bellard3a7d9292005-08-21 09:26:42 +00003176 }
bellard13eb76e2004-01-24 15:23:36 +00003177 }
3178 } else {
ths5fafdf22007-09-16 21:08:06 +00003179 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
bellard2a4188a2006-06-25 21:54:59 +00003180 !(pd & IO_MEM_ROMD)) {
aurel326c2934d2009-02-18 21:37:17 +00003181 target_phys_addr_t addr1 = addr;
bellard13eb76e2004-01-24 15:23:36 +00003182 /* I/O case */
3183 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00003184 if (p)
aurel326c2934d2009-02-18 21:37:17 +00003185 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3186 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003187 /* 32 bit read access */
aurel326c2934d2009-02-18 21:37:17 +00003188 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
bellardc27004e2005-01-03 23:35:10 +00003189 stl_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003190 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003191 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003192 /* 16 bit read access */
aurel326c2934d2009-02-18 21:37:17 +00003193 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
bellardc27004e2005-01-03 23:35:10 +00003194 stw_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003195 l = 2;
3196 } else {
bellard1c213d12005-09-03 10:49:04 +00003197 /* 8 bit read access */
aurel326c2934d2009-02-18 21:37:17 +00003198 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
bellardc27004e2005-01-03 23:35:10 +00003199 stb_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003200 l = 1;
3201 }
3202 } else {
3203 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003204 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bellard13eb76e2004-01-24 15:23:36 +00003205 (addr & ~TARGET_PAGE_MASK);
3206 memcpy(buf, ptr, l);
3207 }
3208 }
3209 len -= l;
3210 buf += l;
3211 addr += l;
3212 }
3213}
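
/* Illustrative sketch (not part of the original file): device models usually
 * reach the function above through the cpu_physical_memory_read() and
 * cpu_physical_memory_write() wrappers used later in this file, which just
 * fix the is_write argument.  The guest physical address passed in is a
 * hypothetical value chosen by the caller. */
#if 0
static void example_copy_through_guest_ram(target_phys_addr_t gpa)
{
    uint32_t out = 0x12345678;
    uint32_t in = 0;

    cpu_physical_memory_write(gpa, (const uint8_t *)&out, sizeof(out));
    cpu_physical_memory_read(gpa, (uint8_t *)&in, sizeof(in));
    /* For plain RAM the two values match; for MMIO the registered
       io_mem_read/io_mem_write handlers run instead. */
}
#endif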
bellard8df1cd02005-01-28 22:37:22 +00003214
bellardd0ecd2a2006-04-23 17:14:48 +00003215/* used for ROM loading : can write in RAM and ROM */
ths5fafdf22007-09-16 21:08:06 +00003216void cpu_physical_memory_write_rom(target_phys_addr_t addr,
bellardd0ecd2a2006-04-23 17:14:48 +00003217 const uint8_t *buf, int len)
3218{
3219 int l;
3220 uint8_t *ptr;
3221 target_phys_addr_t page;
3222 unsigned long pd;
3223 PhysPageDesc *p;
ths3b46e622007-09-17 08:09:54 +00003224
bellardd0ecd2a2006-04-23 17:14:48 +00003225 while (len > 0) {
3226 page = addr & TARGET_PAGE_MASK;
3227 l = (page + TARGET_PAGE_SIZE) - addr;
3228 if (l > len)
3229 l = len;
3230 p = phys_page_find(page >> TARGET_PAGE_BITS);
3231 if (!p) {
3232 pd = IO_MEM_UNASSIGNED;
3233 } else {
3234 pd = p->phys_offset;
3235 }
ths3b46e622007-09-17 08:09:54 +00003236
bellardd0ecd2a2006-04-23 17:14:48 +00003237 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
bellard2a4188a2006-06-25 21:54:59 +00003238 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3239 !(pd & IO_MEM_ROMD)) {
bellardd0ecd2a2006-04-23 17:14:48 +00003240 /* do nothing */
3241 } else {
3242 unsigned long addr1;
3243 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3244 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003245 ptr = qemu_get_ram_ptr(addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00003246 memcpy(ptr, buf, l);
3247 }
3248 len -= l;
3249 buf += l;
3250 addr += l;
3251 }
3252}
3253
aliguori6d16c2f2009-01-22 16:59:11 +00003254typedef struct {
3255 void *buffer;
3256 target_phys_addr_t addr;
3257 target_phys_addr_t len;
3258} BounceBuffer;
3259
3260static BounceBuffer bounce;
3261
aliguoriba223c22009-01-22 16:59:16 +00003262typedef struct MapClient {
3263 void *opaque;
3264 void (*callback)(void *opaque);
3265 LIST_ENTRY(MapClient) link;
3266} MapClient;
3267
3268static LIST_HEAD(map_client_list, MapClient) map_client_list
3269 = LIST_HEAD_INITIALIZER(map_client_list);
3270
3271void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3272{
3273 MapClient *client = qemu_malloc(sizeof(*client));
3274
3275 client->opaque = opaque;
3276 client->callback = callback;
3277 LIST_INSERT_HEAD(&map_client_list, client, link);
3278 return client;
3279}
3280
3281void cpu_unregister_map_client(void *_client)
3282{
3283 MapClient *client = (MapClient *)_client;
3284
3285 LIST_REMOVE(client, link);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09003286 qemu_free(client);
aliguoriba223c22009-01-22 16:59:16 +00003287}
3288
3289static void cpu_notify_map_clients(void)
3290{
3291 MapClient *client;
3292
3293 while (!LIST_EMPTY(&map_client_list)) {
3294 client = LIST_FIRST(&map_client_list);
3295 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09003296 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00003297 }
3298}
3299
aliguori6d16c2f2009-01-22 16:59:11 +00003300/* Map a physical memory region into a host virtual address.
3301 * May map a subset of the requested range, given by and returned in *plen.
3302 * May return NULL if resources needed to perform the mapping are exhausted.
3303 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00003304 * Use cpu_register_map_client() to know when retrying the map operation is
3305 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00003306 */
3307void *cpu_physical_memory_map(target_phys_addr_t addr,
3308 target_phys_addr_t *plen,
3309 int is_write)
3310{
3311 target_phys_addr_t len = *plen;
3312 target_phys_addr_t done = 0;
3313 int l;
3314 uint8_t *ret = NULL;
3315 uint8_t *ptr;
3316 target_phys_addr_t page;
3317 unsigned long pd;
3318 PhysPageDesc *p;
3319 unsigned long addr1;
3320
3321 while (len > 0) {
3322 page = addr & TARGET_PAGE_MASK;
3323 l = (page + TARGET_PAGE_SIZE) - addr;
3324 if (l > len)
3325 l = len;
3326 p = phys_page_find(page >> TARGET_PAGE_BITS);
3327 if (!p) {
3328 pd = IO_MEM_UNASSIGNED;
3329 } else {
3330 pd = p->phys_offset;
3331 }
3332
3333 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3334 if (done || bounce.buffer) {
3335 break;
3336 }
3337 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3338 bounce.addr = addr;
3339 bounce.len = l;
3340 if (!is_write) {
3341 cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3342 }
3343 ptr = bounce.buffer;
3344 } else {
3345 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
pbrook5579c7f2009-04-11 14:47:08 +00003346 ptr = qemu_get_ram_ptr(addr1);
aliguori6d16c2f2009-01-22 16:59:11 +00003347 }
3348 if (!done) {
3349 ret = ptr;
3350 } else if (ret + done != ptr) {
3351 break;
3352 }
3353
3354 len -= l;
3355 addr += l;
3356 done += l;
3357 }
3358 *plen = done;
3359 return ret;
3360}
3361
3362/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3363 * Will also mark the memory as dirty if is_write == 1. access_len gives
3364 * the amount of memory that was actually read or written by the caller.
3365 */
3366void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3367 int is_write, target_phys_addr_t access_len)
3368{
3369 if (buffer != bounce.buffer) {
3370 if (is_write) {
pbrook5579c7f2009-04-11 14:47:08 +00003371 ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00003372 while (access_len) {
3373 unsigned l;
3374 l = TARGET_PAGE_SIZE;
3375 if (l > access_len)
3376 l = access_len;
3377 if (!cpu_physical_memory_is_dirty(addr1)) {
3378 /* invalidate code */
3379 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3380 /* set dirty bit */
3381 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3382 (0xff & ~CODE_DIRTY_FLAG);
3383 }
3384 addr1 += l;
3385 access_len -= l;
3386 }
3387 }
3388 return;
3389 }
3390 if (is_write) {
3391 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3392 }
3393 qemu_free(bounce.buffer);
3394 bounce.buffer = NULL;
aliguoriba223c22009-01-22 16:59:16 +00003395 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00003396}
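
/* Illustrative sketch (not part of the original file): the intended calling
 * pattern for cpu_physical_memory_map()/cpu_physical_memory_unmap().  When
 * the mapping fails (for example because the single bounce buffer is busy),
 * the caller can fall back to cpu_physical_memory_rw(), or register a
 * callback with cpu_register_map_client() and retry later.  The example_*
 * name is hypothetical. */
#if 0
static void example_dma_write(target_phys_addr_t addr, const uint8_t *data,
                              target_phys_addr_t len)
{
    target_phys_addr_t plen = len;
    void *mem = cpu_physical_memory_map(addr, &plen, 1 /* is_write */);

    if (!mem) {
        /* Mapping resources exhausted: use the slow copying path. */
        cpu_physical_memory_rw(addr, (uint8_t *)data, len, 1);
        return;
    }
    /* plen may be shorter than len if the region was not fully mappable. */
    memcpy(mem, data, plen);
    cpu_physical_memory_unmap(mem, plen, 1, plen);
}
#endif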
bellardd0ecd2a2006-04-23 17:14:48 +00003397
bellard8df1cd02005-01-28 22:37:22 +00003398/* warning: addr must be aligned */
3399uint32_t ldl_phys(target_phys_addr_t addr)
3400{
3401 int io_index;
3402 uint8_t *ptr;
3403 uint32_t val;
3404 unsigned long pd;
3405 PhysPageDesc *p;
3406
3407 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3408 if (!p) {
3409 pd = IO_MEM_UNASSIGNED;
3410 } else {
3411 pd = p->phys_offset;
3412 }
ths3b46e622007-09-17 08:09:54 +00003413
ths5fafdf22007-09-16 21:08:06 +00003414 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
bellard2a4188a2006-06-25 21:54:59 +00003415 !(pd & IO_MEM_ROMD)) {
bellard8df1cd02005-01-28 22:37:22 +00003416 /* I/O case */
3417 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00003418 if (p)
3419 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bellard8df1cd02005-01-28 22:37:22 +00003420 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3421 } else {
3422 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003423 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bellard8df1cd02005-01-28 22:37:22 +00003424 (addr & ~TARGET_PAGE_MASK);
3425 val = ldl_p(ptr);
3426 }
3427 return val;
3428}
3429
bellard84b7b8e2005-11-28 21:19:04 +00003430/* warning: addr must be aligned */
3431uint64_t ldq_phys(target_phys_addr_t addr)
3432{
3433 int io_index;
3434 uint8_t *ptr;
3435 uint64_t val;
3436 unsigned long pd;
3437 PhysPageDesc *p;
3438
3439 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3440 if (!p) {
3441 pd = IO_MEM_UNASSIGNED;
3442 } else {
3443 pd = p->phys_offset;
3444 }
ths3b46e622007-09-17 08:09:54 +00003445
bellard2a4188a2006-06-25 21:54:59 +00003446 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3447 !(pd & IO_MEM_ROMD)) {
bellard84b7b8e2005-11-28 21:19:04 +00003448 /* I/O case */
3449 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00003450 if (p)
3451 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bellard84b7b8e2005-11-28 21:19:04 +00003452#ifdef TARGET_WORDS_BIGENDIAN
3453 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3454 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3455#else
3456 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3457 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3458#endif
3459 } else {
3460 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003461 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bellard84b7b8e2005-11-28 21:19:04 +00003462 (addr & ~TARGET_PAGE_MASK);
3463 val = ldq_p(ptr);
3464 }
3465 return val;
3466}
3467
bellardaab33092005-10-30 20:48:42 +00003468/* XXX: optimize */
3469uint32_t ldub_phys(target_phys_addr_t addr)
3470{
3471 uint8_t val;
3472 cpu_physical_memory_read(addr, &val, 1);
3473 return val;
3474}
3475
3476/* XXX: optimize */
3477uint32_t lduw_phys(target_phys_addr_t addr)
3478{
3479 uint16_t val;
3480 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3481 return tswap16(val);
3482}
3483
bellard8df1cd02005-01-28 22:37:22 +00003484/* warning: addr must be aligned. The ram page is not masked as dirty
3485 and the code inside is not invalidated. It is useful if the dirty
3486 bits are used to track modified PTEs */
3487void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3488{
3489 int io_index;
3490 uint8_t *ptr;
3491 unsigned long pd;
3492 PhysPageDesc *p;
3493
3494 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3495 if (!p) {
3496 pd = IO_MEM_UNASSIGNED;
3497 } else {
3498 pd = p->phys_offset;
3499 }
ths3b46e622007-09-17 08:09:54 +00003500
bellard3a7d9292005-08-21 09:26:42 +00003501 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
bellard8df1cd02005-01-28 22:37:22 +00003502 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00003503 if (p)
3504 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bellard8df1cd02005-01-28 22:37:22 +00003505 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3506 } else {
aliguori74576192008-10-06 14:02:03 +00003507 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
pbrook5579c7f2009-04-11 14:47:08 +00003508 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00003509 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00003510
3511 if (unlikely(in_migration)) {
3512 if (!cpu_physical_memory_is_dirty(addr1)) {
3513 /* invalidate code */
3514 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3515 /* set dirty bit */
3516 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3517 (0xff & ~CODE_DIRTY_FLAG);
3518 }
3519 }
bellard8df1cd02005-01-28 22:37:22 +00003520 }
3521}
3522
j_mayerbc98a7e2007-04-04 07:55:12 +00003523void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3524{
3525 int io_index;
3526 uint8_t *ptr;
3527 unsigned long pd;
3528 PhysPageDesc *p;
3529
3530 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3531 if (!p) {
3532 pd = IO_MEM_UNASSIGNED;
3533 } else {
3534 pd = p->phys_offset;
3535 }
ths3b46e622007-09-17 08:09:54 +00003536
j_mayerbc98a7e2007-04-04 07:55:12 +00003537 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3538 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00003539 if (p)
3540 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
j_mayerbc98a7e2007-04-04 07:55:12 +00003541#ifdef TARGET_WORDS_BIGENDIAN
3542 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3543 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3544#else
3545 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3546 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3547#endif
3548 } else {
pbrook5579c7f2009-04-11 14:47:08 +00003549 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
j_mayerbc98a7e2007-04-04 07:55:12 +00003550 (addr & ~TARGET_PAGE_MASK);
3551 stq_p(ptr, val);
3552 }
3553}
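
/* Illustrative sketch (not part of the original file): the notdirty variants
 * above are meant for target MMU code that updates guest page table entries,
 * where marking the RAM page dirty (and invalidating translated code) is not
 * wanted; stl_phys() below performs the full dirty tracking.  pte_addr and
 * pte are hypothetical values supplied by the caller. */
#if 0
static void example_update_pte(target_phys_addr_t pte_addr, uint32_t pte)
{
    /* PTE store: the containing RAM page is not flagged as dirty. */
    stl_phys_notdirty(pte_addr, pte);
}
#endif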
3554
bellard8df1cd02005-01-28 22:37:22 +00003555/* warning: addr must be aligned */
bellard8df1cd02005-01-28 22:37:22 +00003556void stl_phys(target_phys_addr_t addr, uint32_t val)
3557{
3558 int io_index;
3559 uint8_t *ptr;
3560 unsigned long pd;
3561 PhysPageDesc *p;
3562
3563 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3564 if (!p) {
3565 pd = IO_MEM_UNASSIGNED;
3566 } else {
3567 pd = p->phys_offset;
3568 }
ths3b46e622007-09-17 08:09:54 +00003569
bellard3a7d9292005-08-21 09:26:42 +00003570 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
bellard8df1cd02005-01-28 22:37:22 +00003571 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00003572 if (p)
3573 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bellard8df1cd02005-01-28 22:37:22 +00003574 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3575 } else {
3576 unsigned long addr1;
3577 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3578 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003579 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00003580 stl_p(ptr, val);
bellard3a7d9292005-08-21 09:26:42 +00003581 if (!cpu_physical_memory_is_dirty(addr1)) {
3582 /* invalidate code */
3583 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3584 /* set dirty bit */
bellardf23db162005-08-21 19:12:28 +00003585 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3586 (0xff & ~CODE_DIRTY_FLAG);
bellard3a7d9292005-08-21 09:26:42 +00003587 }
bellard8df1cd02005-01-28 22:37:22 +00003588 }
3589}
3590
bellardaab33092005-10-30 20:48:42 +00003591/* XXX: optimize */
3592void stb_phys(target_phys_addr_t addr, uint32_t val)
3593{
3594 uint8_t v = val;
3595 cpu_physical_memory_write(addr, &v, 1);
3596}
3597
3598/* XXX: optimize */
3599void stw_phys(target_phys_addr_t addr, uint32_t val)
3600{
3601 uint16_t v = tswap16(val);
3602 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3603}
3604
3605/* XXX: optimize */
3606void stq_phys(target_phys_addr_t addr, uint64_t val)
3607{
3608 val = tswap64(val);
3609 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3610}
3611
bellard13eb76e2004-01-24 15:23:36 +00003612#endif
3613
aliguori5e2972f2009-03-28 17:51:36 +00003614/* virtual memory access for debug (includes writing to ROM) */
ths5fafdf22007-09-16 21:08:06 +00003615int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00003616 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003617{
3618 int l;
j_mayer9b3c35e2007-04-07 11:21:28 +00003619 target_phys_addr_t phys_addr;
3620 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00003621
3622 while (len > 0) {
3623 page = addr & TARGET_PAGE_MASK;
3624 phys_addr = cpu_get_phys_page_debug(env, page);
3625 /* if no physical page mapped, return an error */
3626 if (phys_addr == -1)
3627 return -1;
3628 l = (page + TARGET_PAGE_SIZE) - addr;
3629 if (l > len)
3630 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00003631 phys_addr += (addr & ~TARGET_PAGE_MASK);
3632#if !defined(CONFIG_USER_ONLY)
3633 if (is_write)
3634 cpu_physical_memory_write_rom(phys_addr, buf, l);
3635 else
3636#endif
3637 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
bellard13eb76e2004-01-24 15:23:36 +00003638 len -= l;
3639 buf += l;
3640 addr += l;
3641 }
3642 return 0;
3643}
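
/* Illustrative sketch (not part of the original file): this is the path used
 * by debugger front ends such as the gdb stub, going through a specific CPU's
 * virtual-to-physical translation rather than taking a physical address
 * directly.  example_dump_guest_insn and its buffer size are hypothetical. */
#if 0
static int example_dump_guest_insn(CPUState *env, target_ulong pc)
{
    uint8_t insn[16];

    /* is_write == 0: read up to 16 bytes of guest code at pc. */
    return cpu_memory_rw_debug(env, pc, insn, sizeof(insn), 0);
}
#endif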
3644
pbrook2e70f6e2008-06-29 01:03:05 +00003645/* in deterministic execution mode, instructions doing device I/Os
3646 must be at the end of the TB */
3647void cpu_io_recompile(CPUState *env, void *retaddr)
3648{
3649 TranslationBlock *tb;
3650 uint32_t n, cflags;
3651 target_ulong pc, cs_base;
3652 uint64_t flags;
3653
3654 tb = tb_find_pc((unsigned long)retaddr);
3655 if (!tb) {
3656 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3657 retaddr);
3658 }
3659 n = env->icount_decr.u16.low + tb->icount;
3660 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3661 /* Calculate how many instructions had been executed before the fault
thsbf20dc02008-06-30 17:22:19 +00003662 occurred. */
pbrook2e70f6e2008-06-29 01:03:05 +00003663 n = n - env->icount_decr.u16.low;
3664 /* Generate a new TB ending on the I/O insn. */
3665 n++;
3666 /* On MIPS and SH, delay slot instructions can only be restarted if
3667 they were already the first instruction in the TB. If this is not
thsbf20dc02008-06-30 17:22:19 +00003668 the first instruction in a TB then re-execute the preceding
pbrook2e70f6e2008-06-29 01:03:05 +00003669 branch. */
3670#if defined(TARGET_MIPS)
3671 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3672 env->active_tc.PC -= 4;
3673 env->icount_decr.u16.low++;
3674 env->hflags &= ~MIPS_HFLAG_BMASK;
3675 }
3676#elif defined(TARGET_SH4)
3677 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3678 && n > 1) {
3679 env->pc -= 2;
3680 env->icount_decr.u16.low++;
3681 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3682 }
3683#endif
3684 /* This should never happen. */
3685 if (n > CF_COUNT_MASK)
3686 cpu_abort(env, "TB too big during recompile");
3687
3688 cflags = n | CF_LAST_IO;
3689 pc = tb->pc;
3690 cs_base = tb->cs_base;
3691 flags = tb->flags;
3692 tb_phys_invalidate(tb, -1);
3693 /* FIXME: In theory this could raise an exception. In practice
3694 we have already translated the block once so it's probably ok. */
3695 tb_gen_code(env, pc, cs_base, flags, cflags);
thsbf20dc02008-06-30 17:22:19 +00003696 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
pbrook2e70f6e2008-06-29 01:03:05 +00003697 the first in the TB) then we end up generating a whole new TB and
3698 repeating the fault, which is horribly inefficient.
3699 Better would be to execute just this insn uncached, or generate a
3700 second new TB. */
3701 cpu_resume_from_signal(env, NULL);
3702}
3703
bellarde3db7222005-01-26 22:00:47 +00003704void dump_exec_info(FILE *f,
3705 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3706{
3707 int i, target_code_size, max_target_code_size;
3708 int direct_jmp_count, direct_jmp2_count, cross_page;
3709 TranslationBlock *tb;
ths3b46e622007-09-17 08:09:54 +00003710
bellarde3db7222005-01-26 22:00:47 +00003711 target_code_size = 0;
3712 max_target_code_size = 0;
3713 cross_page = 0;
3714 direct_jmp_count = 0;
3715 direct_jmp2_count = 0;
3716 for(i = 0; i < nb_tbs; i++) {
3717 tb = &tbs[i];
3718 target_code_size += tb->size;
3719 if (tb->size > max_target_code_size)
3720 max_target_code_size = tb->size;
3721 if (tb->page_addr[1] != -1)
3722 cross_page++;
3723 if (tb->tb_next_offset[0] != 0xffff) {
3724 direct_jmp_count++;
3725 if (tb->tb_next_offset[1] != 0xffff) {
3726 direct_jmp2_count++;
3727 }
3728 }
3729 }
3730 /* XXX: avoid using doubles ? */
bellard57fec1f2008-02-01 10:50:11 +00003731 cpu_fprintf(f, "Translation buffer state:\n");
bellard26a5f132008-05-28 12:30:31 +00003732 cpu_fprintf(f, "gen code size %ld/%ld\n",
3733 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3734 cpu_fprintf(f, "TB count %d/%d\n",
3735 nb_tbs, code_gen_max_blocks);
ths5fafdf22007-09-16 21:08:06 +00003736 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
bellarde3db7222005-01-26 22:00:47 +00003737 nb_tbs ? target_code_size / nb_tbs : 0,
3738 max_target_code_size);
ths5fafdf22007-09-16 21:08:06 +00003739 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
bellarde3db7222005-01-26 22:00:47 +00003740 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3741 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
ths5fafdf22007-09-16 21:08:06 +00003742 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3743 cross_page,
bellarde3db7222005-01-26 22:00:47 +00003744 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3745 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
ths5fafdf22007-09-16 21:08:06 +00003746 direct_jmp_count,
bellarde3db7222005-01-26 22:00:47 +00003747 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3748 direct_jmp2_count,
3749 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
bellard57fec1f2008-02-01 10:50:11 +00003750 cpu_fprintf(f, "\nStatistics:\n");
bellarde3db7222005-01-26 22:00:47 +00003751 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3752 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3753 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
bellardb67d9a52008-05-23 09:57:34 +00003754 tcg_dump_info(f, cpu_fprintf);
bellarde3db7222005-01-26 22:00:47 +00003755}
3756
ths5fafdf22007-09-16 21:08:06 +00003757#if !defined(CONFIG_USER_ONLY)
bellard61382a52003-10-27 21:22:23 +00003758
3759#define MMUSUFFIX _cmmu
3760#define GETPC() NULL
3761#define env cpu_single_env
bellardb769d8f2004-10-03 15:07:13 +00003762#define SOFTMMU_CODE_ACCESS
bellard61382a52003-10-27 21:22:23 +00003763
3764#define SHIFT 0
3765#include "softmmu_template.h"
3766
3767#define SHIFT 1
3768#include "softmmu_template.h"
3769
3770#define SHIFT 2
3771#include "softmmu_template.h"
3772
3773#define SHIFT 3
3774#include "softmmu_template.h"
3775
3776#undef env
3777
3778#endif