/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "tcg.h"
#include "hw/hw.h"
#include "osdep.h"
#include "kvm.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#include <signal.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

static TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
   have limited branch ranges (possibly also PPC) so place it in a
   section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
uint8_t *phys_ram_dirty;
static int in_migration;

typedef struct RAMBlock {
    uint8_t *host;
    ram_addr_t offset;
    ram_addr_t length;
    struct RAMBlock *next;
} RAMBlock;

static RAMBlock *ram_blocks;
/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
   then we can no longer assume contiguous ram offsets, and external uses
   of this variable will break.  */
ram_addr_t last_ram_offset;
#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
int64_t qemu_icount;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

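/* Example of how an address is decomposed by this two-level table, assuming
   the common case of a 4 KiB target page (TARGET_PAGE_BITS == 12) and 32-bit
   virtual addresses: bits [11:0] are the offset inside the page, the next
   L2_BITS (10) bits index a PageDesc within one second-level array, and the
   top L1_BITS (10) bits select that array in l1_map.  A sketch of the index
   arithmetic used by the lookup helpers further below: */
#if 0
static inline void example_page_indices(target_ulong addr)
{
    target_ulong index = addr >> TARGET_PAGE_BITS;  /* virtual page number */
    target_ulong l1_index = index >> L2_BITS;       /* slot in l1_map[] */
    target_ulong l2_index = index & (L2_SIZE - 1);  /* slot in the PageDesc array */
    (void)l1_index;
    (void)l2_index;
}
#endif
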
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
static PhysPageDesc **l1_phys_map;

static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

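/* map_exec() below makes a host memory range executable in addition to
   readable/writable, so that code emitted into it can be run in place: the
   Win32 variant uses VirtualProtect(), the POSIX variant rounds the range out
   to host page boundaries and calls mprotect().  It is used for both the
   translation cache and the generated prologue. */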
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

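/* page_init() determines the host page size, derives the qemu_host_page_*
   values (never smaller than TARGET_PAGE_SIZE), allocates the physical page
   table for system emulation and, for user-mode emulation on POSIX hosts,
   parses /proc/self/maps so that every region already mapped in the host
   process is flagged PAGE_RESERVED. */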
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
#if !defined(CONFIG_USER_ONLY)
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
#endif

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        mmap_lock();
        last_brk = (unsigned long)sbrk(0);
        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                  (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
        mmap_unlock();
    }
#endif
}

static inline PageDesc **page_l1_map(target_ulong index)
{
#if TARGET_LONG_BITS > 32
    /* Host memory outside guest VM.  For 32-bit targets we have already
       excluded high addresses.  */
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
        return NULL;
#endif
    return &l1_map[index >> L2_BITS];
}

static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p) {
        /* allocate if not found */
#if defined(CONFIG_USER_ONLY)
        size_t len = sizeof(PageDesc) * L2_SIZE;
        /* Don't use qemu_malloc because it may recurse.  */
        p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        *lp = p;
        if (h2g_valid(p)) {
            unsigned long addr = h2g(p);
            page_set_flags(addr & TARGET_PAGE_MASK,
                           TARGET_PAGE_ALIGN(addr + len),
                           PAGE_RESERVED);
        }
#else
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
        *lp = p;
#endif
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p) {
        return NULL;
    }
    return p + (index & (L2_SIZE - 1));
}

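/* Note that both lookup helpers above take a virtual page number rather than
   a byte address; callers throughout this file pass addr >> TARGET_PAGE_BITS.
   page_find_alloc() creates the second-level PageDesc array on demand (using
   mmap() in user mode so it cannot recurse into qemu_malloc()), while
   page_find() only walks the existing table and returns NULL for pages that
   have never been touched. */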
#if !defined(CONFIG_USER_ONLY)
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
#endif

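/* code_gen_alloc() reserves the buffer that receives the generated host code.
   With USE_STATIC_CODE_GEN_BUFFER (user mode) a static array is reused;
   otherwise the buffer is mmap()ed with host-specific placement constraints
   so that direct calls and branches in generated code can reach it.  The
   buffer is made executable with map_exec().  code_gen_buffer_max_size keeps
   one maximum-sized translation block of head-room so a block that starts
   below the limit can always be completed, and code_gen_max_blocks sizes the
   tbs[] descriptor array from the average block size. */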
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static void cpu_common_pre_save(void *opaque)
{
    CPUState *env = opaque;

    cpu_synchronize_state(env);
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *env = opaque;

    cpu_synchronize_state(env);
    return 0;
}

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .pre_save = cpu_common_pre_save,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(cpu_index, &vmstate_cpu_common, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

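/* The TB lists manipulated below use tagged pointers: the two low-order bits
   of each link carry extra state and the helpers mask with ~3 to recover the
   real pointer.  In page_next[] the tag records which of the (up to two)
   pages of a TB the link belongs to; in the circular jmp_first/jmp_next[]
   lists it records which of the two jump slots of the pointed-to TB is part
   of the chain, with the tag value 2 marking the list head (the 'tb | 2'
   value stored in jmp_first). */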
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

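/* set_bits() sets bits [start, start + len) in the byte array 'tab', LSB
   first within each byte (bit i lives in tab[i >> 3], position i & 7).  For
   example, set_bits(tab, 10, 4) sets bits 2..5 of tab[1]. */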
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

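/* A page's code_bitmap holds one bit per byte of the guest page; a set bit
   means that byte is covered by at least one translated block.  The bitmap is
   only built once a page has accumulated SMC_BITMAP_USE_THRESHOLD
   self-modifying-code lookups (see tb_invalidate_phys_page_range), and it
   lets tb_invalidate_phys_page_fast() skip the costly range invalidation when
   a write does not actually hit translated code. */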
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
    return tb;
}

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    target_ulong tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

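/* tbs[] is filled in allocation order and each block's code is carved
   sequentially from code_gen_buffer, so tc_ptr grows monotonically with the
   array index between flushes; the binary search in tb_find_pc() below relies
   on that ordering. */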
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
#endif /* TARGET_HAS_ICE */

pbrook6658ffb2007-03-16 23:58:11 +00001336/* Add a watchpoint. */
aliguoria1d1bb32008-11-18 20:07:32 +00001337int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1338 int flags, CPUWatchpoint **watchpoint)
pbrook6658ffb2007-03-16 23:58:11 +00001339{
aliguorib4051332008-11-18 20:14:20 +00001340 target_ulong len_mask = ~(len - 1);
aliguoric0ce9982008-11-25 22:13:57 +00001341 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001342
aliguorib4051332008-11-18 20:14:20 +00001343 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1344 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1345 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1346 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1347 return -EINVAL;
1348 }
aliguoria1d1bb32008-11-18 20:07:32 +00001349 wp = qemu_malloc(sizeof(*wp));
pbrook6658ffb2007-03-16 23:58:11 +00001350
aliguoria1d1bb32008-11-18 20:07:32 +00001351 wp->vaddr = addr;
aliguorib4051332008-11-18 20:14:20 +00001352 wp->len_mask = len_mask;
aliguoria1d1bb32008-11-18 20:07:32 +00001353 wp->flags = flags;
1354
aliguori2dc9f412008-11-18 20:56:59 +00001355 /* keep all GDB-injected watchpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001356 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001357 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001358 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001359 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001360
pbrook6658ffb2007-03-16 23:58:11 +00001361 tlb_flush_page(env, addr);
aliguoria1d1bb32008-11-18 20:07:32 +00001362
1363 if (watchpoint)
1364 *watchpoint = wp;
1365 return 0;
pbrook6658ffb2007-03-16 23:58:11 +00001366}
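/* Usage sketch (illustrative, not from the original file): the gdb stub
   registers guest-visible watchpoints with the BP_GDB flag so that
   cpu_watchpoint_remove_all(env, BP_GDB) only touches its own entries:

       CPUWatchpoint *wp;
       if (cpu_watchpoint_insert(env, addr, 4, BP_GDB | BP_MEM_WRITE, &wp) < 0)
           return -EINVAL;

   BP_MEM_WRITE is assumed to be one of the BP_MEM_* access-type flags from
   cpu-defs.h; len must be a power of two (1/2/4/8) and addr aligned to it,
   as enforced by the check above. */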
1367
aliguoria1d1bb32008-11-18 20:07:32 +00001368/* Remove a specific watchpoint. */
1369int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1370 int flags)
pbrook6658ffb2007-03-16 23:58:11 +00001371{
aliguorib4051332008-11-18 20:14:20 +00001372 target_ulong len_mask = ~(len - 1);
aliguoria1d1bb32008-11-18 20:07:32 +00001373 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001374
Blue Swirl72cf2d42009-09-12 07:36:22 +00001375 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001376 if (addr == wp->vaddr && len_mask == wp->len_mask
aliguori6e140f22008-11-18 20:37:55 +00001377 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
aliguoria1d1bb32008-11-18 20:07:32 +00001378 cpu_watchpoint_remove_by_ref(env, wp);
pbrook6658ffb2007-03-16 23:58:11 +00001379 return 0;
1380 }
1381 }
aliguoria1d1bb32008-11-18 20:07:32 +00001382 return -ENOENT;
pbrook6658ffb2007-03-16 23:58:11 +00001383}
1384
aliguoria1d1bb32008-11-18 20:07:32 +00001385/* Remove a specific watchpoint by reference. */
1386void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1387{
Blue Swirl72cf2d42009-09-12 07:36:22 +00001388 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
edgar_igl7d03f822008-05-17 18:58:29 +00001389
aliguoria1d1bb32008-11-18 20:07:32 +00001390 tlb_flush_page(env, watchpoint->vaddr);
1391
1392 qemu_free(watchpoint);
edgar_igl7d03f822008-05-17 18:58:29 +00001393}
1394
aliguoria1d1bb32008-11-18 20:07:32 +00001395/* Remove all matching watchpoints. */
1396void cpu_watchpoint_remove_all(CPUState *env, int mask)
1397{
aliguoric0ce9982008-11-25 22:13:57 +00001398 CPUWatchpoint *wp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001399
Blue Swirl72cf2d42009-09-12 07:36:22 +00001400 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001401 if (wp->flags & mask)
1402 cpu_watchpoint_remove_by_ref(env, wp);
aliguoric0ce9982008-11-25 22:13:57 +00001403 }
aliguoria1d1bb32008-11-18 20:07:32 +00001404}
1405
1406/* Add a breakpoint. */
1407int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1408 CPUBreakpoint **breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001409{
bellard1fddef42005-04-17 19:16:13 +00001410#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001411 CPUBreakpoint *bp;
ths3b46e622007-09-17 08:09:54 +00001412
aliguoria1d1bb32008-11-18 20:07:32 +00001413 bp = qemu_malloc(sizeof(*bp));
aliguoria1d1bb32008-11-18 20:07:32 +00001414
1415 bp->pc = pc;
1416 bp->flags = flags;
1417
aliguori2dc9f412008-11-18 20:56:59 +00001418 /* keep all GDB-injected breakpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001419 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001420 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001421 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001422 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001423
1424 breakpoint_invalidate(env, pc);
1425
1426 if (breakpoint)
1427 *breakpoint = bp;
1428 return 0;
1429#else
1430 return -ENOSYS;
1431#endif
1432}
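/* Usage sketch (illustrative): the gdb stub inserts guest-visible breakpoints
   with the BP_GDB flag, so that cpu_breakpoint_remove_all(env, BP_GDB) clears
   only the debugger's entries:

       CPUBreakpoint *bp;
       cpu_breakpoint_insert(env, pc, BP_GDB, &bp);

   pc stands for the target program counter of interest; breakpoints raised
   internally by target code are assumed to use BP_CPU instead. */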
1433
1434/* Remove a specific breakpoint. */
1435int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1436{
1437#if defined(TARGET_HAS_ICE)
1438 CPUBreakpoint *bp;
1439
Blue Swirl72cf2d42009-09-12 07:36:22 +00001440 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00001441 if (bp->pc == pc && bp->flags == flags) {
1442 cpu_breakpoint_remove_by_ref(env, bp);
bellard4c3a88a2003-07-26 12:06:08 +00001443 return 0;
aliguoria1d1bb32008-11-18 20:07:32 +00001444 }
bellard4c3a88a2003-07-26 12:06:08 +00001445 }
aliguoria1d1bb32008-11-18 20:07:32 +00001446 return -ENOENT;
bellard4c3a88a2003-07-26 12:06:08 +00001447#else
aliguoria1d1bb32008-11-18 20:07:32 +00001448 return -ENOSYS;
bellard4c3a88a2003-07-26 12:06:08 +00001449#endif
1450}
1451
aliguoria1d1bb32008-11-18 20:07:32 +00001452/* Remove a specific breakpoint by reference. */
1453void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001454{
bellard1fddef42005-04-17 19:16:13 +00001455#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001456 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
bellardd720b932004-04-25 17:57:43 +00001457
aliguoria1d1bb32008-11-18 20:07:32 +00001458 breakpoint_invalidate(env, breakpoint->pc);
1459
1460 qemu_free(breakpoint);
1461#endif
1462}
1463
1464/* Remove all matching breakpoints. */
1465void cpu_breakpoint_remove_all(CPUState *env, int mask)
1466{
1467#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001468 CPUBreakpoint *bp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001469
Blue Swirl72cf2d42009-09-12 07:36:22 +00001470 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001471 if (bp->flags & mask)
1472 cpu_breakpoint_remove_by_ref(env, bp);
aliguoric0ce9982008-11-25 22:13:57 +00001473 }
bellard4c3a88a2003-07-26 12:06:08 +00001474#endif
1475}
1476
bellardc33a3462003-07-29 20:50:33 +00001477/* enable or disable single step mode. EXCP_DEBUG is returned by the
1478 CPU loop after each instruction */
1479void cpu_single_step(CPUState *env, int enabled)
1480{
bellard1fddef42005-04-17 19:16:13 +00001481#if defined(TARGET_HAS_ICE)
bellardc33a3462003-07-29 20:50:33 +00001482 if (env->singlestep_enabled != enabled) {
1483 env->singlestep_enabled = enabled;
aliguorie22a25c2009-03-12 20:12:48 +00001484 if (kvm_enabled())
1485 kvm_update_guest_debug(env, 0);
1486 else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01001487 /* must flush all the translated code to avoid inconsistencies */
aliguorie22a25c2009-03-12 20:12:48 +00001488 /* XXX: only flush what is necessary */
1489 tb_flush(env);
1490 }
bellardc33a3462003-07-29 20:50:33 +00001491 }
1492#endif
1493}
1494
bellard34865132003-10-05 14:28:56 +00001495/* enable or disable low levels log */
1496void cpu_set_log(int log_flags)
1497{
1498 loglevel = log_flags;
1499 if (loglevel && !logfile) {
pbrook11fcfab2007-07-01 18:21:11 +00001500 logfile = fopen(logfilename, log_append ? "a" : "w");
bellard34865132003-10-05 14:28:56 +00001501 if (!logfile) {
1502 perror(logfilename);
1503 _exit(1);
1504 }
bellard9fa3e852004-01-04 18:06:42 +00001505#if !defined(CONFIG_SOFTMMU)
1506 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1507 {
blueswir1b55266b2008-09-20 08:07:15 +00001508 static char logfile_buf[4096];
bellard9fa3e852004-01-04 18:06:42 +00001509 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1510 }
Filip Navarabf65f532009-07-27 10:02:04 -05001511#elif !defined(_WIN32)
1512 /* Win32 doesn't support line-buffering and requires size >= 2 */
bellard34865132003-10-05 14:28:56 +00001513 setvbuf(logfile, NULL, _IOLBF, 0);
bellard9fa3e852004-01-04 18:06:42 +00001514#endif
pbrooke735b912007-06-30 13:53:24 +00001515 log_append = 1;
1516 }
1517 if (!loglevel && logfile) {
1518 fclose(logfile);
1519 logfile = NULL;
bellard34865132003-10-05 14:28:56 +00001520 }
1521}
1522
1523void cpu_set_log_filename(const char *filename)
1524{
1525 logfilename = strdup(filename);
pbrooke735b912007-06-30 13:53:24 +00001526 if (logfile) {
1527 fclose(logfile);
1528 logfile = NULL;
1529 }
1530 cpu_set_log(loglevel);
bellard34865132003-10-05 14:28:56 +00001531}
bellardc33a3462003-07-29 20:50:33 +00001532
aurel323098dba2009-03-07 21:28:24 +00001533static void cpu_unlink_tb(CPUState *env)
bellardea041c02003-06-25 16:16:50 +00001534{
pbrookd5975362008-06-07 20:50:51 +00001535 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1536 problem and hope the cpu will stop of its own accord. For userspace
1537 emulation this often isn't actually as bad as it sounds. Often
1538 signals are used primarily to interrupt blocking syscalls. */
aurel323098dba2009-03-07 21:28:24 +00001539 TranslationBlock *tb;
Anthony Liguoric227f092009-10-01 16:12:16 -05001540 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
aurel323098dba2009-03-07 21:28:24 +00001541
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001542 spin_lock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001543 tb = env->current_tb;
1544 /* if the cpu is currently executing code, we must unlink it and
1545 all the potentially executing TBs */
Riku Voipiof76cfe52009-12-04 15:16:30 +02001546 if (tb) {
aurel323098dba2009-03-07 21:28:24 +00001547 env->current_tb = NULL;
1548 tb_reset_jump_recursive(tb);
aurel323098dba2009-03-07 21:28:24 +00001549 }
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001550 spin_unlock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001551}
1552
1553/* mask must never be zero, except for A20 change call */
1554void cpu_interrupt(CPUState *env, int mask)
1555{
1556 int old_mask;
1557
1558 old_mask = env->interrupt_request;
1559 env->interrupt_request |= mask;
1560
aliguori8edac962009-04-24 18:03:45 +00001561#ifndef CONFIG_USER_ONLY
1562 /*
1563 * If called from iothread context, wake the target cpu in
1564 * case it's halted.
1565 */
1566 if (!qemu_cpu_self(env)) {
1567 qemu_cpu_kick(env);
1568 return;
1569 }
1570#endif
1571
pbrook2e70f6e2008-06-29 01:03:05 +00001572 if (use_icount) {
pbrook266910c2008-07-09 15:31:50 +00001573 env->icount_decr.u16.high = 0xffff;
pbrook2e70f6e2008-06-29 01:03:05 +00001574#ifndef CONFIG_USER_ONLY
pbrook2e70f6e2008-06-29 01:03:05 +00001575 if (!can_do_io(env)
aurel32be214e62009-03-06 21:48:00 +00001576 && (mask & ~old_mask) != 0) {
pbrook2e70f6e2008-06-29 01:03:05 +00001577 cpu_abort(env, "Raised interrupt while not in I/O function");
1578 }
1579#endif
1580 } else {
aurel323098dba2009-03-07 21:28:24 +00001581 cpu_unlink_tb(env);
bellardea041c02003-06-25 16:16:50 +00001582 }
1583}
1584
bellardb54ad042004-05-20 13:42:52 +00001585void cpu_reset_interrupt(CPUState *env, int mask)
1586{
1587 env->interrupt_request &= ~mask;
1588}
1589
aurel323098dba2009-03-07 21:28:24 +00001590void cpu_exit(CPUState *env)
1591{
1592 env->exit_request = 1;
1593 cpu_unlink_tb(env);
1594}
1595
blueswir1c7cd6a32008-10-02 18:27:46 +00001596const CPULogItem cpu_log_items[] = {
ths5fafdf22007-09-16 21:08:06 +00001597 { CPU_LOG_TB_OUT_ASM, "out_asm",
bellardf193c792004-03-21 17:06:25 +00001598 "show generated host assembly code for each compiled TB" },
1599 { CPU_LOG_TB_IN_ASM, "in_asm",
1600 "show target assembly code for each compiled TB" },
ths5fafdf22007-09-16 21:08:06 +00001601 { CPU_LOG_TB_OP, "op",
bellard57fec1f2008-02-01 10:50:11 +00001602 "show micro ops for each compiled TB" },
bellardf193c792004-03-21 17:06:25 +00001603 { CPU_LOG_TB_OP_OPT, "op_opt",
blueswir1e01a1152008-03-14 17:37:11 +00001604 "show micro ops "
1605#ifdef TARGET_I386
1606 "before eflags optimization and "
bellardf193c792004-03-21 17:06:25 +00001607#endif
blueswir1e01a1152008-03-14 17:37:11 +00001608 "after liveness analysis" },
bellardf193c792004-03-21 17:06:25 +00001609 { CPU_LOG_INT, "int",
1610 "show interrupts/exceptions in short format" },
1611 { CPU_LOG_EXEC, "exec",
1612 "show trace before each executed TB (lots of logs)" },
bellard9fddaa02004-05-21 12:59:32 +00001613 { CPU_LOG_TB_CPU, "cpu",
thse91c8a72007-06-03 13:35:16 +00001614 "show CPU state before block translation" },
bellardf193c792004-03-21 17:06:25 +00001615#ifdef TARGET_I386
1616 { CPU_LOG_PCALL, "pcall",
1617 "show protected mode far calls/returns/exceptions" },
aliguorieca1bdf2009-01-26 19:54:31 +00001618 { CPU_LOG_RESET, "cpu_reset",
1619 "show CPU state before CPU resets" },
bellardf193c792004-03-21 17:06:25 +00001620#endif
bellard8e3a9fd2004-10-09 17:32:58 +00001621#ifdef DEBUG_IOPORT
bellardfd872592004-05-12 19:11:15 +00001622 { CPU_LOG_IOPORT, "ioport",
1623 "show all i/o ports accesses" },
bellard8e3a9fd2004-10-09 17:32:58 +00001624#endif
bellardf193c792004-03-21 17:06:25 +00001625 { 0, NULL, NULL },
1626};
1627
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001628#ifndef CONFIG_USER_ONLY
1629static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
1630 = QLIST_HEAD_INITIALIZER(memory_client_list);
1631
1632static void cpu_notify_set_memory(target_phys_addr_t start_addr,
1633 ram_addr_t size,
1634 ram_addr_t phys_offset)
1635{
1636 CPUPhysMemoryClient *client;
1637 QLIST_FOREACH(client, &memory_client_list, list) {
1638 client->set_memory(client, start_addr, size, phys_offset);
1639 }
1640}
1641
1642static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
1643 target_phys_addr_t end)
1644{
1645 CPUPhysMemoryClient *client;
1646 QLIST_FOREACH(client, &memory_client_list, list) {
1647 int r = client->sync_dirty_bitmap(client, start, end);
1648 if (r < 0)
1649 return r;
1650 }
1651 return 0;
1652}
1653
1654static int cpu_notify_migration_log(int enable)
1655{
1656 CPUPhysMemoryClient *client;
1657 QLIST_FOREACH(client, &memory_client_list, list) {
1658 int r = client->migration_log(client, enable);
1659 if (r < 0)
1660 return r;
1661 }
1662 return 0;
1663}
1664
1665static void phys_page_for_each_in_l1_map(PhysPageDesc **phys_map,
1666 CPUPhysMemoryClient *client)
1667{
1668 PhysPageDesc *pd;
1669 int l1, l2;
1670
1671 for (l1 = 0; l1 < L1_SIZE; ++l1) {
1672 pd = phys_map[l1];
1673 if (!pd) {
1674 continue;
1675 }
1676 for (l2 = 0; l2 < L2_SIZE; ++l2) {
1677 if (pd[l2].phys_offset == IO_MEM_UNASSIGNED) {
1678 continue;
1679 }
1680 client->set_memory(client, pd[l2].region_offset,
1681 TARGET_PAGE_SIZE, pd[l2].phys_offset);
1682 }
1683 }
1684}
1685
1686static void phys_page_for_each(CPUPhysMemoryClient *client)
1687{
1688#if TARGET_PHYS_ADDR_SPACE_BITS > 32
1689
1690#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
1691#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
1692#endif
1693 void **phys_map = (void **)l1_phys_map;
1694 int l1;
1695 if (!l1_phys_map) {
1696 return;
1697 }
1698 for (l1 = 0; l1 < L1_SIZE; ++l1) {
1699 if (phys_map[l1]) {
1700 phys_page_for_each_in_l1_map(phys_map[l1], client);
1701 }
1702 }
1703#else
1704 if (!l1_phys_map) {
1705 return;
1706 }
1707 phys_page_for_each_in_l1_map(l1_phys_map, client);
1708#endif
1709}
1710
1711void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
1712{
1713 QLIST_INSERT_HEAD(&memory_client_list, client, list);
1714 phys_page_for_each(client);
1715}
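/* Illustrative sketch (not part of this file): a subsystem that needs to
   mirror the physical memory map fills in the three hooks used above and
   registers itself; registration immediately replays every currently mapped
   page through phys_page_for_each():

       static CPUPhysMemoryClient my_client = {
           .set_memory        = my_set_memory,
           .sync_dirty_bitmap = my_sync_dirty_bitmap,
           .migration_log     = my_migration_log,
       };
       cpu_register_phys_memory_client(&my_client);

   my_set_memory() and friends are hypothetical callbacks supplied by the
   caller. */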
1716
1717void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
1718{
1719 QLIST_REMOVE(client, list);
1720}
1721#endif
1722
bellardf193c792004-03-21 17:06:25 +00001723static int cmp1(const char *s1, int n, const char *s2)
1724{
1725 if (strlen(s2) != n)
1726 return 0;
1727 return memcmp(s1, s2, n) == 0;
1728}
ths3b46e622007-09-17 08:09:54 +00001729
bellardf193c792004-03-21 17:06:25 +00001730/* takes a comma separated list of log masks. Return 0 if error. */
1731int cpu_str_to_log_mask(const char *str)
1732{
blueswir1c7cd6a32008-10-02 18:27:46 +00001733 const CPULogItem *item;
bellardf193c792004-03-21 17:06:25 +00001734 int mask;
1735 const char *p, *p1;
1736
1737 p = str;
1738 mask = 0;
1739 for(;;) {
1740 p1 = strchr(p, ',');
1741 if (!p1)
1742 p1 = p + strlen(p);
bellard8e3a9fd2004-10-09 17:32:58 +00001743 if(cmp1(p,p1-p,"all")) {
1744 for(item = cpu_log_items; item->mask != 0; item++) {
1745 mask |= item->mask;
1746 }
1747 } else {
bellardf193c792004-03-21 17:06:25 +00001748 for(item = cpu_log_items; item->mask != 0; item++) {
1749 if (cmp1(p, p1 - p, item->name))
1750 goto found;
1751 }
1752 return 0;
bellard8e3a9fd2004-10-09 17:32:58 +00001753 }
bellardf193c792004-03-21 17:06:25 +00001754 found:
1755 mask |= item->mask;
1756 if (*p1 != ',')
1757 break;
1758 p = p1 + 1;
1759 }
1760 return mask;
1761}
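/* Example (illustrative): cpu_str_to_log_mask("in_asm,int") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_INT, the special name "all" selects every mask
   in cpu_log_items[], and any unknown name makes the whole call return 0. */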
bellardea041c02003-06-25 16:16:50 +00001762
bellard75012672003-06-21 13:11:07 +00001763void cpu_abort(CPUState *env, const char *fmt, ...)
1764{
1765 va_list ap;
pbrook493ae1f2007-11-23 16:53:59 +00001766 va_list ap2;
bellard75012672003-06-21 13:11:07 +00001767
1768 va_start(ap, fmt);
pbrook493ae1f2007-11-23 16:53:59 +00001769 va_copy(ap2, ap);
bellard75012672003-06-21 13:11:07 +00001770 fprintf(stderr, "qemu: fatal: ");
1771 vfprintf(stderr, fmt, ap);
1772 fprintf(stderr, "\n");
1773#ifdef TARGET_I386
bellard7fe48482004-10-09 18:08:01 +00001774 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1775#else
1776 cpu_dump_state(env, stderr, fprintf, 0);
bellard75012672003-06-21 13:11:07 +00001777#endif
aliguori93fcfe32009-01-15 22:34:14 +00001778 if (qemu_log_enabled()) {
1779 qemu_log("qemu: fatal: ");
1780 qemu_log_vprintf(fmt, ap2);
1781 qemu_log("\n");
j_mayerf9373292007-09-29 12:18:20 +00001782#ifdef TARGET_I386
aliguori93fcfe32009-01-15 22:34:14 +00001783 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
j_mayerf9373292007-09-29 12:18:20 +00001784#else
aliguori93fcfe32009-01-15 22:34:14 +00001785 log_cpu_state(env, 0);
j_mayerf9373292007-09-29 12:18:20 +00001786#endif
aliguori31b1a7b2009-01-15 22:35:09 +00001787 qemu_log_flush();
aliguori93fcfe32009-01-15 22:34:14 +00001788 qemu_log_close();
balrog924edca2007-06-10 14:07:13 +00001789 }
pbrook493ae1f2007-11-23 16:53:59 +00001790 va_end(ap2);
j_mayerf9373292007-09-29 12:18:20 +00001791 va_end(ap);
Riku Voipiofd052bf2010-01-25 14:30:49 +02001792#if defined(CONFIG_USER_ONLY)
1793 {
1794 struct sigaction act;
1795 sigfillset(&act.sa_mask);
1796 act.sa_handler = SIG_DFL;
1797 sigaction(SIGABRT, &act, NULL);
1798 }
1799#endif
bellard75012672003-06-21 13:11:07 +00001800 abort();
1801}
1802
thsc5be9f02007-02-28 20:20:53 +00001803CPUState *cpu_copy(CPUState *env)
1804{
ths01ba9812007-12-09 02:22:57 +00001805 CPUState *new_env = cpu_init(env->cpu_model_str);
thsc5be9f02007-02-28 20:20:53 +00001806 CPUState *next_cpu = new_env->next_cpu;
1807 int cpu_index = new_env->cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001808#if defined(TARGET_HAS_ICE)
1809 CPUBreakpoint *bp;
1810 CPUWatchpoint *wp;
1811#endif
1812
thsc5be9f02007-02-28 20:20:53 +00001813 memcpy(new_env, env, sizeof(CPUState));
aliguori5a38f082009-01-15 20:16:51 +00001814
1815 /* Preserve chaining and index. */
thsc5be9f02007-02-28 20:20:53 +00001816 new_env->next_cpu = next_cpu;
1817 new_env->cpu_index = cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001818
1819 /* Clone all break/watchpoints.
1820 Note: Once we support ptrace with hw-debug register access, make sure
1821 BP_CPU break/watchpoints are handled correctly on clone. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00001822 QTAILQ_INIT(&new_env->breakpoints);
1823 QTAILQ_INIT(&new_env->watchpoints);
aliguori5a38f082009-01-15 20:16:51 +00001824#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001825 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001826 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1827 }
Blue Swirl72cf2d42009-09-12 07:36:22 +00001828 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001829 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1830 wp->flags, NULL);
1831 }
1832#endif
1833
thsc5be9f02007-02-28 20:20:53 +00001834 return new_env;
1835}
1836
bellard01243112004-01-04 15:48:17 +00001837#if !defined(CONFIG_USER_ONLY)
1838
edgar_igl5c751e92008-05-06 08:44:21 +00001839static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1840{
1841 unsigned int i;
1842
1843 /* Discard jump cache entries for any tb which might overlap the
1844 flushed page. */
1845 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1846 memset (&env->tb_jmp_cache[i], 0,
1847 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1848
1849 i = tb_jmp_cache_hash_page(addr);
1850 memset (&env->tb_jmp_cache[i], 0,
1851 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1852}
1853
Igor Kovalenko08738982009-07-12 02:15:40 +04001854static CPUTLBEntry s_cputlb_empty_entry = {
1855 .addr_read = -1,
1856 .addr_write = -1,
1857 .addr_code = -1,
1858 .addend = -1,
1859};
1860
bellardee8b7022004-02-03 23:35:10 +00001861/* NOTE: if flush_global is true, also flush global entries (not
1862 implemented yet) */
1863void tlb_flush(CPUState *env, int flush_global)
bellard33417e72003-08-10 21:47:01 +00001864{
bellard33417e72003-08-10 21:47:01 +00001865 int i;
bellard01243112004-01-04 15:48:17 +00001866
bellard9fa3e852004-01-04 18:06:42 +00001867#if defined(DEBUG_TLB)
1868 printf("tlb_flush:\n");
1869#endif
bellard01243112004-01-04 15:48:17 +00001870 /* must reset current TB so that interrupts cannot modify the
1871 links while we are modifying them */
1872 env->current_tb = NULL;
1873
bellard33417e72003-08-10 21:47:01 +00001874 for(i = 0; i < CPU_TLB_SIZE; i++) {
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001875 int mmu_idx;
1876 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
Igor Kovalenko08738982009-07-12 02:15:40 +04001877 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001878 }
bellard33417e72003-08-10 21:47:01 +00001879 }
bellard9fa3e852004-01-04 18:06:42 +00001880
bellard8a40a182005-11-20 10:35:40 +00001881 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
bellard9fa3e852004-01-04 18:06:42 +00001882
bellarde3db7222005-01-26 22:00:47 +00001883 tlb_flush_count++;
bellard33417e72003-08-10 21:47:01 +00001884}
1885
bellard274da6b2004-05-20 21:56:27 +00001886static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
bellard61382a52003-10-27 21:22:23 +00001887{
ths5fafdf22007-09-16 21:08:06 +00001888 if (addr == (tlb_entry->addr_read &
bellard84b7b8e2005-11-28 21:19:04 +00001889 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00001890 addr == (tlb_entry->addr_write &
bellard84b7b8e2005-11-28 21:19:04 +00001891 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00001892 addr == (tlb_entry->addr_code &
bellard84b7b8e2005-11-28 21:19:04 +00001893 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
Igor Kovalenko08738982009-07-12 02:15:40 +04001894 *tlb_entry = s_cputlb_empty_entry;
bellard84b7b8e2005-11-28 21:19:04 +00001895 }
bellard61382a52003-10-27 21:22:23 +00001896}
1897
bellard2e126692004-04-25 21:28:44 +00001898void tlb_flush_page(CPUState *env, target_ulong addr)
bellard33417e72003-08-10 21:47:01 +00001899{
bellard8a40a182005-11-20 10:35:40 +00001900 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001901 int mmu_idx;
bellard01243112004-01-04 15:48:17 +00001902
bellard9fa3e852004-01-04 18:06:42 +00001903#if defined(DEBUG_TLB)
bellard108c49b2005-07-24 12:55:09 +00001904 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
bellard9fa3e852004-01-04 18:06:42 +00001905#endif
bellard01243112004-01-04 15:48:17 +00001906 /* must reset current TB so that interrupts cannot modify the
1907 links while we are modifying them */
1908 env->current_tb = NULL;
bellard33417e72003-08-10 21:47:01 +00001909
bellard61382a52003-10-27 21:22:23 +00001910 addr &= TARGET_PAGE_MASK;
bellard33417e72003-08-10 21:47:01 +00001911 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001912 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1913 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
bellard01243112004-01-04 15:48:17 +00001914
edgar_igl5c751e92008-05-06 08:44:21 +00001915 tlb_flush_jmp_cache(env, addr);
bellard9fa3e852004-01-04 18:06:42 +00001916}
1917
bellard9fa3e852004-01-04 18:06:42 +00001918/* update the TLBs so that writes to code in the virtual page 'addr'
1919 can be detected */
Anthony Liguoric227f092009-10-01 16:12:16 -05001920static void tlb_protect_code(ram_addr_t ram_addr)
bellard61382a52003-10-27 21:22:23 +00001921{
ths5fafdf22007-09-16 21:08:06 +00001922 cpu_physical_memory_reset_dirty(ram_addr,
bellard6a00d602005-11-21 23:25:50 +00001923 ram_addr + TARGET_PAGE_SIZE,
1924 CODE_DIRTY_FLAG);
bellard9fa3e852004-01-04 18:06:42 +00001925}
1926
bellard9fa3e852004-01-04 18:06:42 +00001927/* update the TLB so that writes in physical page 'phys_addr' are no longer
bellard3a7d9292005-08-21 09:26:42 +00001928 tested for self modifying code */
Anthony Liguoric227f092009-10-01 16:12:16 -05001929static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
bellard3a7d9292005-08-21 09:26:42 +00001930 target_ulong vaddr)
bellard9fa3e852004-01-04 18:06:42 +00001931{
bellard3a7d9292005-08-21 09:26:42 +00001932 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
bellard1ccde1c2004-02-06 19:46:14 +00001933}
1934
ths5fafdf22007-09-16 21:08:06 +00001935static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
bellard1ccde1c2004-02-06 19:46:14 +00001936 unsigned long start, unsigned long length)
1937{
1938 unsigned long addr;
bellard84b7b8e2005-11-28 21:19:04 +00001939 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1940 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
bellard1ccde1c2004-02-06 19:46:14 +00001941 if ((addr - start) < length) {
pbrook0f459d12008-06-09 00:20:13 +00001942 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
bellard1ccde1c2004-02-06 19:46:14 +00001943 }
1944 }
1945}
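/* Descriptive note: setting TLB_NOTDIRTY in addr_write does not unmap the
   page; it only forces the next guest store to that page through the slow
   path (see the IO_MEM_NOTDIRTY case in tlb_set_page_exec below), which
   updates the dirty bitmap and can then switch the entry back to the fast
   path. */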
1946
pbrook5579c7f2009-04-11 14:47:08 +00001947/* Note: start and end must be within the same ram block. */
Anthony Liguoric227f092009-10-01 16:12:16 -05001948void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
bellard0a962c02005-02-10 22:00:27 +00001949 int dirty_flags)
bellard1ccde1c2004-02-06 19:46:14 +00001950{
1951 CPUState *env;
bellard4f2ac232004-04-26 19:44:02 +00001952 unsigned long length, start1;
bellard0a962c02005-02-10 22:00:27 +00001953 int i, mask, len;
1954 uint8_t *p;
bellard1ccde1c2004-02-06 19:46:14 +00001955
1956 start &= TARGET_PAGE_MASK;
1957 end = TARGET_PAGE_ALIGN(end);
1958
1959 length = end - start;
1960 if (length == 0)
1961 return;
bellard0a962c02005-02-10 22:00:27 +00001962 len = length >> TARGET_PAGE_BITS;
bellardf23db162005-08-21 19:12:28 +00001963 mask = ~dirty_flags;
1964 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1965 for(i = 0; i < len; i++)
1966 p[i] &= mask;
1967
bellard1ccde1c2004-02-06 19:46:14 +00001968 /* we modify the TLB cache so that the dirty bit will be set again
1969 when accessing the range */
pbrook5579c7f2009-04-11 14:47:08 +00001970 start1 = (unsigned long)qemu_get_ram_ptr(start);
1971 /* Check that we don't span multiple blocks - this breaks the
1972 address comparisons below. */
1973 if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
1974 != (end - 1) - start) {
1975 abort();
1976 }
1977
bellard6a00d602005-11-21 23:25:50 +00001978 for(env = first_cpu; env != NULL; env = env->next_cpu) {
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001979 int mmu_idx;
1980 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1981 for(i = 0; i < CPU_TLB_SIZE; i++)
1982 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
1983 start1, length);
1984 }
bellard6a00d602005-11-21 23:25:50 +00001985 }
bellard1ccde1c2004-02-06 19:46:14 +00001986}
1987
aliguori74576192008-10-06 14:02:03 +00001988int cpu_physical_memory_set_dirty_tracking(int enable)
1989{
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001990 int ret = 0;
aliguori74576192008-10-06 14:02:03 +00001991 in_migration = enable;
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001992 ret = cpu_notify_migration_log(!!enable);
1993 return ret;
aliguori74576192008-10-06 14:02:03 +00001994}
1995
1996int cpu_physical_memory_get_dirty_tracking(void)
1997{
1998 return in_migration;
1999}
2000
Anthony Liguoric227f092009-10-01 16:12:16 -05002001int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
2002 target_phys_addr_t end_addr)
aliguori2bec46d2008-11-24 20:21:41 +00002003{
Michael S. Tsirkin7b8f3b72010-01-27 22:07:21 +02002004 int ret;
Jan Kiszka151f7742009-05-01 20:52:47 +02002005
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002006 ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
Jan Kiszka151f7742009-05-01 20:52:47 +02002007 return ret;
aliguori2bec46d2008-11-24 20:21:41 +00002008}
2009
bellard3a7d9292005-08-21 09:26:42 +00002010static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2011{
Anthony Liguoric227f092009-10-01 16:12:16 -05002012 ram_addr_t ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00002013 void *p;
bellard3a7d9292005-08-21 09:26:42 +00002014
bellard84b7b8e2005-11-28 21:19:04 +00002015 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
pbrook5579c7f2009-04-11 14:47:08 +00002016 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2017 + tlb_entry->addend);
2018 ram_addr = qemu_ram_addr_from_host(p);
bellard3a7d9292005-08-21 09:26:42 +00002019 if (!cpu_physical_memory_is_dirty(ram_addr)) {
pbrook0f459d12008-06-09 00:20:13 +00002020 tlb_entry->addr_write |= TLB_NOTDIRTY;
bellard3a7d9292005-08-21 09:26:42 +00002021 }
2022 }
2023}
2024
2025/* update the TLB according to the current state of the dirty bits */
2026void cpu_tlb_update_dirty(CPUState *env)
2027{
2028 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002029 int mmu_idx;
2030 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2031 for(i = 0; i < CPU_TLB_SIZE; i++)
2032 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2033 }
bellard3a7d9292005-08-21 09:26:42 +00002034}
2035
pbrook0f459d12008-06-09 00:20:13 +00002036static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002037{
pbrook0f459d12008-06-09 00:20:13 +00002038 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2039 tlb_entry->addr_write = vaddr;
bellard1ccde1c2004-02-06 19:46:14 +00002040}
2041
pbrook0f459d12008-06-09 00:20:13 +00002042/* update the TLB corresponding to virtual page vaddr
2043 so that it is no longer dirty */
2044static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002045{
bellard1ccde1c2004-02-06 19:46:14 +00002046 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002047 int mmu_idx;
bellard1ccde1c2004-02-06 19:46:14 +00002048
pbrook0f459d12008-06-09 00:20:13 +00002049 vaddr &= TARGET_PAGE_MASK;
bellard1ccde1c2004-02-06 19:46:14 +00002050 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002051 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2052 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
bellard9fa3e852004-01-04 18:06:42 +00002053}
2054
bellard59817cc2004-02-16 22:01:13 +00002055/* add a new TLB entry. At most one entry for a given virtual address
2056 is permitted. Return 0 if OK or 2 if the page could not be mapped
2057 (can only happen in non SOFTMMU mode for I/O pages or pages
2058 conflicting with the host address space). */
ths5fafdf22007-09-16 21:08:06 +00002059int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
Anthony Liguoric227f092009-10-01 16:12:16 -05002060 target_phys_addr_t paddr, int prot,
j_mayer6ebbf392007-10-14 07:07:08 +00002061 int mmu_idx, int is_softmmu)
bellard9fa3e852004-01-04 18:06:42 +00002062{
bellard92e873b2004-05-21 14:52:29 +00002063 PhysPageDesc *p;
bellard4f2ac232004-04-26 19:44:02 +00002064 unsigned long pd;
bellard9fa3e852004-01-04 18:06:42 +00002065 unsigned int index;
bellard4f2ac232004-04-26 19:44:02 +00002066 target_ulong address;
pbrook0f459d12008-06-09 00:20:13 +00002067 target_ulong code_address;
Anthony Liguoric227f092009-10-01 16:12:16 -05002068 target_phys_addr_t addend;
bellard9fa3e852004-01-04 18:06:42 +00002069 int ret;
bellard84b7b8e2005-11-28 21:19:04 +00002070 CPUTLBEntry *te;
aliguoria1d1bb32008-11-18 20:07:32 +00002071 CPUWatchpoint *wp;
Anthony Liguoric227f092009-10-01 16:12:16 -05002072 target_phys_addr_t iotlb;
bellard9fa3e852004-01-04 18:06:42 +00002073
bellard92e873b2004-05-21 14:52:29 +00002074 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
bellard9fa3e852004-01-04 18:06:42 +00002075 if (!p) {
2076 pd = IO_MEM_UNASSIGNED;
bellard9fa3e852004-01-04 18:06:42 +00002077 } else {
2078 pd = p->phys_offset;
bellard9fa3e852004-01-04 18:06:42 +00002079 }
2080#if defined(DEBUG_TLB)
j_mayer6ebbf392007-10-14 07:07:08 +00002081 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2082 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
bellard9fa3e852004-01-04 18:06:42 +00002083#endif
2084
2085 ret = 0;
pbrook0f459d12008-06-09 00:20:13 +00002086 address = vaddr;
2087 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2088 /* IO memory case (romd handled later) */
2089 address |= TLB_MMIO;
2090 }
pbrook5579c7f2009-04-11 14:47:08 +00002091 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
pbrook0f459d12008-06-09 00:20:13 +00002092 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2093 /* Normal RAM. */
2094 iotlb = pd & TARGET_PAGE_MASK;
2095 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2096 iotlb |= IO_MEM_NOTDIRTY;
2097 else
2098 iotlb |= IO_MEM_ROM;
2099 } else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002100 /* IO handlers are currently passed a physical address.
pbrook0f459d12008-06-09 00:20:13 +00002101 It would be nice to pass an offset from the base address
2102 of that region. This would avoid having to special case RAM,
2103 and avoid full address decoding in every device.
2104 We can't use the high bits of pd for this because
2105 IO_MEM_ROMD uses these as a ram address. */
pbrook8da3ff12008-12-01 18:59:50 +00002106 iotlb = (pd & ~TARGET_PAGE_MASK);
2107 if (p) {
pbrook8da3ff12008-12-01 18:59:50 +00002108 iotlb += p->region_offset;
2109 } else {
2110 iotlb += paddr;
2111 }
pbrook0f459d12008-06-09 00:20:13 +00002112 }
pbrook6658ffb2007-03-16 23:58:11 +00002113
pbrook0f459d12008-06-09 00:20:13 +00002114 code_address = address;
2115 /* Make accesses to pages with watchpoints go via the
2116 watchpoint trap routines. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00002117 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00002118 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
pbrook0f459d12008-06-09 00:20:13 +00002119 iotlb = io_mem_watch + paddr;
2120 /* TODO: The memory case can be optimized by not trapping
2121 reads of pages with a write breakpoint. */
2122 address |= TLB_MMIO;
pbrook6658ffb2007-03-16 23:58:11 +00002123 }
pbrook0f459d12008-06-09 00:20:13 +00002124 }
balrogd79acba2007-06-26 20:01:13 +00002125
pbrook0f459d12008-06-09 00:20:13 +00002126 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2127 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2128 te = &env->tlb_table[mmu_idx][index];
2129 te->addend = addend - vaddr;
2130 if (prot & PAGE_READ) {
2131 te->addr_read = address;
2132 } else {
2133 te->addr_read = -1;
2134 }
edgar_igl5c751e92008-05-06 08:44:21 +00002135
pbrook0f459d12008-06-09 00:20:13 +00002136 if (prot & PAGE_EXEC) {
2137 te->addr_code = code_address;
2138 } else {
2139 te->addr_code = -1;
2140 }
2141 if (prot & PAGE_WRITE) {
2142 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2143 (pd & IO_MEM_ROMD)) {
2144 /* Write access calls the I/O callback. */
2145 te->addr_write = address | TLB_MMIO;
2146 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2147 !cpu_physical_memory_is_dirty(pd)) {
2148 te->addr_write = address | TLB_NOTDIRTY;
bellard84b7b8e2005-11-28 21:19:04 +00002149 } else {
pbrook0f459d12008-06-09 00:20:13 +00002150 te->addr_write = address;
bellard9fa3e852004-01-04 18:06:42 +00002151 }
pbrook0f459d12008-06-09 00:20:13 +00002152 } else {
2153 te->addr_write = -1;
bellard9fa3e852004-01-04 18:06:42 +00002154 }
bellard9fa3e852004-01-04 18:06:42 +00002155 return ret;
2156}
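/* Descriptive note: the entry built above is consumed by the softmmu fast
   path.  addr_read/addr_write/addr_code hold the tagged virtual page address
   (or -1 when that access type is not permitted); when the TLB_MMIO or
   TLB_NOTDIRTY tag bits are set the access is forced through the I/O slow
   path, otherwise the host address is simply vaddr + te->addend.
   env->iotlb[] keeps the matching physical/IO offset for the slow path. */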
2157
bellard01243112004-01-04 15:48:17 +00002158#else
2159
bellardee8b7022004-02-03 23:35:10 +00002160void tlb_flush(CPUState *env, int flush_global)
bellard01243112004-01-04 15:48:17 +00002161{
2162}
2163
bellard2e126692004-04-25 21:28:44 +00002164void tlb_flush_page(CPUState *env, target_ulong addr)
bellard01243112004-01-04 15:48:17 +00002165{
2166}
2167
ths5fafdf22007-09-16 21:08:06 +00002168int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
Anthony Liguoric227f092009-10-01 16:12:16 -05002169 target_phys_addr_t paddr, int prot,
j_mayer6ebbf392007-10-14 07:07:08 +00002170 int mmu_idx, int is_softmmu)
bellard33417e72003-08-10 21:47:01 +00002171{
bellard9fa3e852004-01-04 18:06:42 +00002172 return 0;
2173}
bellard33417e72003-08-10 21:47:01 +00002174
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002175/*
2176 * Walks guest process memory "regions" one by one
2177 * and calls callback function 'fn' for each region.
2178 */
2179int walk_memory_regions(void *priv,
2180 int (*fn)(void *, unsigned long, unsigned long, unsigned long))
bellard9fa3e852004-01-04 18:06:42 +00002181{
2182 unsigned long start, end;
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002183 PageDesc *p = NULL;
bellard9fa3e852004-01-04 18:06:42 +00002184 int i, j, prot, prot1;
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002185 int rc = 0;
bellard9fa3e852004-01-04 18:06:42 +00002186
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002187 start = end = -1;
bellard9fa3e852004-01-04 18:06:42 +00002188 prot = 0;
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002189
2190 for (i = 0; i <= L1_SIZE; i++) {
2191 p = (i < L1_SIZE) ? l1_map[i] : NULL;
2192 for (j = 0; j < L2_SIZE; j++) {
2193 prot1 = (p == NULL) ? 0 : p[j].flags;
2194 /*
2195 * "region" is one contiguous chunk of memory
2196 * that has the same protection flags set.
2197 */
bellard9fa3e852004-01-04 18:06:42 +00002198 if (prot1 != prot) {
2199 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2200 if (start != -1) {
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002201 rc = (*fn)(priv, start, end, prot);
2202 /* callback can stop iteration by returning != 0 */
2203 if (rc != 0)
2204 return (rc);
bellard9fa3e852004-01-04 18:06:42 +00002205 }
2206 if (prot1 != 0)
2207 start = end;
2208 else
2209 start = -1;
2210 prot = prot1;
2211 }
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002212 if (p == NULL)
bellard9fa3e852004-01-04 18:06:42 +00002213 break;
2214 }
bellard33417e72003-08-10 21:47:01 +00002215 }
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002216 return (rc);
2217}
2218
2219static int dump_region(void *priv, unsigned long start,
2220 unsigned long end, unsigned long prot)
2221{
2222 FILE *f = (FILE *)priv;
2223
2224 (void) fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2225 start, end, end - start,
2226 ((prot & PAGE_READ) ? 'r' : '-'),
2227 ((prot & PAGE_WRITE) ? 'w' : '-'),
2228 ((prot & PAGE_EXEC) ? 'x' : '-'));
2229
2230 return (0);
2231}
2232
2233/* dump memory mappings */
2234void page_dump(FILE *f)
2235{
2236 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2237 "start", "end", "size", "prot");
2238 walk_memory_regions(f, dump_region);
bellard33417e72003-08-10 21:47:01 +00002239}
2240
pbrook53a59602006-03-25 19:31:22 +00002241int page_get_flags(target_ulong address)
bellard33417e72003-08-10 21:47:01 +00002242{
bellard9fa3e852004-01-04 18:06:42 +00002243 PageDesc *p;
2244
2245 p = page_find(address >> TARGET_PAGE_BITS);
bellard33417e72003-08-10 21:47:01 +00002246 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00002247 return 0;
2248 return p->flags;
bellard33417e72003-08-10 21:47:01 +00002249}
2250
bellard9fa3e852004-01-04 18:06:42 +00002251/* modify the flags of a page and invalidate the code if
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002252 necessary. The flag PAGE_WRITE_ORG is set automatically
bellard9fa3e852004-01-04 18:06:42 +00002253 depending on PAGE_WRITE */
pbrook53a59602006-03-25 19:31:22 +00002254void page_set_flags(target_ulong start, target_ulong end, int flags)
bellard9fa3e852004-01-04 18:06:42 +00002255{
2256 PageDesc *p;
pbrook53a59602006-03-25 19:31:22 +00002257 target_ulong addr;
bellard9fa3e852004-01-04 18:06:42 +00002258
pbrookc8a706f2008-06-02 16:16:42 +00002259 /* mmap_lock should already be held. */
bellard9fa3e852004-01-04 18:06:42 +00002260 start = start & TARGET_PAGE_MASK;
2261 end = TARGET_PAGE_ALIGN(end);
2262 if (flags & PAGE_WRITE)
2263 flags |= PAGE_WRITE_ORG;
bellard9fa3e852004-01-04 18:06:42 +00002264 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2265 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
pbrook17e23772008-06-09 13:47:45 +00002266 /* We may be called for host regions that are outside guest
2267 address space. */
2268 if (!p)
2269 return;
bellard9fa3e852004-01-04 18:06:42 +00002270 /* if the write protection is set, then we invalidate the code
2271 inside */
ths5fafdf22007-09-16 21:08:06 +00002272 if (!(p->flags & PAGE_WRITE) &&
bellard9fa3e852004-01-04 18:06:42 +00002273 (flags & PAGE_WRITE) &&
2274 p->first_tb) {
bellardd720b932004-04-25 17:57:43 +00002275 tb_invalidate_phys_page(addr, 0, NULL);
bellard9fa3e852004-01-04 18:06:42 +00002276 }
2277 p->flags = flags;
2278 }
bellard9fa3e852004-01-04 18:06:42 +00002279}
2280
ths3d97b402007-11-02 19:02:07 +00002281int page_check_range(target_ulong start, target_ulong len, int flags)
2282{
2283 PageDesc *p;
2284 target_ulong end;
2285 target_ulong addr;
2286
balrog55f280c2008-10-28 10:24:11 +00002287 if (start + len < start)
2288 /* we've wrapped around */
2289 return -1;
2290
ths3d97b402007-11-02 19:02:07 +00002291 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2292 start = start & TARGET_PAGE_MASK;
2293
ths3d97b402007-11-02 19:02:07 +00002294 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2295 p = page_find(addr >> TARGET_PAGE_BITS);
2296 if( !p )
2297 return -1;
2298 if( !(p->flags & PAGE_VALID) )
2299 return -1;
2300
bellarddae32702007-11-14 10:51:00 +00002301 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
ths3d97b402007-11-02 19:02:07 +00002302 return -1;
bellarddae32702007-11-14 10:51:00 +00002303 if (flags & PAGE_WRITE) {
2304 if (!(p->flags & PAGE_WRITE_ORG))
2305 return -1;
2306 /* unprotect the page if it was put read-only because it
2307 contains translated code */
2308 if (!(p->flags & PAGE_WRITE)) {
2309 if (!page_unprotect(addr, 0, NULL))
2310 return -1;
2311 }
2312 return 0;
2313 }
ths3d97b402007-11-02 19:02:07 +00002314 }
2315 return 0;
2316}
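/* Usage note (illustrative): page_check_range() returns 0 only when every
   page in [start, start+len) is valid and grants the requested PAGE_READ /
   PAGE_WRITE access, unprotecting pages that were made read-only to guard
   translated code; user-mode emulation is assumed to use it for guest access
   checks before copying data in or out. */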
2317
bellard9fa3e852004-01-04 18:06:42 +00002318/* called from signal handler: invalidate the code and unprotect the
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002319 page. Return TRUE if the fault was successfully handled. */
pbrook53a59602006-03-25 19:31:22 +00002320int page_unprotect(target_ulong address, unsigned long pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00002321{
2322 unsigned int page_index, prot, pindex;
2323 PageDesc *p, *p1;
pbrook53a59602006-03-25 19:31:22 +00002324 target_ulong host_start, host_end, addr;
bellard9fa3e852004-01-04 18:06:42 +00002325
pbrookc8a706f2008-06-02 16:16:42 +00002326 /* Technically this isn't safe inside a signal handler. However we
2327 know this only ever happens in a synchronous SEGV handler, so in
2328 practice it seems to be ok. */
2329 mmap_lock();
2330
bellard83fb7ad2004-07-05 21:25:26 +00002331 host_start = address & qemu_host_page_mask;
bellard9fa3e852004-01-04 18:06:42 +00002332 page_index = host_start >> TARGET_PAGE_BITS;
2333 p1 = page_find(page_index);
pbrookc8a706f2008-06-02 16:16:42 +00002334 if (!p1) {
2335 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002336 return 0;
pbrookc8a706f2008-06-02 16:16:42 +00002337 }
bellard83fb7ad2004-07-05 21:25:26 +00002338 host_end = host_start + qemu_host_page_size;
bellard9fa3e852004-01-04 18:06:42 +00002339 p = p1;
2340 prot = 0;
2341 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2342 prot |= p->flags;
2343 p++;
2344 }
2345 /* if the page was really writable, then we change its
2346 protection back to writable */
2347 if (prot & PAGE_WRITE_ORG) {
2348 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2349 if (!(p1[pindex].flags & PAGE_WRITE)) {
ths5fafdf22007-09-16 21:08:06 +00002350 mprotect((void *)g2h(host_start), qemu_host_page_size,
bellard9fa3e852004-01-04 18:06:42 +00002351 (prot & PAGE_BITS) | PAGE_WRITE);
2352 p1[pindex].flags |= PAGE_WRITE;
2353 /* and since the content will be modified, we must invalidate
2354 the corresponding translated code. */
bellardd720b932004-04-25 17:57:43 +00002355 tb_invalidate_phys_page(address, pc, puc);
bellard9fa3e852004-01-04 18:06:42 +00002356#ifdef DEBUG_TB_CHECK
2357 tb_invalidate_check(address);
2358#endif
pbrookc8a706f2008-06-02 16:16:42 +00002359 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002360 return 1;
2361 }
2362 }
pbrookc8a706f2008-06-02 16:16:42 +00002363 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002364 return 0;
2365}
2366
bellard6a00d602005-11-21 23:25:50 +00002367static inline void tlb_set_dirty(CPUState *env,
2368 unsigned long addr, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002369{
2370}
bellard9fa3e852004-01-04 18:06:42 +00002371#endif /* defined(CONFIG_USER_ONLY) */
2372
pbrooke2eef172008-06-08 01:09:01 +00002373#if !defined(CONFIG_USER_ONLY)
pbrook8da3ff12008-12-01 18:59:50 +00002374
Paul Brookc04b2b72010-03-01 03:31:14 +00002375#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2376typedef struct subpage_t {
2377 target_phys_addr_t base;
2378 CPUReadMemoryFunc * const *mem_read[TARGET_PAGE_SIZE][4];
2379 CPUWriteMemoryFunc * const *mem_write[TARGET_PAGE_SIZE][4];
2380 void *opaque[TARGET_PAGE_SIZE][2][4];
2381 ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
2382} subpage_t;
2383
Anthony Liguoric227f092009-10-01 16:12:16 -05002384static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2385 ram_addr_t memory, ram_addr_t region_offset);
2386static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2387 ram_addr_t orig_memory, ram_addr_t region_offset);
blueswir1db7b5422007-05-26 17:36:03 +00002388#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2389 need_subpage) \
2390 do { \
2391 if (addr > start_addr) \
2392 start_addr2 = 0; \
2393 else { \
2394 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2395 if (start_addr2 > 0) \
2396 need_subpage = 1; \
2397 } \
2398 \
blueswir149e9fba2007-05-30 17:25:06 +00002399 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
blueswir1db7b5422007-05-26 17:36:03 +00002400 end_addr2 = TARGET_PAGE_SIZE - 1; \
2401 else { \
2402 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2403 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2404 need_subpage = 1; \
2405 } \
2406 } while (0)
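/* Descriptive note: for a registration that does not start or end on a page
   boundary, CHECK_SUBPAGE() computes the offsets within the current target
   page that are actually covered (start_addr2..end_addr2) and sets
   need_subpage when only part of the page is affected, which is what forces
   the subpage_t machinery to be used for that page below. */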
2407
Michael S. Tsirkin8f2498f2009-09-29 18:53:16 +02002408/* register physical memory.
2409 For RAM, 'size' must be a multiple of the target page size.
2410 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
pbrook8da3ff12008-12-01 18:59:50 +00002411 io memory page. The address used when calling the IO function is
2412 the offset from the start of the region, plus region_offset. Both
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002413 start_addr and region_offset are rounded down to a page boundary
pbrook8da3ff12008-12-01 18:59:50 +00002414 before calculating this offset. This should not be a problem unless
2415 the low bits of start_addr and region_offset differ. */
Anthony Liguoric227f092009-10-01 16:12:16 -05002416void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2417 ram_addr_t size,
2418 ram_addr_t phys_offset,
2419 ram_addr_t region_offset)
bellard33417e72003-08-10 21:47:01 +00002420{
Anthony Liguoric227f092009-10-01 16:12:16 -05002421 target_phys_addr_t addr, end_addr;
bellard92e873b2004-05-21 14:52:29 +00002422 PhysPageDesc *p;
bellard9d420372006-06-25 22:25:22 +00002423 CPUState *env;
Anthony Liguoric227f092009-10-01 16:12:16 -05002424 ram_addr_t orig_size = size;
blueswir1db7b5422007-05-26 17:36:03 +00002425 void *subpage;
bellard33417e72003-08-10 21:47:01 +00002426
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002427 cpu_notify_set_memory(start_addr, size, phys_offset);
2428
pbrook67c4d232009-02-23 13:16:07 +00002429 if (phys_offset == IO_MEM_UNASSIGNED) {
2430 region_offset = start_addr;
2431 }
pbrook8da3ff12008-12-01 18:59:50 +00002432 region_offset &= TARGET_PAGE_MASK;
bellard5fd386f2004-05-23 21:11:22 +00002433 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
Anthony Liguoric227f092009-10-01 16:12:16 -05002434 end_addr = start_addr + (target_phys_addr_t)size;
blueswir149e9fba2007-05-30 17:25:06 +00002435 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
blueswir1db7b5422007-05-26 17:36:03 +00002436 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2437 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
Anthony Liguoric227f092009-10-01 16:12:16 -05002438 ram_addr_t orig_memory = p->phys_offset;
2439 target_phys_addr_t start_addr2, end_addr2;
blueswir1db7b5422007-05-26 17:36:03 +00002440 int need_subpage = 0;
2441
2442 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2443 need_subpage);
blueswir14254fab2008-01-01 16:57:19 +00002444 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
blueswir1db7b5422007-05-26 17:36:03 +00002445 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2446 subpage = subpage_init((addr & TARGET_PAGE_MASK),
pbrook8da3ff12008-12-01 18:59:50 +00002447 &p->phys_offset, orig_memory,
2448 p->region_offset);
blueswir1db7b5422007-05-26 17:36:03 +00002449 } else {
2450 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2451 >> IO_MEM_SHIFT];
2452 }
pbrook8da3ff12008-12-01 18:59:50 +00002453 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2454 region_offset);
2455 p->region_offset = 0;
blueswir1db7b5422007-05-26 17:36:03 +00002456 } else {
2457 p->phys_offset = phys_offset;
2458 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2459 (phys_offset & IO_MEM_ROMD))
2460 phys_offset += TARGET_PAGE_SIZE;
2461 }
2462 } else {
2463 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2464 p->phys_offset = phys_offset;
pbrook8da3ff12008-12-01 18:59:50 +00002465 p->region_offset = region_offset;
blueswir1db7b5422007-05-26 17:36:03 +00002466 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
pbrook8da3ff12008-12-01 18:59:50 +00002467 (phys_offset & IO_MEM_ROMD)) {
blueswir1db7b5422007-05-26 17:36:03 +00002468 phys_offset += TARGET_PAGE_SIZE;
pbrook0e8f0962008-12-02 09:02:15 +00002469 } else {
Anthony Liguoric227f092009-10-01 16:12:16 -05002470 target_phys_addr_t start_addr2, end_addr2;
blueswir1db7b5422007-05-26 17:36:03 +00002471 int need_subpage = 0;
2472
2473 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2474 end_addr2, need_subpage);
2475
blueswir14254fab2008-01-01 16:57:19 +00002476 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
blueswir1db7b5422007-05-26 17:36:03 +00002477 subpage = subpage_init((addr & TARGET_PAGE_MASK),
pbrook8da3ff12008-12-01 18:59:50 +00002478 &p->phys_offset, IO_MEM_UNASSIGNED,
pbrook67c4d232009-02-23 13:16:07 +00002479 addr & TARGET_PAGE_MASK);
blueswir1db7b5422007-05-26 17:36:03 +00002480 subpage_register(subpage, start_addr2, end_addr2,
pbrook8da3ff12008-12-01 18:59:50 +00002481 phys_offset, region_offset);
2482 p->region_offset = 0;
blueswir1db7b5422007-05-26 17:36:03 +00002483 }
2484 }
2485 }
pbrook8da3ff12008-12-01 18:59:50 +00002486 region_offset += TARGET_PAGE_SIZE;
bellard33417e72003-08-10 21:47:01 +00002487 }
ths3b46e622007-09-17 08:09:54 +00002488
bellard9d420372006-06-25 22:25:22 +00002489 /* since each CPU stores ram addresses in its TLB cache, we must
2490 reset the modified entries */
2491 /* XXX: slow ! */
2492 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2493 tlb_flush(env, 1);
2494 }
bellard33417e72003-08-10 21:47:01 +00002495}
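/* Usage sketch (illustrative, not from this file): board code normally goes
   through the cpu_register_physical_memory() wrapper, which is assumed to
   call this function with region_offset == 0.  Mapping 64MB of RAM at guest
   physical address 0 would look roughly like:

       ram_addr_t ram = qemu_ram_alloc(64 * 1024 * 1024);
       cpu_register_physical_memory(0, 64 * 1024 * 1024, ram | IO_MEM_RAM);

   The phys_offset argument is either a RAM offset returned by
   qemu_ram_alloc() (optionally tagged with IO_MEM_RAM/IO_MEM_ROM) or an I/O
   index returned by cpu_register_io_memory(). */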
2496
bellardba863452006-09-24 18:41:10 +00002497/* XXX: temporary until new memory mapping API */
Anthony Liguoric227f092009-10-01 16:12:16 -05002498ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
bellardba863452006-09-24 18:41:10 +00002499{
2500 PhysPageDesc *p;
2501
2502 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2503 if (!p)
2504 return IO_MEM_UNASSIGNED;
2505 return p->phys_offset;
2506}
2507
Anthony Liguoric227f092009-10-01 16:12:16 -05002508void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002509{
2510 if (kvm_enabled())
2511 kvm_coalesce_mmio_region(addr, size);
2512}
2513
Anthony Liguoric227f092009-10-01 16:12:16 -05002514void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002515{
2516 if (kvm_enabled())
2517 kvm_uncoalesce_mmio_region(addr, size);
2518}
2519
Sheng Yang62a27442010-01-26 19:21:16 +08002520void qemu_flush_coalesced_mmio_buffer(void)
2521{
2522 if (kvm_enabled())
2523 kvm_flush_coalesced_mmio_buffer();
2524}
2525
Anthony Liguoric227f092009-10-01 16:12:16 -05002526ram_addr_t qemu_ram_alloc(ram_addr_t size)
pbrook94a6b542009-04-11 17:15:54 +00002527{
2528 RAMBlock *new_block;
2529
pbrook94a6b542009-04-11 17:15:54 +00002530 size = TARGET_PAGE_ALIGN(size);
2531 new_block = qemu_malloc(sizeof(*new_block));
2532
Alexander Graf6b024942009-12-05 12:44:25 +01002533#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2534 /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
2535 new_block->host = mmap((void*)0x1000000, size, PROT_EXEC|PROT_READ|PROT_WRITE,
2536 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
2537#else
pbrook94a6b542009-04-11 17:15:54 +00002538 new_block->host = qemu_vmalloc(size);
Alexander Graf6b024942009-12-05 12:44:25 +01002539#endif
Izik Eidusccb167e2009-10-08 16:39:39 +02002540#ifdef MADV_MERGEABLE
2541 madvise(new_block->host, size, MADV_MERGEABLE);
2542#endif
pbrook94a6b542009-04-11 17:15:54 +00002543 new_block->offset = last_ram_offset;
2544 new_block->length = size;
2545
2546 new_block->next = ram_blocks;
2547 ram_blocks = new_block;
2548
2549 phys_ram_dirty = qemu_realloc(phys_ram_dirty,
2550 (last_ram_offset + size) >> TARGET_PAGE_BITS);
2551 memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
2552 0xff, size >> TARGET_PAGE_BITS);
2553
2554 last_ram_offset += size;
2555
Jan Kiszka6f0437e2009-04-26 18:03:40 +02002556 if (kvm_enabled())
2557 kvm_setup_guest_memory(new_block->host, size);
2558
pbrook94a6b542009-04-11 17:15:54 +00002559 return new_block->offset;
2560}
bellarde9a1ab12007-02-08 23:08:38 +00002561
Anthony Liguoric227f092009-10-01 16:12:16 -05002562void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00002563{
pbrook94a6b542009-04-11 17:15:54 +00002564 /* TODO: implement this. */
bellarde9a1ab12007-02-08 23:08:38 +00002565}
2566
pbrookdc828ca2009-04-09 22:21:07 +00002567/* Return a host pointer to ram allocated with qemu_ram_alloc.
pbrook5579c7f2009-04-11 14:47:08 +00002568 With the exception of the softmmu code in this file, this should
2569 only be used for local memory (e.g. video ram) that the device owns,
2570 and knows it isn't going to access beyond the end of the block.
2571
2572 It should not be used for general purpose DMA.
2573 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2574 */
Anthony Liguoric227f092009-10-01 16:12:16 -05002575void *qemu_get_ram_ptr(ram_addr_t addr)
pbrookdc828ca2009-04-09 22:21:07 +00002576{
pbrook94a6b542009-04-11 17:15:54 +00002577 RAMBlock *prev;
2578 RAMBlock **prevp;
2579 RAMBlock *block;
2580
pbrook94a6b542009-04-11 17:15:54 +00002581 prev = NULL;
2582 prevp = &ram_blocks;
2583 block = ram_blocks;
2584 while (block && (block->offset > addr
2585 || block->offset + block->length <= addr)) {
2586 if (prev)
2587 prevp = &prev->next;
2588 prev = block;
2589 block = block->next;
2590 }
2591 if (!block) {
2592 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2593 abort();
2594 }
2595 /* Move this entry to the start of the list. */
2596 if (prev) {
2597 prev->next = block->next;
2598 block->next = *prevp;
2599 *prevp = block;
2600 }
2601 return block->host + (addr - block->offset);
pbrookdc828ca2009-04-09 22:21:07 +00002602}
2603
pbrook5579c7f2009-04-11 14:47:08 +00002604/* Some of the softmmu routines need to translate from a host pointer
2605 (typically a TLB entry) back to a ram offset. */
Anthony Liguoric227f092009-10-01 16:12:16 -05002606ram_addr_t qemu_ram_addr_from_host(void *ptr)
pbrook5579c7f2009-04-11 14:47:08 +00002607{
pbrook94a6b542009-04-11 17:15:54 +00002608 RAMBlock *prev;
pbrook94a6b542009-04-11 17:15:54 +00002609 RAMBlock *block;
2610 uint8_t *host = ptr;
2611
pbrook94a6b542009-04-11 17:15:54 +00002612 prev = NULL;
pbrook94a6b542009-04-11 17:15:54 +00002613 block = ram_blocks;
2614 while (block && (block->host > host
2615 || block->host + block->length <= host)) {
pbrook94a6b542009-04-11 17:15:54 +00002616 prev = block;
2617 block = block->next;
2618 }
2619 if (!block) {
2620 fprintf(stderr, "Bad ram pointer %p\n", ptr);
2621 abort();
2622 }
2623 return block->offset + (host - block->host);
pbrook5579c7f2009-04-11 14:47:08 +00002624}
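
/* Illustrative sketch, not part of the original file (compiled out): the two
   helpers above are inverses of each other for addresses inside a registered
   RAM block.  A device that owns a block (e.g. video RAM) can keep the host
   pointer and later recover the ram_addr_t, for instance to mark pages
   dirty. */
#if 0
static void example_ram_ptr_round_trip(ram_addr_t vram_offset)
{
    uint8_t *host;
    ram_addr_t back;

    host = qemu_get_ram_ptr(vram_offset);   /* host view of the block */
    host[0] = 0xff;                         /* device-local access only */
    back = qemu_ram_addr_from_host(host);   /* back == vram_offset */
    (void)back;
}
#endif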
2625
Anthony Liguoric227f092009-10-01 16:12:16 -05002626static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
bellard33417e72003-08-10 21:47:01 +00002627{
pbrook67d3b952006-12-18 05:03:52 +00002628#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00002629 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
pbrook67d3b952006-12-18 05:03:52 +00002630#endif
Edgar E. Iglesiasfaed1c22009-09-03 13:25:09 +02002631#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
blueswir1e18231a2008-10-06 18:46:28 +00002632 do_unassigned_access(addr, 0, 0, 0, 1);
2633#endif
2634 return 0;
2635}
2636
Anthony Liguoric227f092009-10-01 16:12:16 -05002637static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
blueswir1e18231a2008-10-06 18:46:28 +00002638{
2639#ifdef DEBUG_UNASSIGNED
2640 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2641#endif
Edgar E. Iglesiasfaed1c22009-09-03 13:25:09 +02002642#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
blueswir1e18231a2008-10-06 18:46:28 +00002643 do_unassigned_access(addr, 0, 0, 0, 2);
2644#endif
2645 return 0;
2646}
2647
Anthony Liguoric227f092009-10-01 16:12:16 -05002648static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
blueswir1e18231a2008-10-06 18:46:28 +00002649{
2650#ifdef DEBUG_UNASSIGNED
2651 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2652#endif
Edgar E. Iglesiasfaed1c22009-09-03 13:25:09 +02002653#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
blueswir1e18231a2008-10-06 18:46:28 +00002654 do_unassigned_access(addr, 0, 0, 0, 4);
blueswir1b4f0a312007-05-06 17:59:24 +00002655#endif
bellard33417e72003-08-10 21:47:01 +00002656 return 0;
2657}
2658
Anthony Liguoric227f092009-10-01 16:12:16 -05002659static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
bellard33417e72003-08-10 21:47:01 +00002660{
pbrook67d3b952006-12-18 05:03:52 +00002661#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00002662 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
pbrook67d3b952006-12-18 05:03:52 +00002663#endif
Edgar E. Iglesiasfaed1c22009-09-03 13:25:09 +02002664#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
blueswir1e18231a2008-10-06 18:46:28 +00002665 do_unassigned_access(addr, 1, 0, 0, 1);
2666#endif
2667}
2668
Anthony Liguoric227f092009-10-01 16:12:16 -05002669static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
blueswir1e18231a2008-10-06 18:46:28 +00002670{
2671#ifdef DEBUG_UNASSIGNED
2672 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2673#endif
Edgar E. Iglesiasfaed1c22009-09-03 13:25:09 +02002674#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
blueswir1e18231a2008-10-06 18:46:28 +00002675 do_unassigned_access(addr, 1, 0, 0, 2);
2676#endif
2677}
2678
Anthony Liguoric227f092009-10-01 16:12:16 -05002679static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
blueswir1e18231a2008-10-06 18:46:28 +00002680{
2681#ifdef DEBUG_UNASSIGNED
2682 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2683#endif
Edgar E. Iglesiasfaed1c22009-09-03 13:25:09 +02002684#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
blueswir1e18231a2008-10-06 18:46:28 +00002685 do_unassigned_access(addr, 1, 0, 0, 4);
blueswir1b4f0a312007-05-06 17:59:24 +00002686#endif
bellard33417e72003-08-10 21:47:01 +00002687}
2688
Blue Swirld60efc62009-08-25 18:29:31 +00002689static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
bellard33417e72003-08-10 21:47:01 +00002690 unassigned_mem_readb,
blueswir1e18231a2008-10-06 18:46:28 +00002691 unassigned_mem_readw,
2692 unassigned_mem_readl,
bellard33417e72003-08-10 21:47:01 +00002693};
2694
Blue Swirld60efc62009-08-25 18:29:31 +00002695static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
bellard33417e72003-08-10 21:47:01 +00002696 unassigned_mem_writeb,
blueswir1e18231a2008-10-06 18:46:28 +00002697 unassigned_mem_writew,
2698 unassigned_mem_writel,
bellard33417e72003-08-10 21:47:01 +00002699};
2700
Anthony Liguoric227f092009-10-01 16:12:16 -05002701static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
pbrook0f459d12008-06-09 00:20:13 +00002702 uint32_t val)
bellard1ccde1c2004-02-06 19:46:14 +00002703{
bellard3a7d9292005-08-21 09:26:42 +00002704 int dirty_flags;
bellard3a7d9292005-08-21 09:26:42 +00002705 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2706 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2707#if !defined(CONFIG_USER_ONLY)
2708 tb_invalidate_phys_page_fast(ram_addr, 1);
2709 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2710#endif
2711 }
pbrook5579c7f2009-04-11 14:47:08 +00002712 stb_p(qemu_get_ram_ptr(ram_addr), val);
bellardf23db162005-08-21 19:12:28 +00002713 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2714 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2715 /* we remove the notdirty callback only if the code has been
2716 flushed */
2717 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00002718 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00002719}
2720
Anthony Liguoric227f092009-10-01 16:12:16 -05002721static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
pbrook0f459d12008-06-09 00:20:13 +00002722 uint32_t val)
bellard1ccde1c2004-02-06 19:46:14 +00002723{
bellard3a7d9292005-08-21 09:26:42 +00002724 int dirty_flags;
bellard3a7d9292005-08-21 09:26:42 +00002725 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2726 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2727#if !defined(CONFIG_USER_ONLY)
2728 tb_invalidate_phys_page_fast(ram_addr, 2);
2729 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2730#endif
2731 }
pbrook5579c7f2009-04-11 14:47:08 +00002732 stw_p(qemu_get_ram_ptr(ram_addr), val);
bellardf23db162005-08-21 19:12:28 +00002733 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2734 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2735 /* we remove the notdirty callback only if the code has been
2736 flushed */
2737 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00002738 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00002739}
2740
Anthony Liguoric227f092009-10-01 16:12:16 -05002741static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
pbrook0f459d12008-06-09 00:20:13 +00002742 uint32_t val)
bellard1ccde1c2004-02-06 19:46:14 +00002743{
bellard3a7d9292005-08-21 09:26:42 +00002744 int dirty_flags;
bellard3a7d9292005-08-21 09:26:42 +00002745 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2746 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2747#if !defined(CONFIG_USER_ONLY)
2748 tb_invalidate_phys_page_fast(ram_addr, 4);
2749 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2750#endif
2751 }
pbrook5579c7f2009-04-11 14:47:08 +00002752 stl_p(qemu_get_ram_ptr(ram_addr), val);
bellardf23db162005-08-21 19:12:28 +00002753 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2754 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2755 /* we remove the notdirty callback only if the code has been
2756 flushed */
2757 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00002758 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00002759}
2760
Blue Swirld60efc62009-08-25 18:29:31 +00002761static CPUReadMemoryFunc * const error_mem_read[3] = {
bellard3a7d9292005-08-21 09:26:42 +00002762 NULL, /* never used */
2763 NULL, /* never used */
2764 NULL, /* never used */
2765};
2766
Blue Swirld60efc62009-08-25 18:29:31 +00002767static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
bellard1ccde1c2004-02-06 19:46:14 +00002768 notdirty_mem_writeb,
2769 notdirty_mem_writew,
2770 notdirty_mem_writel,
2771};
2772
pbrook0f459d12008-06-09 00:20:13 +00002773/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00002774static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00002775{
2776 CPUState *env = cpu_single_env;
aliguori06d55cc2008-11-18 20:24:06 +00002777 target_ulong pc, cs_base;
2778 TranslationBlock *tb;
pbrook0f459d12008-06-09 00:20:13 +00002779 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00002780 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00002781 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00002782
aliguori06d55cc2008-11-18 20:24:06 +00002783 if (env->watchpoint_hit) {
2784 /* We re-entered the check after replacing the TB. Now raise
2785 * the debug interrupt so that it will trigger after the
2786 * current instruction. */
2787 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2788 return;
2789 }
pbrook2e70f6e2008-06-29 01:03:05 +00002790 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002791 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00002792 if ((vaddr == (wp->vaddr & len_mask) ||
2793 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00002794 wp->flags |= BP_WATCHPOINT_HIT;
2795 if (!env->watchpoint_hit) {
2796 env->watchpoint_hit = wp;
2797 tb = tb_find_pc(env->mem_io_pc);
2798 if (!tb) {
2799 cpu_abort(env, "check_watchpoint: could not find TB for "
2800 "pc=%p", (void *)env->mem_io_pc);
2801 }
2802 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2803 tb_phys_invalidate(tb, -1);
2804 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2805 env->exception_index = EXCP_DEBUG;
2806 } else {
2807 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2808 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2809 }
2810 cpu_resume_from_signal(env, NULL);
aliguori06d55cc2008-11-18 20:24:06 +00002811 }
aliguori6e140f22008-11-18 20:37:55 +00002812 } else {
2813 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00002814 }
2815 }
2816}
2817
pbrook6658ffb2007-03-16 23:58:11 +00002818/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2819 so these check for a hit then pass through to the normal out-of-line
2820 phys routines. */
Anthony Liguoric227f092009-10-01 16:12:16 -05002821static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
pbrook6658ffb2007-03-16 23:58:11 +00002822{
aliguorib4051332008-11-18 20:14:20 +00002823 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
pbrook6658ffb2007-03-16 23:58:11 +00002824 return ldub_phys(addr);
2825}
2826
Anthony Liguoric227f092009-10-01 16:12:16 -05002827static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
pbrook6658ffb2007-03-16 23:58:11 +00002828{
aliguorib4051332008-11-18 20:14:20 +00002829 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
pbrook6658ffb2007-03-16 23:58:11 +00002830 return lduw_phys(addr);
2831}
2832
Anthony Liguoric227f092009-10-01 16:12:16 -05002833static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
pbrook6658ffb2007-03-16 23:58:11 +00002834{
aliguorib4051332008-11-18 20:14:20 +00002835 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
pbrook6658ffb2007-03-16 23:58:11 +00002836 return ldl_phys(addr);
2837}
2838
Anthony Liguoric227f092009-10-01 16:12:16 -05002839static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
pbrook6658ffb2007-03-16 23:58:11 +00002840 uint32_t val)
2841{
aliguorib4051332008-11-18 20:14:20 +00002842 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
pbrook6658ffb2007-03-16 23:58:11 +00002843 stb_phys(addr, val);
2844}
2845
Anthony Liguoric227f092009-10-01 16:12:16 -05002846static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
pbrook6658ffb2007-03-16 23:58:11 +00002847 uint32_t val)
2848{
aliguorib4051332008-11-18 20:14:20 +00002849 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
pbrook6658ffb2007-03-16 23:58:11 +00002850 stw_phys(addr, val);
2851}
2852
Anthony Liguoric227f092009-10-01 16:12:16 -05002853static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
pbrook6658ffb2007-03-16 23:58:11 +00002854 uint32_t val)
2855{
aliguorib4051332008-11-18 20:14:20 +00002856 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
pbrook6658ffb2007-03-16 23:58:11 +00002857 stl_phys(addr, val);
2858}
2859
Blue Swirld60efc62009-08-25 18:29:31 +00002860static CPUReadMemoryFunc * const watch_mem_read[3] = {
pbrook6658ffb2007-03-16 23:58:11 +00002861 watch_mem_readb,
2862 watch_mem_readw,
2863 watch_mem_readl,
2864};
2865
Blue Swirld60efc62009-08-25 18:29:31 +00002866static CPUWriteMemoryFunc * const watch_mem_write[3] = {
pbrook6658ffb2007-03-16 23:58:11 +00002867 watch_mem_writeb,
2868 watch_mem_writew,
2869 watch_mem_writel,
2870};
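
/* Illustrative sketch, not part of the original file (compiled out): the
   watch_mem_* handlers above are only reached for pages that contain at
   least one watchpoint; the TLB code routes accesses to such pages through
   io_mem_watch.  This assumes the cpu_watchpoint_insert() helper defined
   earlier in this file; the address and length are example values. */
#if 0
static void example_insert_watchpoint(CPUState *env)
{
    CPUWatchpoint *wp;

    /* trap 4-byte writes to guest virtual address 0x1000 */
    cpu_watchpoint_insert(env, 0x1000, 4, BP_MEM_WRITE | BP_GDB, &wp);
}
#endif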
pbrook6658ffb2007-03-16 23:58:11 +00002871
Anthony Liguoric227f092009-10-01 16:12:16 -05002872static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
blueswir1db7b5422007-05-26 17:36:03 +00002873 unsigned int len)
2874{
blueswir1db7b5422007-05-26 17:36:03 +00002875 uint32_t ret;
2876 unsigned int idx;
2877
pbrook8da3ff12008-12-01 18:59:50 +00002878 idx = SUBPAGE_IDX(addr);
blueswir1db7b5422007-05-26 17:36:03 +00002879#if defined(DEBUG_SUBPAGE)
2880 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2881 mmio, len, addr, idx);
2882#endif
pbrook8da3ff12008-12-01 18:59:50 +00002883 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2884 addr + mmio->region_offset[idx][0][len]);
blueswir1db7b5422007-05-26 17:36:03 +00002885
2886 return ret;
2887}
2888
Anthony Liguoric227f092009-10-01 16:12:16 -05002889static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
blueswir1db7b5422007-05-26 17:36:03 +00002890 uint32_t value, unsigned int len)
2891{
blueswir1db7b5422007-05-26 17:36:03 +00002892 unsigned int idx;
2893
pbrook8da3ff12008-12-01 18:59:50 +00002894 idx = SUBPAGE_IDX(addr);
blueswir1db7b5422007-05-26 17:36:03 +00002895#if defined(DEBUG_SUBPAGE)
2896 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2897 mmio, len, addr, idx, value);
2898#endif
pbrook8da3ff12008-12-01 18:59:50 +00002899 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2900 addr + mmio->region_offset[idx][1][len],
2901 value);
blueswir1db7b5422007-05-26 17:36:03 +00002902}
2903
Anthony Liguoric227f092009-10-01 16:12:16 -05002904static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
blueswir1db7b5422007-05-26 17:36:03 +00002905{
2906#if defined(DEBUG_SUBPAGE)
2907 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2908#endif
2909
2910 return subpage_readlen(opaque, addr, 0);
2911}
2912
Anthony Liguoric227f092009-10-01 16:12:16 -05002913static void subpage_writeb (void *opaque, target_phys_addr_t addr,
blueswir1db7b5422007-05-26 17:36:03 +00002914 uint32_t value)
2915{
2916#if defined(DEBUG_SUBPAGE)
2917 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2918#endif
2919 subpage_writelen(opaque, addr, value, 0);
2920}
2921
Anthony Liguoric227f092009-10-01 16:12:16 -05002922static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
blueswir1db7b5422007-05-26 17:36:03 +00002923{
2924#if defined(DEBUG_SUBPAGE)
2925 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2926#endif
2927
2928 return subpage_readlen(opaque, addr, 1);
2929}
2930
Anthony Liguoric227f092009-10-01 16:12:16 -05002931static void subpage_writew (void *opaque, target_phys_addr_t addr,
blueswir1db7b5422007-05-26 17:36:03 +00002932 uint32_t value)
2933{
2934#if defined(DEBUG_SUBPAGE)
2935 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2936#endif
2937 subpage_writelen(opaque, addr, value, 1);
2938}
2939
Anthony Liguoric227f092009-10-01 16:12:16 -05002940static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
blueswir1db7b5422007-05-26 17:36:03 +00002941{
2942#if defined(DEBUG_SUBPAGE)
2943 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2944#endif
2945
2946 return subpage_readlen(opaque, addr, 2);
2947}
2948
2949static void subpage_writel (void *opaque,
Anthony Liguoric227f092009-10-01 16:12:16 -05002950 target_phys_addr_t addr, uint32_t value)
blueswir1db7b5422007-05-26 17:36:03 +00002951{
2952#if defined(DEBUG_SUBPAGE)
2953 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2954#endif
2955 subpage_writelen(opaque, addr, value, 2);
2956}
2957
Blue Swirld60efc62009-08-25 18:29:31 +00002958static CPUReadMemoryFunc * const subpage_read[] = {
blueswir1db7b5422007-05-26 17:36:03 +00002959 &subpage_readb,
2960 &subpage_readw,
2961 &subpage_readl,
2962};
2963
Blue Swirld60efc62009-08-25 18:29:31 +00002964static CPUWriteMemoryFunc * const subpage_write[] = {
blueswir1db7b5422007-05-26 17:36:03 +00002965 &subpage_writeb,
2966 &subpage_writew,
2967 &subpage_writel,
2968};
2969
Anthony Liguoric227f092009-10-01 16:12:16 -05002970static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2971 ram_addr_t memory, ram_addr_t region_offset)
blueswir1db7b5422007-05-26 17:36:03 +00002972{
2973 int idx, eidx;
blueswir14254fab2008-01-01 16:57:19 +00002974 unsigned int i;
blueswir1db7b5422007-05-26 17:36:03 +00002975
2976 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2977 return -1;
2978 idx = SUBPAGE_IDX(start);
2979 eidx = SUBPAGE_IDX(end);
2980#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00002981 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
blueswir1db7b5422007-05-26 17:36:03 +00002982 mmio, start, end, idx, eidx, memory);
2983#endif
2984 memory >>= IO_MEM_SHIFT;
2985 for (; idx <= eidx; idx++) {
blueswir14254fab2008-01-01 16:57:19 +00002986 for (i = 0; i < 4; i++) {
blueswir13ee89922008-01-02 19:45:26 +00002987 if (io_mem_read[memory][i]) {
2988 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2989 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
pbrook8da3ff12008-12-01 18:59:50 +00002990 mmio->region_offset[idx][0][i] = region_offset;
blueswir13ee89922008-01-02 19:45:26 +00002991 }
2992 if (io_mem_write[memory][i]) {
2993 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2994 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
pbrook8da3ff12008-12-01 18:59:50 +00002995 mmio->region_offset[idx][1][i] = region_offset;
blueswir13ee89922008-01-02 19:45:26 +00002996 }
blueswir14254fab2008-01-01 16:57:19 +00002997 }
blueswir1db7b5422007-05-26 17:36:03 +00002998 }
2999
3000 return 0;
3001}
3002
Anthony Liguoric227f092009-10-01 16:12:16 -05003003static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3004 ram_addr_t orig_memory, ram_addr_t region_offset)
blueswir1db7b5422007-05-26 17:36:03 +00003005{
Anthony Liguoric227f092009-10-01 16:12:16 -05003006 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00003007 int subpage_memory;
3008
Anthony Liguoric227f092009-10-01 16:12:16 -05003009 mmio = qemu_mallocz(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00003010
3011 mmio->base = base;
Avi Kivity1eed09c2009-06-14 11:38:51 +03003012 subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
blueswir1db7b5422007-05-26 17:36:03 +00003013#if defined(DEBUG_SUBPAGE)
aliguori1eec6142009-02-05 22:06:18 +00003014 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3015 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
blueswir1db7b5422007-05-26 17:36:03 +00003016#endif
aliguori1eec6142009-02-05 22:06:18 +00003017 *phys = subpage_memory | IO_MEM_SUBPAGE;
3018 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
pbrook8da3ff12008-12-01 18:59:50 +00003019 region_offset);
blueswir1db7b5422007-05-26 17:36:03 +00003020
3021 return mmio;
3022}
3023
aliguori88715652009-02-11 15:20:58 +00003024static int get_free_io_mem_idx(void)
3025{
3026 int i;
3027
3028 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3029 if (!io_mem_used[i]) {
3030 io_mem_used[i] = 1;
3031 return i;
3032 }
Riku Voipioc6703b42009-12-03 15:56:05 +02003033    fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
aliguori88715652009-02-11 15:20:58 +00003034 return -1;
3035}
3036
bellard33417e72003-08-10 21:47:01 +00003037/* mem_read and mem_write are arrays of functions containing the
3038 function to access byte (index 0), word (index 1) and dword (index
Paul Brook0b4e6e32009-04-30 18:37:55 +01003039 2). Functions can be omitted with a NULL function pointer.
blueswir13ee89922008-01-02 19:45:26 +00003040   If io_index is non-zero, the corresponding io zone is
blueswir14254fab2008-01-01 16:57:19 +00003041 modified. If it is zero, a new io zone is allocated. The return
3042 value can be used with cpu_register_physical_memory(). (-1) is
3043   returned on error. */
Avi Kivity1eed09c2009-06-14 11:38:51 +03003044static int cpu_register_io_memory_fixed(int io_index,
Blue Swirld60efc62009-08-25 18:29:31 +00003045 CPUReadMemoryFunc * const *mem_read,
3046 CPUWriteMemoryFunc * const *mem_write,
Avi Kivity1eed09c2009-06-14 11:38:51 +03003047 void *opaque)
bellard33417e72003-08-10 21:47:01 +00003048{
blueswir14254fab2008-01-01 16:57:19 +00003049 int i, subwidth = 0;
bellard33417e72003-08-10 21:47:01 +00003050
3051 if (io_index <= 0) {
aliguori88715652009-02-11 15:20:58 +00003052 io_index = get_free_io_mem_idx();
3053 if (io_index == -1)
3054 return io_index;
bellard33417e72003-08-10 21:47:01 +00003055 } else {
Avi Kivity1eed09c2009-06-14 11:38:51 +03003056 io_index >>= IO_MEM_SHIFT;
bellard33417e72003-08-10 21:47:01 +00003057 if (io_index >= IO_MEM_NB_ENTRIES)
3058 return -1;
3059 }
bellardb5ff1b32005-11-26 10:38:39 +00003060
bellard33417e72003-08-10 21:47:01 +00003061 for(i = 0;i < 3; i++) {
blueswir14254fab2008-01-01 16:57:19 +00003062 if (!mem_read[i] || !mem_write[i])
3063 subwidth = IO_MEM_SUBWIDTH;
bellard33417e72003-08-10 21:47:01 +00003064 io_mem_read[io_index][i] = mem_read[i];
3065 io_mem_write[io_index][i] = mem_write[i];
3066 }
bellarda4193c82004-06-03 14:01:43 +00003067 io_mem_opaque[io_index] = opaque;
blueswir14254fab2008-01-01 16:57:19 +00003068 return (io_index << IO_MEM_SHIFT) | subwidth;
bellard33417e72003-08-10 21:47:01 +00003069}
bellard61382a52003-10-27 21:22:23 +00003070
Blue Swirld60efc62009-08-25 18:29:31 +00003071int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3072 CPUWriteMemoryFunc * const *mem_write,
Avi Kivity1eed09c2009-06-14 11:38:51 +03003073 void *opaque)
3074{
3075 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
3076}
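
/* Illustrative sketch, not part of the original file (compiled out): a
   typical device registers its MMIO callbacks with cpu_register_io_memory()
   and maps the returned io index at a guest physical address.  The handler
   names, opaque state and addresses are made up; only 32-bit accesses are
   implemented to show that missing widths may be left NULL. */
#if 0
static uint32_t example_mmio_readl(void *opaque, target_phys_addr_t addr)
{
    return 0;                           /* device register read */
}

static void example_mmio_writel(void *opaque, target_phys_addr_t addr,
                                uint32_t val)
{
    /* device register write */
}

static CPUReadMemoryFunc * const example_mmio_read[3] = {
    NULL,                               /* byte access not supported */
    NULL,                               /* word access not supported */
    example_mmio_readl,
};

static CPUWriteMemoryFunc * const example_mmio_write[3] = {
    NULL,
    NULL,
    example_mmio_writel,
};

static void example_register_mmio(void *device_state)
{
    int io;

    io = cpu_register_io_memory(example_mmio_read, example_mmio_write,
                                device_state);
    /* map 4 KB of MMIO at guest physical address 0x10000000 */
    cpu_register_physical_memory(0x10000000, 0x1000, io);
}
#endif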
3077
aliguori88715652009-02-11 15:20:58 +00003078void cpu_unregister_io_memory(int io_table_address)
3079{
3080 int i;
3081 int io_index = io_table_address >> IO_MEM_SHIFT;
3082
3083 for (i=0;i < 3; i++) {
3084 io_mem_read[io_index][i] = unassigned_mem_read[i];
3085 io_mem_write[io_index][i] = unassigned_mem_write[i];
3086 }
3087 io_mem_opaque[io_index] = NULL;
3088 io_mem_used[io_index] = 0;
3089}
3090
Avi Kivitye9179ce2009-06-14 11:38:52 +03003091static void io_mem_init(void)
3092{
3093 int i;
3094
3095 cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
3096 cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
3097 cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
3098 for (i=0; i<5; i++)
3099 io_mem_used[i] = 1;
3100
3101 io_mem_watch = cpu_register_io_memory(watch_mem_read,
3102 watch_mem_write, NULL);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003103}
3104
pbrooke2eef172008-06-08 01:09:01 +00003105#endif /* !defined(CONFIG_USER_ONLY) */
3106
bellard13eb76e2004-01-24 15:23:36 +00003107/* physical memory access (slow version, mainly for debug) */
3108#if defined(CONFIG_USER_ONLY)
Paul Brooka68fe892010-03-01 00:08:59 +00003109int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3110 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003111{
3112 int l, flags;
3113 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00003114 void * p;
bellard13eb76e2004-01-24 15:23:36 +00003115
3116 while (len > 0) {
3117 page = addr & TARGET_PAGE_MASK;
3118 l = (page + TARGET_PAGE_SIZE) - addr;
3119 if (l > len)
3120 l = len;
3121 flags = page_get_flags(page);
3122 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00003123 return -1;
bellard13eb76e2004-01-24 15:23:36 +00003124 if (is_write) {
3125 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00003126 return -1;
bellard579a97f2007-11-11 14:26:47 +00003127 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003128 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00003129 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003130 memcpy(p, buf, l);
3131 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00003132 } else {
3133 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00003134 return -1;
bellard579a97f2007-11-11 14:26:47 +00003135 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003136 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00003137 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003138 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00003139 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00003140 }
3141 len -= l;
3142 buf += l;
3143 addr += l;
3144 }
Paul Brooka68fe892010-03-01 00:08:59 +00003145 return 0;
bellard13eb76e2004-01-24 15:23:36 +00003146}
bellard8df1cd02005-01-28 22:37:22 +00003147
bellard13eb76e2004-01-24 15:23:36 +00003148#else
Anthony Liguoric227f092009-10-01 16:12:16 -05003149void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
bellard13eb76e2004-01-24 15:23:36 +00003150 int len, int is_write)
3151{
3152 int l, io_index;
3153 uint8_t *ptr;
3154 uint32_t val;
Anthony Liguoric227f092009-10-01 16:12:16 -05003155 target_phys_addr_t page;
bellard2e126692004-04-25 21:28:44 +00003156 unsigned long pd;
bellard92e873b2004-05-21 14:52:29 +00003157 PhysPageDesc *p;
ths3b46e622007-09-17 08:09:54 +00003158
bellard13eb76e2004-01-24 15:23:36 +00003159 while (len > 0) {
3160 page = addr & TARGET_PAGE_MASK;
3161 l = (page + TARGET_PAGE_SIZE) - addr;
3162 if (l > len)
3163 l = len;
bellard92e873b2004-05-21 14:52:29 +00003164 p = phys_page_find(page >> TARGET_PAGE_BITS);
bellard13eb76e2004-01-24 15:23:36 +00003165 if (!p) {
3166 pd = IO_MEM_UNASSIGNED;
3167 } else {
3168 pd = p->phys_offset;
3169 }
ths3b46e622007-09-17 08:09:54 +00003170
bellard13eb76e2004-01-24 15:23:36 +00003171 if (is_write) {
bellard3a7d9292005-08-21 09:26:42 +00003172 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
Anthony Liguoric227f092009-10-01 16:12:16 -05003173 target_phys_addr_t addr1 = addr;
bellard13eb76e2004-01-24 15:23:36 +00003174 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00003175 if (p)
aurel326c2934d2009-02-18 21:37:17 +00003176 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bellard6a00d602005-11-21 23:25:50 +00003177 /* XXX: could force cpu_single_env to NULL to avoid
3178 potential bugs */
aurel326c2934d2009-02-18 21:37:17 +00003179 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003180 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003181 val = ldl_p(buf);
aurel326c2934d2009-02-18 21:37:17 +00003182 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
bellard13eb76e2004-01-24 15:23:36 +00003183 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003184 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003185 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003186 val = lduw_p(buf);
aurel326c2934d2009-02-18 21:37:17 +00003187 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
bellard13eb76e2004-01-24 15:23:36 +00003188 l = 2;
3189 } else {
bellard1c213d12005-09-03 10:49:04 +00003190 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003191 val = ldub_p(buf);
aurel326c2934d2009-02-18 21:37:17 +00003192 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
bellard13eb76e2004-01-24 15:23:36 +00003193 l = 1;
3194 }
3195 } else {
bellardb448f2f2004-02-25 23:24:04 +00003196 unsigned long addr1;
3197 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
bellard13eb76e2004-01-24 15:23:36 +00003198 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003199 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00003200 memcpy(ptr, buf, l);
bellard3a7d9292005-08-21 09:26:42 +00003201 if (!cpu_physical_memory_is_dirty(addr1)) {
3202 /* invalidate code */
3203 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3204 /* set dirty bit */
ths5fafdf22007-09-16 21:08:06 +00003205 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
bellardf23db162005-08-21 19:12:28 +00003206 (0xff & ~CODE_DIRTY_FLAG);
bellard3a7d9292005-08-21 09:26:42 +00003207 }
bellard13eb76e2004-01-24 15:23:36 +00003208 }
3209 } else {
ths5fafdf22007-09-16 21:08:06 +00003210 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
bellard2a4188a2006-06-25 21:54:59 +00003211 !(pd & IO_MEM_ROMD)) {
Anthony Liguoric227f092009-10-01 16:12:16 -05003212 target_phys_addr_t addr1 = addr;
bellard13eb76e2004-01-24 15:23:36 +00003213 /* I/O case */
3214 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00003215 if (p)
aurel326c2934d2009-02-18 21:37:17 +00003216 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3217 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003218 /* 32 bit read access */
aurel326c2934d2009-02-18 21:37:17 +00003219 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
bellardc27004e2005-01-03 23:35:10 +00003220 stl_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003221 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003222 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003223 /* 16 bit read access */
aurel326c2934d2009-02-18 21:37:17 +00003224 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
bellardc27004e2005-01-03 23:35:10 +00003225 stw_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003226 l = 2;
3227 } else {
bellard1c213d12005-09-03 10:49:04 +00003228 /* 8 bit read access */
aurel326c2934d2009-02-18 21:37:17 +00003229 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
bellardc27004e2005-01-03 23:35:10 +00003230 stb_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003231 l = 1;
3232 }
3233 } else {
3234 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003235 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bellard13eb76e2004-01-24 15:23:36 +00003236 (addr & ~TARGET_PAGE_MASK);
3237 memcpy(buf, ptr, l);
3238 }
3239 }
3240 len -= l;
3241 buf += l;
3242 addr += l;
3243 }
3244}
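
/* Illustrative sketch, not part of the original file (compiled out):
   cpu_physical_memory_rw() and the read/write wrappers around it are the
   general-purpose way to copy data to or from guest physical memory; they
   work for both RAM and MMIO-backed pages.  The guest address is an example
   value. */
#if 0
static void example_phys_copy(void)
{
    uint8_t buf[64];

    cpu_physical_memory_read(0x100000, buf, sizeof(buf));   /* guest -> host */
    buf[0] ^= 1;
    cpu_physical_memory_write(0x100000, buf, sizeof(buf));  /* host -> guest */
}
#endif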
bellard8df1cd02005-01-28 22:37:22 +00003245
bellardd0ecd2a2006-04-23 17:14:48 +00003246/* used for ROM loading: can write in RAM and ROM */
Anthony Liguoric227f092009-10-01 16:12:16 -05003247void cpu_physical_memory_write_rom(target_phys_addr_t addr,
bellardd0ecd2a2006-04-23 17:14:48 +00003248 const uint8_t *buf, int len)
3249{
3250 int l;
3251 uint8_t *ptr;
Anthony Liguoric227f092009-10-01 16:12:16 -05003252 target_phys_addr_t page;
bellardd0ecd2a2006-04-23 17:14:48 +00003253 unsigned long pd;
3254 PhysPageDesc *p;
ths3b46e622007-09-17 08:09:54 +00003255
bellardd0ecd2a2006-04-23 17:14:48 +00003256 while (len > 0) {
3257 page = addr & TARGET_PAGE_MASK;
3258 l = (page + TARGET_PAGE_SIZE) - addr;
3259 if (l > len)
3260 l = len;
3261 p = phys_page_find(page >> TARGET_PAGE_BITS);
3262 if (!p) {
3263 pd = IO_MEM_UNASSIGNED;
3264 } else {
3265 pd = p->phys_offset;
3266 }
ths3b46e622007-09-17 08:09:54 +00003267
bellardd0ecd2a2006-04-23 17:14:48 +00003268 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
bellard2a4188a2006-06-25 21:54:59 +00003269 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3270 !(pd & IO_MEM_ROMD)) {
bellardd0ecd2a2006-04-23 17:14:48 +00003271 /* do nothing */
3272 } else {
3273 unsigned long addr1;
3274 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3275 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003276 ptr = qemu_get_ram_ptr(addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00003277 memcpy(ptr, buf, l);
3278 }
3279 len -= l;
3280 buf += l;
3281 addr += l;
3282 }
3283}
3284
aliguori6d16c2f2009-01-22 16:59:11 +00003285typedef struct {
3286 void *buffer;
Anthony Liguoric227f092009-10-01 16:12:16 -05003287 target_phys_addr_t addr;
3288 target_phys_addr_t len;
aliguori6d16c2f2009-01-22 16:59:11 +00003289} BounceBuffer;
3290
3291static BounceBuffer bounce;
3292
aliguoriba223c22009-01-22 16:59:16 +00003293typedef struct MapClient {
3294 void *opaque;
3295 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00003296 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00003297} MapClient;
3298
Blue Swirl72cf2d42009-09-12 07:36:22 +00003299static QLIST_HEAD(map_client_list, MapClient) map_client_list
3300 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003301
3302void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3303{
3304 MapClient *client = qemu_malloc(sizeof(*client));
3305
3306 client->opaque = opaque;
3307 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003308 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00003309 return client;
3310}
3311
3312void cpu_unregister_map_client(void *_client)
3313{
3314 MapClient *client = (MapClient *)_client;
3315
Blue Swirl72cf2d42009-09-12 07:36:22 +00003316 QLIST_REMOVE(client, link);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09003317 qemu_free(client);
aliguoriba223c22009-01-22 16:59:16 +00003318}
3319
3320static void cpu_notify_map_clients(void)
3321{
3322 MapClient *client;
3323
Blue Swirl72cf2d42009-09-12 07:36:22 +00003324 while (!QLIST_EMPTY(&map_client_list)) {
3325 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003326 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09003327 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00003328 }
3329}
3330
aliguori6d16c2f2009-01-22 16:59:11 +00003331/* Map a physical memory region into a host virtual address.
3332 * May map a subset of the requested range, given by and returned in *plen.
3333 * May return NULL if resources needed to perform the mapping are exhausted.
3334 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00003335 * Use cpu_register_map_client() to know when retrying the map operation is
3336 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00003337 */
Anthony Liguoric227f092009-10-01 16:12:16 -05003338void *cpu_physical_memory_map(target_phys_addr_t addr,
3339 target_phys_addr_t *plen,
aliguori6d16c2f2009-01-22 16:59:11 +00003340 int is_write)
3341{
Anthony Liguoric227f092009-10-01 16:12:16 -05003342 target_phys_addr_t len = *plen;
3343 target_phys_addr_t done = 0;
aliguori6d16c2f2009-01-22 16:59:11 +00003344 int l;
3345 uint8_t *ret = NULL;
3346 uint8_t *ptr;
Anthony Liguoric227f092009-10-01 16:12:16 -05003347 target_phys_addr_t page;
aliguori6d16c2f2009-01-22 16:59:11 +00003348 unsigned long pd;
3349 PhysPageDesc *p;
3350 unsigned long addr1;
3351
3352 while (len > 0) {
3353 page = addr & TARGET_PAGE_MASK;
3354 l = (page + TARGET_PAGE_SIZE) - addr;
3355 if (l > len)
3356 l = len;
3357 p = phys_page_find(page >> TARGET_PAGE_BITS);
3358 if (!p) {
3359 pd = IO_MEM_UNASSIGNED;
3360 } else {
3361 pd = p->phys_offset;
3362 }
3363
3364 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3365 if (done || bounce.buffer) {
3366 break;
3367 }
3368 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3369 bounce.addr = addr;
3370 bounce.len = l;
3371 if (!is_write) {
3372 cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3373 }
3374 ptr = bounce.buffer;
3375 } else {
3376 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
pbrook5579c7f2009-04-11 14:47:08 +00003377 ptr = qemu_get_ram_ptr(addr1);
aliguori6d16c2f2009-01-22 16:59:11 +00003378 }
3379 if (!done) {
3380 ret = ptr;
3381 } else if (ret + done != ptr) {
3382 break;
3383 }
3384
3385 len -= l;
3386 addr += l;
3387 done += l;
3388 }
3389 *plen = done;
3390 return ret;
3391}
3392
3393/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3394 * Will also mark the memory as dirty if is_write == 1. access_len gives
3395 * the amount of memory that was actually read or written by the caller.
3396 */
Anthony Liguoric227f092009-10-01 16:12:16 -05003397void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3398 int is_write, target_phys_addr_t access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00003399{
3400 if (buffer != bounce.buffer) {
3401 if (is_write) {
Anthony Liguoric227f092009-10-01 16:12:16 -05003402 ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00003403 while (access_len) {
3404 unsigned l;
3405 l = TARGET_PAGE_SIZE;
3406 if (l > access_len)
3407 l = access_len;
3408 if (!cpu_physical_memory_is_dirty(addr1)) {
3409 /* invalidate code */
3410 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3411 /* set dirty bit */
3412 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3413 (0xff & ~CODE_DIRTY_FLAG);
3414 }
3415 addr1 += l;
3416 access_len -= l;
3417 }
3418 }
3419 return;
3420 }
3421 if (is_write) {
3422 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3423 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00003424 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00003425 bounce.buffer = NULL;
aliguoriba223c22009-01-22 16:59:16 +00003426 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00003427}
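
/* Illustrative sketch, not part of the original file (compiled out): the
   intended zero-copy DMA pattern is map -> access -> unmap.  Note that *plen
   may come back shorter than requested, and that a non-RAM region falls back
   to the single bounce buffer, so the map can fail while that buffer is
   busy; cpu_register_map_client() exists to retry in that case.  The retry
   callback below is a made-up name. */
#if 0
static void example_dma_retry(void *opaque)
{
    /* called once the bounce buffer is free again; reissue the transfer */
}

static void example_dma_write(target_phys_addr_t addr, target_phys_addr_t len)
{
    target_phys_addr_t plen = len;
    uint8_t *ptr;

    ptr = cpu_physical_memory_map(addr, &plen, 1 /* is_write */);
    if (!ptr) {
        /* resources exhausted (bounce buffer in use): retry later */
        cpu_register_map_client(NULL, example_dma_retry);
        return;
    }
    memset(ptr, 0, plen);                   /* device fills the buffer */
    cpu_physical_memory_unmap(ptr, plen, 1, plen);
}
#endif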
bellardd0ecd2a2006-04-23 17:14:48 +00003428
bellard8df1cd02005-01-28 22:37:22 +00003429/* warning: addr must be aligned */
Anthony Liguoric227f092009-10-01 16:12:16 -05003430uint32_t ldl_phys(target_phys_addr_t addr)
bellard8df1cd02005-01-28 22:37:22 +00003431{
3432 int io_index;
3433 uint8_t *ptr;
3434 uint32_t val;
3435 unsigned long pd;
3436 PhysPageDesc *p;
3437
3438 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3439 if (!p) {
3440 pd = IO_MEM_UNASSIGNED;
3441 } else {
3442 pd = p->phys_offset;
3443 }
ths3b46e622007-09-17 08:09:54 +00003444
ths5fafdf22007-09-16 21:08:06 +00003445 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
bellard2a4188a2006-06-25 21:54:59 +00003446 !(pd & IO_MEM_ROMD)) {
bellard8df1cd02005-01-28 22:37:22 +00003447 /* I/O case */
3448 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00003449 if (p)
3450 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bellard8df1cd02005-01-28 22:37:22 +00003451 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3452 } else {
3453 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003454 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bellard8df1cd02005-01-28 22:37:22 +00003455 (addr & ~TARGET_PAGE_MASK);
3456 val = ldl_p(ptr);
3457 }
3458 return val;
3459}
3460
bellard84b7b8e2005-11-28 21:19:04 +00003461/* warning: addr must be aligned */
Anthony Liguoric227f092009-10-01 16:12:16 -05003462uint64_t ldq_phys(target_phys_addr_t addr)
bellard84b7b8e2005-11-28 21:19:04 +00003463{
3464 int io_index;
3465 uint8_t *ptr;
3466 uint64_t val;
3467 unsigned long pd;
3468 PhysPageDesc *p;
3469
3470 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3471 if (!p) {
3472 pd = IO_MEM_UNASSIGNED;
3473 } else {
3474 pd = p->phys_offset;
3475 }
ths3b46e622007-09-17 08:09:54 +00003476
bellard2a4188a2006-06-25 21:54:59 +00003477 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3478 !(pd & IO_MEM_ROMD)) {
bellard84b7b8e2005-11-28 21:19:04 +00003479 /* I/O case */
3480 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00003481 if (p)
3482 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bellard84b7b8e2005-11-28 21:19:04 +00003483#ifdef TARGET_WORDS_BIGENDIAN
3484 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3485 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3486#else
3487 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3488 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3489#endif
3490 } else {
3491 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003492 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bellard84b7b8e2005-11-28 21:19:04 +00003493 (addr & ~TARGET_PAGE_MASK);
3494 val = ldq_p(ptr);
3495 }
3496 return val;
3497}
3498
bellardaab33092005-10-30 20:48:42 +00003499/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05003500uint32_t ldub_phys(target_phys_addr_t addr)
bellardaab33092005-10-30 20:48:42 +00003501{
3502 uint8_t val;
3503 cpu_physical_memory_read(addr, &val, 1);
3504 return val;
3505}
3506
3507/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05003508uint32_t lduw_phys(target_phys_addr_t addr)
bellardaab33092005-10-30 20:48:42 +00003509{
3510 uint16_t val;
3511 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3512 return tswap16(val);
3513}
3514
bellard8df1cd02005-01-28 22:37:22 +00003515/* warning: addr must be aligned. The ram page is not masked as dirty
3516 and the code inside is not invalidated. It is useful if the dirty
3517 bits are used to track modified PTEs */
Anthony Liguoric227f092009-10-01 16:12:16 -05003518void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
bellard8df1cd02005-01-28 22:37:22 +00003519{
3520 int io_index;
3521 uint8_t *ptr;
3522 unsigned long pd;
3523 PhysPageDesc *p;
3524
3525 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3526 if (!p) {
3527 pd = IO_MEM_UNASSIGNED;
3528 } else {
3529 pd = p->phys_offset;
3530 }
ths3b46e622007-09-17 08:09:54 +00003531
bellard3a7d9292005-08-21 09:26:42 +00003532 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
bellard8df1cd02005-01-28 22:37:22 +00003533 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00003534 if (p)
3535 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bellard8df1cd02005-01-28 22:37:22 +00003536 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3537 } else {
aliguori74576192008-10-06 14:02:03 +00003538 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
pbrook5579c7f2009-04-11 14:47:08 +00003539 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00003540 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00003541
3542 if (unlikely(in_migration)) {
3543 if (!cpu_physical_memory_is_dirty(addr1)) {
3544 /* invalidate code */
3545 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3546 /* set dirty bit */
3547 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3548 (0xff & ~CODE_DIRTY_FLAG);
3549 }
3550 }
bellard8df1cd02005-01-28 22:37:22 +00003551 }
3552}
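
/* Illustrative sketch, not part of the original file (compiled out): a
   target MMU emulation that maintains accessed/dirty bits in guest page
   table entries can update the PTE with stl_phys_notdirty(), so the write
   does not itself mark the RAM page dirty, in contrast to a plain
   stl_phys().  The PTE address and bit value are example values. */
#if 0
static void example_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte;

    pte = ldl_phys(pte_addr);           /* read the guest PTE */
    pte |= 0x20;                        /* e.g. an "accessed" bit */
    stl_phys_notdirty(pte_addr, pte);   /* write back without dirtying */
}
#endif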
3553
Anthony Liguoric227f092009-10-01 16:12:16 -05003554void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
j_mayerbc98a7e2007-04-04 07:55:12 +00003555{
3556 int io_index;
3557 uint8_t *ptr;
3558 unsigned long pd;
3559 PhysPageDesc *p;
3560
3561 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3562 if (!p) {
3563 pd = IO_MEM_UNASSIGNED;
3564 } else {
3565 pd = p->phys_offset;
3566 }
ths3b46e622007-09-17 08:09:54 +00003567
j_mayerbc98a7e2007-04-04 07:55:12 +00003568 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3569 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00003570 if (p)
3571 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
j_mayerbc98a7e2007-04-04 07:55:12 +00003572#ifdef TARGET_WORDS_BIGENDIAN
3573 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3574 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3575#else
3576 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3577 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3578#endif
3579 } else {
pbrook5579c7f2009-04-11 14:47:08 +00003580 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
j_mayerbc98a7e2007-04-04 07:55:12 +00003581 (addr & ~TARGET_PAGE_MASK);
3582 stq_p(ptr, val);
3583 }
3584}
3585
bellard8df1cd02005-01-28 22:37:22 +00003586/* warning: addr must be aligned */
Anthony Liguoric227f092009-10-01 16:12:16 -05003587void stl_phys(target_phys_addr_t addr, uint32_t val)
bellard8df1cd02005-01-28 22:37:22 +00003588{
3589 int io_index;
3590 uint8_t *ptr;
3591 unsigned long pd;
3592 PhysPageDesc *p;
3593
3594 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3595 if (!p) {
3596 pd = IO_MEM_UNASSIGNED;
3597 } else {
3598 pd = p->phys_offset;
3599 }
ths3b46e622007-09-17 08:09:54 +00003600
bellard3a7d9292005-08-21 09:26:42 +00003601 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
bellard8df1cd02005-01-28 22:37:22 +00003602 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00003603 if (p)
3604 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bellard8df1cd02005-01-28 22:37:22 +00003605 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3606 } else {
3607 unsigned long addr1;
3608 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3609 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003610 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00003611 stl_p(ptr, val);
bellard3a7d9292005-08-21 09:26:42 +00003612 if (!cpu_physical_memory_is_dirty(addr1)) {
3613 /* invalidate code */
3614 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3615 /* set dirty bit */
bellardf23db162005-08-21 19:12:28 +00003616 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3617 (0xff & ~CODE_DIRTY_FLAG);
bellard3a7d9292005-08-21 09:26:42 +00003618 }
bellard8df1cd02005-01-28 22:37:22 +00003619 }
3620}
3621
bellardaab33092005-10-30 20:48:42 +00003622/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05003623void stb_phys(target_phys_addr_t addr, uint32_t val)
bellardaab33092005-10-30 20:48:42 +00003624{
3625 uint8_t v = val;
3626 cpu_physical_memory_write(addr, &v, 1);
3627}
3628
3629/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05003630void stw_phys(target_phys_addr_t addr, uint32_t val)
bellardaab33092005-10-30 20:48:42 +00003631{
3632 uint16_t v = tswap16(val);
3633 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3634}
3635
3636/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05003637void stq_phys(target_phys_addr_t addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00003638{
3639 val = tswap64(val);
3640 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3641}
3642
aliguori5e2972f2009-03-28 17:51:36 +00003643/* virtual memory access for debug (includes writing to ROM) */
ths5fafdf22007-09-16 21:08:06 +00003644int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00003645 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003646{
3647 int l;
Anthony Liguoric227f092009-10-01 16:12:16 -05003648 target_phys_addr_t phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00003649 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00003650
3651 while (len > 0) {
3652 page = addr & TARGET_PAGE_MASK;
3653 phys_addr = cpu_get_phys_page_debug(env, page);
3654 /* if no physical page mapped, return an error */
3655 if (phys_addr == -1)
3656 return -1;
3657 l = (page + TARGET_PAGE_SIZE) - addr;
3658 if (l > len)
3659 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00003660 phys_addr += (addr & ~TARGET_PAGE_MASK);
aliguori5e2972f2009-03-28 17:51:36 +00003661 if (is_write)
3662 cpu_physical_memory_write_rom(phys_addr, buf, l);
3663 else
aliguori5e2972f2009-03-28 17:51:36 +00003664 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
bellard13eb76e2004-01-24 15:23:36 +00003665 len -= l;
3666 buf += l;
3667 addr += l;
3668 }
3669 return 0;
3670}
Paul Brooka68fe892010-03-01 00:08:59 +00003671#endif
bellard13eb76e2004-01-24 15:23:36 +00003672
pbrook2e70f6e2008-06-29 01:03:05 +00003673/* in deterministic execution mode, instructions doing device I/Os
3674 must be at the end of the TB */
3675void cpu_io_recompile(CPUState *env, void *retaddr)
3676{
3677 TranslationBlock *tb;
3678 uint32_t n, cflags;
3679 target_ulong pc, cs_base;
3680 uint64_t flags;
3681
3682 tb = tb_find_pc((unsigned long)retaddr);
3683 if (!tb) {
3684 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3685 retaddr);
3686 }
3687 n = env->icount_decr.u16.low + tb->icount;
3688 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3689 /* Calculate how many instructions had been executed before the fault
thsbf20dc02008-06-30 17:22:19 +00003690 occurred. */
pbrook2e70f6e2008-06-29 01:03:05 +00003691 n = n - env->icount_decr.u16.low;
3692 /* Generate a new TB ending on the I/O insn. */
3693 n++;
3694 /* On MIPS and SH, delay slot instructions can only be restarted if
3695 they were already the first instruction in the TB. If this is not
thsbf20dc02008-06-30 17:22:19 +00003696 the first instruction in a TB then re-execute the preceding
pbrook2e70f6e2008-06-29 01:03:05 +00003697 branch. */
3698#if defined(TARGET_MIPS)
3699 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3700 env->active_tc.PC -= 4;
3701 env->icount_decr.u16.low++;
3702 env->hflags &= ~MIPS_HFLAG_BMASK;
3703 }
3704#elif defined(TARGET_SH4)
3705 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3706 && n > 1) {
3707 env->pc -= 2;
3708 env->icount_decr.u16.low++;
3709 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3710 }
3711#endif
3712 /* This should never happen. */
3713 if (n > CF_COUNT_MASK)
3714 cpu_abort(env, "TB too big during recompile");
3715
3716 cflags = n | CF_LAST_IO;
3717 pc = tb->pc;
3718 cs_base = tb->cs_base;
3719 flags = tb->flags;
3720 tb_phys_invalidate(tb, -1);
3721 /* FIXME: In theory this could raise an exception. In practice
3722 we have already translated the block once so it's probably ok. */
3723 tb_gen_code(env, pc, cs_base, flags, cflags);
thsbf20dc02008-06-30 17:22:19 +00003724 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
pbrook2e70f6e2008-06-29 01:03:05 +00003725 the first in the TB) then we end up generating a whole new TB and
3726 repeating the fault, which is horribly inefficient.
3727 Better would be to execute just this insn uncached, or generate a
3728 second new TB. */
3729 cpu_resume_from_signal(env, NULL);
3730}
3731
bellarde3db7222005-01-26 22:00:47 +00003732void dump_exec_info(FILE *f,
3733 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3734{
3735 int i, target_code_size, max_target_code_size;
3736 int direct_jmp_count, direct_jmp2_count, cross_page;
3737 TranslationBlock *tb;
ths3b46e622007-09-17 08:09:54 +00003738
bellarde3db7222005-01-26 22:00:47 +00003739 target_code_size = 0;
3740 max_target_code_size = 0;
3741 cross_page = 0;
3742 direct_jmp_count = 0;
3743 direct_jmp2_count = 0;
3744 for(i = 0; i < nb_tbs; i++) {
3745 tb = &tbs[i];
3746 target_code_size += tb->size;
3747 if (tb->size > max_target_code_size)
3748 max_target_code_size = tb->size;
3749 if (tb->page_addr[1] != -1)
3750 cross_page++;
3751 if (tb->tb_next_offset[0] != 0xffff) {
3752 direct_jmp_count++;
3753 if (tb->tb_next_offset[1] != 0xffff) {
3754 direct_jmp2_count++;
3755 }
3756 }
3757 }
3758 /* XXX: avoid using doubles ? */
bellard57fec1f2008-02-01 10:50:11 +00003759 cpu_fprintf(f, "Translation buffer state:\n");
bellard26a5f132008-05-28 12:30:31 +00003760 cpu_fprintf(f, "gen code size %ld/%ld\n",
3761 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3762 cpu_fprintf(f, "TB count %d/%d\n",
3763 nb_tbs, code_gen_max_blocks);
ths5fafdf22007-09-16 21:08:06 +00003764 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
bellarde3db7222005-01-26 22:00:47 +00003765 nb_tbs ? target_code_size / nb_tbs : 0,
3766 max_target_code_size);
ths5fafdf22007-09-16 21:08:06 +00003767 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
bellarde3db7222005-01-26 22:00:47 +00003768 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3769 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
ths5fafdf22007-09-16 21:08:06 +00003770 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3771 cross_page,
bellarde3db7222005-01-26 22:00:47 +00003772 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3773 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
ths5fafdf22007-09-16 21:08:06 +00003774 direct_jmp_count,
bellarde3db7222005-01-26 22:00:47 +00003775 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3776 direct_jmp2_count,
3777 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
bellard57fec1f2008-02-01 10:50:11 +00003778 cpu_fprintf(f, "\nStatistics:\n");
bellarde3db7222005-01-26 22:00:47 +00003779 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3780 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3781 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
bellardb67d9a52008-05-23 09:57:34 +00003782 tcg_dump_info(f, cpu_fprintf);
bellarde3db7222005-01-26 22:00:47 +00003783}
3784
ths5fafdf22007-09-16 21:08:06 +00003785#if !defined(CONFIG_USER_ONLY)
bellard61382a52003-10-27 21:22:23 +00003786
3787#define MMUSUFFIX _cmmu
3788#define GETPC() NULL
3789#define env cpu_single_env
bellardb769d8f2004-10-03 15:07:13 +00003790#define SOFTMMU_CODE_ACCESS
bellard61382a52003-10-27 21:22:23 +00003791
3792#define SHIFT 0
3793#include "softmmu_template.h"
3794
3795#define SHIFT 1
3796#include "softmmu_template.h"
3797
3798#define SHIFT 2
3799#include "softmmu_template.h"
3800
3801#define SHIFT 3
3802#include "softmmu_template.h"
3803
3804#undef env
3805
3806#endif