blob: aedfda4e04a89f91a11ef211ff3520fd2600f22d [file] [log] [blame]
bellard54936002003-05-13 00:25:15 +00001/*
bellardfd6ce8f2003-05-14 19:00:11 +00002 * virtual page mapping and translated block handling
ths5fafdf22007-09-16 21:08:06 +00003 *
bellard54936002003-05-13 00:25:15 +00004 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
Blue Swirl8167ee82009-07-16 20:47:01 +000017 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
bellard54936002003-05-13 00:25:15 +000018 */
bellard67b915a2004-03-31 23:37:16 +000019#include "config.h"
bellardd5a8f072004-09-29 21:15:28 +000020#ifdef _WIN32
21#include <windows.h>
22#else
bellarda98d49b2004-11-14 16:22:05 +000023#include <sys/types.h>
bellardd5a8f072004-09-29 21:15:28 +000024#include <sys/mman.h>
25#endif
bellard54936002003-05-13 00:25:15 +000026#include <stdlib.h>
27#include <stdio.h>
28#include <stdarg.h>
29#include <string.h>
30#include <errno.h>
31#include <unistd.h>
32#include <inttypes.h>
33
bellard6180a182003-09-30 21:04:53 +000034#include "cpu.h"
35#include "exec-all.h"
aurel32ca10f862008-04-11 21:35:42 +000036#include "qemu-common.h"
bellardb67d9a52008-05-23 09:57:34 +000037#include "tcg.h"
pbrookb3c77242008-06-30 16:31:04 +000038#include "hw/hw.h"
aliguori74576192008-10-06 14:02:03 +000039#include "osdep.h"
aliguori7ba1e612008-11-05 16:04:33 +000040#include "kvm.h"
Blue Swirl29e922b2010-03-29 19:24:00 +000041#include "qemu-timer.h"
pbrook53a59602006-03-25 19:31:22 +000042#if defined(CONFIG_USER_ONLY)
43#include <qemu.h>
Riku Voipiofd052bf2010-01-25 14:30:49 +020044#include <signal.h>
Juergen Lockf01576f2010-03-25 22:32:16 +010045#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
46#include <sys/param.h>
47#if __FreeBSD_version >= 700104
48#define HAVE_KINFO_GETVMMAP
49#define sigqueue sigqueue_freebsd /* avoid redefinition */
50#include <sys/time.h>
51#include <sys/proc.h>
52#include <machine/profile.h>
53#define _KERNEL
54#include <sys/user.h>
55#undef _KERNEL
56#undef sigqueue
57#include <libutil.h>
58#endif
59#endif
pbrook53a59602006-03-25 19:31:22 +000060#endif
bellard54936002003-05-13 00:25:15 +000061
bellardfd6ce8f2003-05-14 19:00:11 +000062//#define DEBUG_TB_INVALIDATE
bellard66e85a22003-06-24 13:28:12 +000063//#define DEBUG_FLUSH
bellard9fa3e852004-01-04 18:06:42 +000064//#define DEBUG_TLB
pbrook67d3b952006-12-18 05:03:52 +000065//#define DEBUG_UNASSIGNED
bellardfd6ce8f2003-05-14 19:00:11 +000066
67/* make various TB consistency checks */
ths5fafdf22007-09-16 21:08:06 +000068//#define DEBUG_TB_CHECK
69//#define DEBUG_TLB_CHECK
bellardfd6ce8f2003-05-14 19:00:11 +000070
ths1196be32007-03-17 15:17:58 +000071//#define DEBUG_IOPORT
blueswir1db7b5422007-05-26 17:36:03 +000072//#define DEBUG_SUBPAGE
ths1196be32007-03-17 15:17:58 +000073
pbrook99773bd2006-04-16 15:14:59 +000074#if !defined(CONFIG_USER_ONLY)
75/* TB consistency checks only implemented for usermode emulation. */
76#undef DEBUG_TB_CHECK
77#endif
78
bellard9fa3e852004-01-04 18:06:42 +000079#define SMC_BITMAP_USE_THRESHOLD 10
80
blueswir1bdaf78e2008-10-04 07:24:27 +000081static TranslationBlock *tbs;
bellard26a5f132008-05-28 12:30:31 +000082int code_gen_max_blocks;
bellard9fa3e852004-01-04 18:06:42 +000083TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
blueswir1bdaf78e2008-10-04 07:24:27 +000084static int nb_tbs;
bellardeb51d102003-05-14 21:51:13 +000085/* any access to the tbs or the page table must use this lock */
Anthony Liguoric227f092009-10-01 16:12:16 -050086spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
bellardfd6ce8f2003-05-14 19:00:11 +000087
blueswir1141ac462008-07-26 15:05:57 +000088#if defined(__arm__) || defined(__sparc_v9__)
89/* The prologue must be reachable with a direct jump. ARM and Sparc64
90 have limited branch ranges (possibly also PPC) so place it in a
blueswir1d03d8602008-07-10 17:21:31 +000091 section close to code segment. */
92#define code_gen_section \
93 __attribute__((__section__(".gen_code"))) \
94 __attribute__((aligned (32)))
Stefan Weilf8e2af12009-06-18 23:04:48 +020095#elif defined(_WIN32)
96/* Maximum alignment for Win32 is 16. */
97#define code_gen_section \
98 __attribute__((aligned (16)))
blueswir1d03d8602008-07-10 17:21:31 +000099#else
100#define code_gen_section \
101 __attribute__((aligned (32)))
102#endif
103
104uint8_t code_gen_prologue[1024] code_gen_section;
blueswir1bdaf78e2008-10-04 07:24:27 +0000105static uint8_t *code_gen_buffer;
106static unsigned long code_gen_buffer_size;
bellard26a5f132008-05-28 12:30:31 +0000107/* threshold to flush the translated code buffer */
blueswir1bdaf78e2008-10-04 07:24:27 +0000108static unsigned long code_gen_buffer_max_size;
bellardfd6ce8f2003-05-14 19:00:11 +0000109uint8_t *code_gen_ptr;
110
pbrooke2eef172008-06-08 01:09:01 +0000111#if !defined(CONFIG_USER_ONLY)
bellard9fa3e852004-01-04 18:06:42 +0000112int phys_ram_fd;
bellard1ccde1c2004-02-06 19:46:14 +0000113uint8_t *phys_ram_dirty;
aliguori74576192008-10-06 14:02:03 +0000114static int in_migration;
pbrook94a6b542009-04-11 17:15:54 +0000115
116typedef struct RAMBlock {
117 uint8_t *host;
Anthony Liguoric227f092009-10-01 16:12:16 -0500118 ram_addr_t offset;
119 ram_addr_t length;
pbrook94a6b542009-04-11 17:15:54 +0000120 struct RAMBlock *next;
121} RAMBlock;
122
123static RAMBlock *ram_blocks;
124/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
Stuart Bradyccbb4d42009-05-03 12:15:06 +0100125 then we can no longer assume contiguous ram offsets, and external uses
pbrook94a6b542009-04-11 17:15:54 +0000126 of this variable will break. */
Anthony Liguoric227f092009-10-01 16:12:16 -0500127ram_addr_t last_ram_offset;
pbrooke2eef172008-06-08 01:09:01 +0000128#endif
bellard9fa3e852004-01-04 18:06:42 +0000129
bellard6a00d602005-11-21 23:25:50 +0000130CPUState *first_cpu;
131/* current CPU in the current thread. It is only valid inside
132 cpu_exec() */
ths5fafdf22007-09-16 21:08:06 +0000133CPUState *cpu_single_env;
pbrook2e70f6e2008-06-29 01:03:05 +0000134/* 0 = Do not count executed instructions.
thsbf20dc02008-06-30 17:22:19 +0000135 1 = Precise instruction counting.
pbrook2e70f6e2008-06-29 01:03:05 +0000136 2 = Adaptive rate instruction counting. */
137int use_icount = 0;
138/* Current instruction counter. While executing translated code this may
139 include some instructions that have not yet been executed. */
140int64_t qemu_icount;
bellard6a00d602005-11-21 23:25:50 +0000141
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    /* lazily allocated bitmap used for self-modifying-code detection;
       freed and reset by invalidate_page_bitmap() */
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    /* PAGE_* protection flags (user-mode emulation only) */
    unsigned long flags;
#endif
} PageDesc;
153
Paul Brook41c1b1c2010-03-12 16:54:58 +0000154/* In system mode we want L1_MAP to be based on ram offsets,
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800155 while in user mode we want it to be based on virtual addresses. */
156#if !defined(CONFIG_USER_ONLY)
Paul Brook41c1b1c2010-03-12 16:54:58 +0000157#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
158# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
159#else
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800160# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
Paul Brook41c1b1c2010-03-12 16:54:58 +0000161#endif
j_mayerbedb69e2007-04-05 20:08:21 +0000162#else
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800163# define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
j_mayerbedb69e2007-04-05 20:08:21 +0000164#endif
bellard54936002003-05-13 00:25:15 +0000165
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800166/* Size of the L2 (and L3, etc) page tables. */
167#define L2_BITS 10
bellard54936002003-05-13 00:25:15 +0000168#define L2_SIZE (1 << L2_BITS)
169
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800170/* The bits remaining after N lower levels of page tables. */
171#define P_L1_BITS_REM \
172 ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
173#define V_L1_BITS_REM \
174 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
175
176/* Size of the L1 page table. Avoid silly small sizes. */
177#if P_L1_BITS_REM < 4
178#define P_L1_BITS (P_L1_BITS_REM + L2_BITS)
179#else
180#define P_L1_BITS P_L1_BITS_REM
181#endif
182
183#if V_L1_BITS_REM < 4
184#define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
185#else
186#define V_L1_BITS V_L1_BITS_REM
187#endif
188
189#define P_L1_SIZE ((target_phys_addr_t)1 << P_L1_BITS)
190#define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
191
192#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
193#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
194
bellard83fb7ad2004-07-05 21:25:26 +0000195unsigned long qemu_real_host_page_size;
196unsigned long qemu_host_page_bits;
197unsigned long qemu_host_page_size;
198unsigned long qemu_host_page_mask;
bellard54936002003-05-13 00:25:15 +0000199
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800200/* This is a multi-level map on the virtual address space.
201 The bottom level has pointers to PageDesc. */
202static void *l1_map[V_L1_SIZE];
bellard54936002003-05-13 00:25:15 +0000203
pbrooke2eef172008-06-08 01:09:01 +0000204#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    /* offset of the page within its memory region; initialized by
       phys_page_find_alloc() to the page's physical address */
    ram_addr_t region_offset;
} PhysPageDesc;
210
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800211/* This is a multi-level map on the physical address space.
212 The bottom level has pointers to PhysPageDesc. */
213static void *l1_phys_map[P_L1_SIZE];
Paul Brook6d9a1302010-02-28 23:55:53 +0000214
pbrooke2eef172008-06-08 01:09:01 +0000215static void io_mem_init(void);
216
bellard33417e72003-08-10 21:47:01 +0000217/* io memory support */
bellard33417e72003-08-10 21:47:01 +0000218CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
219CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
bellarda4193c82004-06-03 14:01:43 +0000220void *io_mem_opaque[IO_MEM_NB_ENTRIES];
blueswir1511d2b12009-03-07 15:32:56 +0000221static char io_mem_used[IO_MEM_NB_ENTRIES];
pbrook6658ffb2007-03-16 23:58:11 +0000222static int io_mem_watch;
223#endif
bellard33417e72003-08-10 21:47:01 +0000224
bellard34865132003-10-05 14:28:56 +0000225/* log support */
Juha Riihimäki1e8b27c2009-12-03 15:56:02 +0200226#ifdef WIN32
227static const char *logfilename = "qemu.log";
228#else
blueswir1d9b630f2008-10-05 09:57:08 +0000229static const char *logfilename = "/tmp/qemu.log";
Juha Riihimäki1e8b27c2009-12-03 15:56:02 +0200230#endif
bellard34865132003-10-05 14:28:56 +0000231FILE *logfile;
232int loglevel;
pbrooke735b912007-06-30 13:53:24 +0000233static int log_append = 0;
bellard34865132003-10-05 14:28:56 +0000234
bellarde3db7222005-01-26 22:00:47 +0000235/* statistics */
Paul Brookb3755a92010-03-12 16:54:58 +0000236#if !defined(CONFIG_USER_ONLY)
bellarde3db7222005-01-26 22:00:47 +0000237static int tlb_flush_count;
Paul Brookb3755a92010-03-12 16:54:58 +0000238#endif
bellarde3db7222005-01-26 22:00:47 +0000239static int tb_flush_count;
240static int tb_phys_invalidate_count;
241
bellard7cb69ca2008-05-10 10:55:51 +0000242#ifdef _WIN32
243static void map_exec(void *addr, long size)
244{
245 DWORD old_protect;
246 VirtualProtect(addr, size,
247 PAGE_EXECUTE_READWRITE, &old_protect);
248
249}
250#else
/* POSIX: make the host memory range [addr, addr + size) readable,
   writable and executable.  The range is widened outward to whole
   host pages before calling mprotect(). */
static void map_exec(void *addr, long size)
{
    unsigned long pgsz, lo, hi;

    pgsz = getpagesize();
    lo = (unsigned long)addr & ~(pgsz - 1);
    hi = ((unsigned long)addr + size + pgsz - 1) & ~(pgsz - 1);

    mprotect((void *)lo, hi - lo,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
266#endif
267
/* Initialize the host/target page-size globals and, for BSD user-mode
   emulation, mark every page already mapped in the host process as
   PAGE_RESERVED so guest allocations cannot land on top of them. */
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    /* Effective host page size: at least the real host page size and at
       least the target page size. */
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    /* Derive log2 and mask from the (power-of-two) host page size. */
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        /* FreeBSD >= 700104: enumerate host mappings via libutil. */
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        /* Mapping end lies outside the guest address
                           space: reserve up to the top of it. */
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        /* Fallback: parse the Linux-compatible maps file. */
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}
356
/* Look up (and, when 'alloc' is non-zero, create on demand) the PageDesc
   for a guest page index in the multi-level l1_map.  Returns NULL on a
   miss when 'alloc' is 0. */
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use qemu_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = qemu_mallocz(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1: walk (allocating intermediate tables as needed). */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    /* Final level: a table of PageDesc entries. */
    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}
406
Paul Brook41c1b1c2010-03-12 16:54:58 +0000407static inline PageDesc *page_find(tb_page_addr_t index)
bellard54936002003-05-13 00:25:15 +0000408{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800409 return page_find_alloc(index, 0);
bellard54936002003-05-13 00:25:15 +0000410}
411
Paul Brook6d9a1302010-02-28 23:55:53 +0000412#if !defined(CONFIG_USER_ONLY)
/* Look up (and, when 'alloc' is non-zero, create on demand) the
   PhysPageDesc for a physical page index in the multi-level
   l1_phys_map.  Returns NULL on a miss when 'alloc' is 0. */
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    /* Level 2..N-1: walk, allocating intermediate tables as needed. */
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        int i;  /* NOTE(review): shadows the outer 'i'; harmless but -Wshadow would flag it */

        if (!alloc) {
            return NULL;
        }

        *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);

        /* Fresh leaf table: every page starts unassigned, with its
           region offset equal to its own physical address. */
        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}
452
Anthony Liguoric227f092009-10-01 16:12:16 -0500453static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
bellard92e873b2004-05-21 14:52:29 +0000454{
bellard108c49b2005-07-24 12:55:09 +0000455 return phys_page_find_alloc(index, 0);
bellard92e873b2004-05-21 14:52:29 +0000456}
457
Anthony Liguoric227f092009-10-01 16:12:16 -0500458static void tlb_protect_code(ram_addr_t ram_addr);
459static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
bellard3a7d9292005-08-21 09:26:42 +0000460 target_ulong vaddr);
pbrookc8a706f2008-06-02 16:16:42 +0000461#define mmap_lock() do { } while(0)
462#define mmap_unlock() do { } while(0)
bellard9fa3e852004-01-04 18:06:42 +0000463#endif
bellardfd6ce8f2003-05-14 19:00:11 +0000464
bellard43694152008-05-29 09:35:57 +0000465#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
466
467#if defined(CONFIG_USER_ONLY)
Stuart Bradyccbb4d42009-05-03 12:15:06 +0100468/* Currently it is not recommended to allocate big chunks of data in
bellard43694152008-05-29 09:35:57 +0000469 user mode. It will change when a dedicated libc will be used */
470#define USE_STATIC_CODE_GEN_BUFFER
471#endif
472
473#ifdef USE_STATIC_CODE_GEN_BUFFER
Aurelien Jarnoebf50fb2010-03-29 02:12:51 +0200474static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
475 __attribute__((aligned (CODE_GEN_ALIGN)));
bellard43694152008-05-29 09:35:57 +0000476#endif
477
/* Allocate (or adopt the static) buffer that will hold generated host
   code, make it executable, and size the TranslationBlock array to
   match.  A 'tb_size' of 0 selects a default buffer size. */
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    /* User mode: use the statically allocated buffer. */
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* Keep the buffer in the low 4GB so generated code can use
           direct calls/branches. */
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    /* Generic host: plain allocation, then mprotect it executable. */
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    /* Leave headroom at the end of the buffer for one maximum-size op. */
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_MAX_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
564
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    /* Generated code is emitted from the start of the buffer. */
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}
583
pbrook9656f322008-07-01 20:01:19 +0000584#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
585
/* VMState .post_load hook: fix up common CPU state after an incoming
   migration or snapshot load.  Returns 0 (success). */
static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    /* TLB contents are not migrated: invalidate everything. */
    tlb_flush(env, 1);

    return 0;
}
Juan Quintelae7f4eff2009-09-10 03:04:33 +0200597
/* Migration description for the architecture-independent part of a CPU:
   only 'halted' and 'interrupt_request' are transferred; fixups happen
   in cpu_common_post_load(). */
static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
pbrook9656f322008-07-01 20:01:19 +0000610#endif
611
Glauber Costa950f1472009-06-09 12:15:18 -0400612CPUState *qemu_get_cpu(int cpu)
613{
614 CPUState *env = first_cpu;
615
616 while (env) {
617 if (env->cpu_index == cpu)
618 break;
619 env = env->next_cpu;
620 }
621
622 return env;
623}
624
/* Register a new CPU: append it to the global first_cpu list, give it
   the next free cpu_index, and (system mode) hook it into savevm. */
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    /* User-mode guests can create CPUs from multiple threads. */
    cpu_list_lock();
#endif
    /* Walk to the tail of the list, counting existing CPUs on the way
       so cpu_index becomes the next free index. */
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    /* Make the CPU visible to migration/snapshots. */
    vmstate_register(cpu_index, &vmstate_cpu_common, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}
654
bellard9fa3e852004-01-04 18:06:42 +0000655static inline void invalidate_page_bitmap(PageDesc *p)
656{
657 if (p->code_bitmap) {
bellard59817cc2004-02-16 22:01:13 +0000658 qemu_free(p->code_bitmap);
bellard9fa3e852004-01-04 18:06:42 +0000659 p->code_bitmap = NULL;
660 }
661 p->code_write_count = 0;
662}
663
/* Set to NULL all the 'first_tb' fields in all PageDescs. */

/* Recursive helper for page_flush_tb(): walks one level of the
   multi-level page map.  At level 0 the table holds PageDesc entries;
   at higher levels it holds pointers to the next level down. */
static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            /* Forget all TBs on the page and drop its SMC bitmap. */
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}
686
bellardfd6ce8f2003-05-14 19:00:11 +0000687static void page_flush_tb(void)
688{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800689 int i;
690 for (i = 0; i < V_L1_SIZE; i++) {
691 page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
bellardfd6ce8f2003-05-14 19:00:11 +0000692 }
693}
694
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    /* Sanity check: generated code must never have outgrown the buffer. */
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    /* Invalidate every CPU's direct TB lookup cache. */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    /* Empty the global physical hash and unlink TBs from all pages. */
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    /* The whole buffer can now be reused for new translations. */
    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
723
724#ifdef DEBUG_TB_CHECK
725
/* Debug-only consistency check: after invalidating 'address', no TB in
   the physical hash table should still overlap that page.  Prints an
   error for every offender.  Compiled only under DEBUG_TB_CHECK. */
static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            /* overlap test: TB range [pc, pc+size) intersects the page */
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}
742
743/* verify that all the pages have correct rights for code */
/* verify that all the pages have correct rights for code */
/* Debug-only: every page containing translated code must have been made
   non-writable (so self-modifying code is caught); report any TB whose
   first or last page still has PAGE_WRITE set. */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            /* a TB may span two pages: check both ends */
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}
760
761#endif
762
763/* invalidate one TB */
764static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
765 int next_offset)
766{
767 TranslationBlock *tb1;
768 for(;;) {
769 tb1 = *ptb;
770 if (tb1 == tb) {
771 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
772 break;
773 }
774 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
775 }
776}
777
/* Unlink 'tb' from a per-page TB list.  List pointers are tagged: the
   low 2 bits of each entry encode which page slot (0 or 1) of that TB
   the next link lives in, so the pointer must be masked before use. */
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        /* strip the 2-bit page-slot tag from the stored pointer */
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            /* bypass tb using the next link of the slot it was tagged with */
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
794
/* Remove jump slot 'n' of 'tb' from the circular list of incoming jumps
   kept on the destination TB.  Entries are tagged pointers: low 2 bits
   hold the jump slot number, value 2 marks the destination TB itself
   (list head sentinel). */
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                /* tag 2: we are at the destination TB; continue via its
                   jmp_first list head */
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
822
823/* reset the jump entry 'n' of a TB so that it is not chained to
824 another TB */
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB: repoint the generated jump back at the TB's own epilogue
   (tc_ptr + tb_next_offset[n]). */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
829
/* Invalidate a single TB: remove it from the physical hash table, the
   per-page TB lists (except the page equal to 'page_addr', whose list
   the caller manages), every CPU's tb_jmp_cache, and unchain all jumps
   to and from it.  Pass page_addr == -1 to unlink from both pages. */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    /* tell the execution loop that a cached TB may have been removed */
    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB: walk the circular list of
       incoming jumps (tagged pointers, tag 2 = list end at this TB) and
       reset each jumper's branch back to its own epilogue */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
885
/* Set bits [start, start + len) in the bitmap 'tab' (LSB-first within
   each byte).  Setting zero bits is a no-op.  Used to mark which bytes
   of a page hold translated code. */
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end = start + len;
    int first_byte, last_byte, b;

    if (start >= end)
        return;
    first_byte = start >> 3;
    last_byte = (end - 1) >> 3;
    if (first_byte == last_byte) {
        /* range lies within a single byte: mask both ends */
        tab[first_byte] |= (0xff << (start & 7))
                         & (0xff >> (7 - ((end - 1) & 7)));
    } else {
        /* partial leading byte, full middle bytes, partial trailing byte */
        tab[first_byte] |= 0xff << (start & 7);
        for (b = first_byte + 1; b < last_byte; b++) {
            tab[b] = 0xff;
        }
        tab[last_byte] |= 0xff >> (7 - ((end - 1) & 7));
    }
}
912
/* Build the code bitmap of a page: one bit per byte of the page, set
   where translated code lives.  Walks the page's tagged TB list; each
   TB contributes the byte range it covers inside this page. */
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    /* one bit per byte of the page, zero-initialized */
    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        /* low 2 bits of the link encode which page slot this entry is for */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            /* second page of a spanning TB: starts at page offset 0 */
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
940
/* Translate and register a new TB for guest code at (pc, cs_base,
   flags) with generation flags 'cflags'.  If the TB pool or code buffer
   is exhausted, everything is flushed first (so the second tb_alloc
   cannot fail).  Returns the new TB. */
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    /* bump the generation pointer, keeping CODE_GEN_ALIGN alignment */
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed: a TB whose guest code crosses a page
       boundary must be linked into both pages */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
ths3b46e622007-09-17 08:09:54 +0000978
bellard9fa3e852004-01-04 18:06:42 +0000979/* invalidate all TBs which intersect with the target physical page
980 starting in range [start;end[. NOTE: start and end must refer to
bellardd720b932004-04-25 17:57:43 +0000981 the same physical page. 'is_cpu_write_access' should be true if called
982 from a real cpu write access: the virtual CPU will exit the current
983 TB if code is modified inside this TB. */
/* Invalidate all TBs which intersect with the target physical page
   range [start;end[.  start and end must refer to the SAME physical
   page.  'is_cpu_write_access' should be true if called from a real cpu
   write access: the virtual CPU will exit the current TB if code is
   modified inside this TB (precise SMC handling). */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    /* after enough write faults on this page, build the code bitmap so
       future small writes can be filtered cheaply */
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        /* low 2 bits of the link encode the page slot (0 or 1) */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            /* lazily locate the TB that performed the faulting write */
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
1089
1090/* len must be <= 8 and start must be a multiple of len */
/* len must be <= 8 and start must be a multiple of len */
/* Fast-path invalidation for a small aligned write: if the page has a
   code bitmap, only fall back to the full range invalidation when the
   written bytes actually overlap translated code. */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        /* test the 'len' bitmap bits covering the written bytes */
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
1116
bellard9fa3e852004-01-04 18:06:42 +00001117#if !defined(CONFIG_SOFTMMU)
/* Invalidate every TB on the page containing 'addr' (user-mode only;
   compiled when !CONFIG_SOFTMMU).  'pc'/'puc' describe the faulting
   host context so precise SMC can retranslate and resume the current
   TB if it modified itself. */
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        /* map the faulting host pc back to the TB being executed */
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        /* low 2 bits of the link encode the page slot (0 or 1) */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
bellard9fa3e852004-01-04 18:06:42 +00001176#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001177
1178/* add the tb in the target page and protect it if necessary */
/* Add the tb in the target page (slot n, 0 or 1) and protect it if
   necessary: in user mode the host page is mprotect'ed read-only so
   self-modifying code faults; in system mode the softmmu TLB is asked
   to trap writes via tlb_protect_code(). */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    /* push onto the page's tagged TB list; the low bits of the link
       record which page slot of this TB the chain continues through */
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        /* a host page may cover several target pages: merge their flags
           and clear PAGE_WRITE on each */
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
1231
1232/* Allocate a new translation block. Flush the translation buffer if
1233 too many translation blocks or too much generated code. */
/* Allocate a new translation block for guest pc.  Returns NULL when the
   TB array or the code generation buffer is (nearly) exhausted; the
   caller is then expected to tb_flush() and retry. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    /* take the next slot from the static TB array */
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}
1246
pbrook2e70f6e2008-06-29 01:03:05 +00001247void tb_free(TranslationBlock *tb)
1248{
thsbf20dc02008-06-30 17:22:19 +00001249 /* In practice this is mostly used for single use temporary TB
pbrook2e70f6e2008-06-29 01:03:05 +00001250 Ignore the hard cases and just back up if this TB happens to
1251 be the last one generated. */
1252 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1253 code_gen_ptr = tb->tc_ptr;
1254 nb_tbs--;
1255 }
1256}
1257
bellard9fa3e852004-01-04 18:06:42 +00001258/* add a new TB and link it to the physical page tables. phys_page2 is
1259 (-1) to indicate that only one page contains the TB. */
/* Add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB.  Also enters the
   TB in the physical hash table and initializes its jump chaining
   state. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    /* tag 2 marks the end of the circular incoming-jump list */
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses (0xffff means "no jump slot") */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
1297
bellarda513fe12003-05-27 23:29:48 +00001298/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1299 tb[1].tc_ptr. Return NULL if not found */
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found.  Relies on tbs[] being in
   ascending tc_ptr order (TBs are allocated linearly from the code
   generation buffer). */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    /* host address outside the generated-code buffer: not ours */
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    /* tc_ptr falls inside the code of tbs[m_max] */
    return &tbs[m_max];
}
bellard75012672003-06-21 13:11:07 +00001328
bellardea041c02003-06-25 16:16:50 +00001329static void tb_reset_jump_recursive(TranslationBlock *tb);
1330
/* Unchain jump slot 'n' of 'tb': remove tb from the destination TB's
   circular incoming-jump list, reset the generated branch, then recurse
   into the destination so the whole chain is broken. */
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list (the entry tagged 2 is the destination TB) */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure now that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}
1369
1370static void tb_reset_jump_recursive(TranslationBlock *tb)
1371{
1372 tb_reset_jump_recursive2(tb, 0);
1373 tb_reset_jump_recursive2(tb, 1);
1374}
1375
bellard1fddef42005-04-17 19:16:13 +00001376#if defined(TARGET_HAS_ICE)
Paul Brook94df27f2010-02-28 23:47:45 +00001377#if defined(CONFIG_USER_ONLY)
/* User-mode variant: drop any TB containing the breakpoint address so
   it is retranslated with the breakpoint in place. */
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
1382#else
/* System-mode variant: translate the guest virtual pc to its RAM
   address through the physical page tables, then invalidate the TBs
   covering that byte so the breakpoint takes effect. */
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        /* unmapped: treated as unassigned I/O below */
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
bellardc27004e2005-01-03 23:35:10 +00001400#endif
Paul Brook94df27f2010-02-28 23:47:45 +00001401#endif /* TARGET_HAS_ICE */
bellardd720b932004-04-25 17:57:43 +00001402
Paul Brookc527ee82010-03-01 03:31:14 +00001403#if defined(CONFIG_USER_ONLY)
1404void cpu_watchpoint_remove_all(CPUState *env, int mask)
1405
1406{
1407}
1408
/* User-only build: watchpoints are not supported; always fails with
   -ENOSYS. */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
1414#else
pbrook6658ffb2007-03-16 23:58:11 +00001415/* Add a watchpoint. */
/* Add a watchpoint.  'len' must be a power of two in {1,2,4,8} and
   'addr' aligned to it; otherwise returns -EINVAL.  On success the new
   watchpoint is queued on env->watchpoints (GDB-injected ones first),
   the TLB entry for the page is flushed so accesses go through the slow
   path, and *watchpoint (if non-NULL) receives the new entry.
   Returns 0. */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    /* qemu_malloc aborts on failure, so no NULL check is needed here */
    wp = qemu_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    /* force the slow path for accesses to this page */
    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
1446
aliguoria1d1bb32008-11-18 20:07:32 +00001447/* Remove a specific watchpoint. */
/* Remove the first watchpoint matching (addr, len, flags); the
   transient BP_WATCHPOINT_HIT bit is ignored in the comparison.
   Returns 0 on success, -ENOENT if no such watchpoint exists. */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}
1463
aliguoria1d1bb32008-11-18 20:07:32 +00001464/* Remove a specific watchpoint by reference. */
/* Remove a specific watchpoint by reference: unlink it, flush the TLB
   entry for its page, and free it.  Takes ownership of 'watchpoint'. */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    qemu_free(watchpoint);
}
1473
aliguoria1d1bb32008-11-18 20:07:32 +00001474/* Remove all matching watchpoints. */
1475void cpu_watchpoint_remove_all(CPUState *env, int mask)
1476{
aliguoric0ce9982008-11-25 22:13:57 +00001477 CPUWatchpoint *wp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001478
Blue Swirl72cf2d42009-09-12 07:36:22 +00001479 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001480 if (wp->flags & mask)
1481 cpu_watchpoint_remove_by_ref(env, wp);
aliguoric0ce9982008-11-25 22:13:57 +00001482 }
aliguoria1d1bb32008-11-18 20:07:32 +00001483}
Paul Brookc527ee82010-03-01 03:31:14 +00001484#endif
aliguoria1d1bb32008-11-18 20:07:32 +00001485
1486/* Add a breakpoint. */
1487int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1488 CPUBreakpoint **breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001489{
bellard1fddef42005-04-17 19:16:13 +00001490#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001491 CPUBreakpoint *bp;
ths3b46e622007-09-17 08:09:54 +00001492
aliguoria1d1bb32008-11-18 20:07:32 +00001493 bp = qemu_malloc(sizeof(*bp));
aliguoria1d1bb32008-11-18 20:07:32 +00001494
1495 bp->pc = pc;
1496 bp->flags = flags;
1497
aliguori2dc9f412008-11-18 20:56:59 +00001498 /* keep all GDB-injected breakpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001499 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001500 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001501 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001502 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001503
1504 breakpoint_invalidate(env, pc);
1505
1506 if (breakpoint)
1507 *breakpoint = bp;
1508 return 0;
1509#else
1510 return -ENOSYS;
1511#endif
1512}
1513
1514/* Remove a specific breakpoint. */
1515int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1516{
1517#if defined(TARGET_HAS_ICE)
1518 CPUBreakpoint *bp;
1519
Blue Swirl72cf2d42009-09-12 07:36:22 +00001520 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00001521 if (bp->pc == pc && bp->flags == flags) {
1522 cpu_breakpoint_remove_by_ref(env, bp);
bellard4c3a88a2003-07-26 12:06:08 +00001523 return 0;
aliguoria1d1bb32008-11-18 20:07:32 +00001524 }
bellard4c3a88a2003-07-26 12:06:08 +00001525 }
aliguoria1d1bb32008-11-18 20:07:32 +00001526 return -ENOENT;
bellard4c3a88a2003-07-26 12:06:08 +00001527#else
aliguoria1d1bb32008-11-18 20:07:32 +00001528 return -ENOSYS;
bellard4c3a88a2003-07-26 12:06:08 +00001529#endif
1530}
1531
/* Remove a specific breakpoint by reference.  The breakpoint must be on
   env's list; its memory is released here. */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    /* Invalidate cached translations for this pc so the trap is removed. */
    breakpoint_invalidate(env, breakpoint->pc);

    qemu_free(breakpoint);
#endif
}
1543
1544/* Remove all matching breakpoints. */
1545void cpu_breakpoint_remove_all(CPUState *env, int mask)
1546{
1547#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001548 CPUBreakpoint *bp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001549
Blue Swirl72cf2d42009-09-12 07:36:22 +00001550 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001551 if (bp->flags & mask)
1552 cpu_breakpoint_remove_by_ref(env, bp);
aliguoric0ce9982008-11-25 22:13:57 +00001553 }
bellard4c3a88a2003-07-26 12:06:08 +00001554#endif
1555}
1556
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            /* KVM handles the flag through its in-kernel debug interface. */
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}
1574
/* enable or disable low levels log */
/* Sets the global loglevel; opens the log file on the first enable
   (append mode if a previous session set log_append) and closes it when
   all log flags are cleared.  Exits the process if the file cannot be
   opened. */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif !defined(_WIN32)
        /* Win32 doesn't support line-buffering and requires size >= 2 */
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        /* Subsequent reopens (e.g. after a filename change) append. */
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}
1602
/* Switch logging to a new file name; reopens the log if it was active.
   NOTE(review): the previous logfilename is not freed — it may alias a
   static default, so repeated calls leak the strdup'd copy. */
void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    /* Reapply the current level so the new file is opened if needed. */
    cpu_set_log(loglevel);
}
bellardc33a3462003-07-29 20:50:33 +00001612
/* Unchain the TB the CPU is currently executing so the execution loop
   regains control as soon as possible.  Serialized by a local spinlock
   because it can be called from signal/other-thread context. */
static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}
1632
/* mask must never be zero, except for A20 change call */
/* Raise the interrupt bits in 'mask' on the CPU and make sure it notices:
   either kick a remote vcpu, force the icount counter to expire, or
   unchain the running TB. */
void cpu_interrupt(CPUState *env, int mask)
{
    int old_mask;

    /* Snapshot the old flags before OR-ing in the new ones, so the icount
       check below can tell which bits are newly raised. */
    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

#ifndef CONFIG_USER_ONLY
    /*
     * If called from iothread context, wake the target cpu in
     * case its halted.
     */
    if (!qemu_cpu_self(env)) {
        qemu_cpu_kick(env);
        return;
    }
#endif

    if (use_icount) {
        /* Expire the instruction counter so the TB exits promptly. */
        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
#endif
    } else {
        cpu_unlink_tb(env);
    }
}
1664
/* Clear the given bits from the CPU's pending interrupt mask. */
void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}
1669
/* Request that the CPU leave its execution loop as soon as possible. */
void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    /* Unchain the running TB so the loop sees exit_request promptly. */
    cpu_unlink_tb(env);
}
1675
/* Table mapping "-d" log item names to their mask bits, with help text.
   Terminated by a zero-mask sentinel entry; some entries are only
   compiled in for specific targets or debug builds. */
const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      /* help string is assembled from adjacent literals; the middle
         fragment exists only on i386 targets */
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
1707
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001708#ifndef CONFIG_USER_ONLY
1709static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
1710 = QLIST_HEAD_INITIALIZER(memory_client_list);
1711
1712static void cpu_notify_set_memory(target_phys_addr_t start_addr,
1713 ram_addr_t size,
1714 ram_addr_t phys_offset)
1715{
1716 CPUPhysMemoryClient *client;
1717 QLIST_FOREACH(client, &memory_client_list, list) {
1718 client->set_memory(client, start_addr, size, phys_offset);
1719 }
1720}
1721
1722static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
1723 target_phys_addr_t end)
1724{
1725 CPUPhysMemoryClient *client;
1726 QLIST_FOREACH(client, &memory_client_list, list) {
1727 int r = client->sync_dirty_bitmap(client, start, end);
1728 if (r < 0)
1729 return r;
1730 }
1731 return 0;
1732}
1733
1734static int cpu_notify_migration_log(int enable)
1735{
1736 CPUPhysMemoryClient *client;
1737 QLIST_FOREACH(client, &memory_client_list, list) {
1738 int r = client->migration_log(client, enable);
1739 if (r < 0)
1740 return r;
1741 }
1742 return 0;
1743}
1744
/* Recursively walk one subtree of the physical page table, invoking the
   client's set_memory callback for every assigned page.  'level' counts
   remaining indirection levels; level 0 holds PhysPageDesc leaves.
   NOTE(review): pd[i].region_offset is passed as the start address — this
   is the region offset, not a reconstructed guest physical address;
   confirm that clients expect this. */
static void phys_page_for_each_1(CPUPhysMemoryClient *client,
                                 int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        /* Leaf level: report each assigned page. */
        PhysPageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
                client->set_memory(client, pd[i].region_offset,
                                   TARGET_PAGE_SIZE, pd[i].phys_offset);
            }
        }
    } else {
        /* Interior level: recurse into each child pointer. */
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            phys_page_for_each_1(client, level - 1, pp + i);
        }
    }
}
1768
1769static void phys_page_for_each(CPUPhysMemoryClient *client)
1770{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001771 int i;
1772 for (i = 0; i < P_L1_SIZE; ++i) {
1773 phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
1774 l1_phys_map + 1);
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001775 }
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001776}
1777
/* Register a new physical-memory client and immediately replay all
   existing mappings to it so it starts in sync. */
void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_INSERT_HEAD(&memory_client_list, client, list);
    phys_page_for_each(client);
}
1783
/* Unregister a physical-memory client; the caller retains ownership of
   the client structure. */
void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_REMOVE(client, list);
}
1788#endif
1789
/* Return 1 iff the first n bytes of s1 equal the whole NUL-terminated
   string s2 (i.e. s2 has length n and matches byte-for-byte). */
static int cmp1(const char *s1, int n, const char *s2)
{
    return strlen(s2) == n && memcmp(s1, s2, n) == 0;
}
ths3b46e622007-09-17 08:09:54 +00001796
/* takes a comma separated list of log masks. Return 0 if error. */
/* "all" selects every entry in cpu_log_items; any unknown token makes the
   whole call fail with 0. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        /* [p, p1) is the current comma-delimited token. */
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
            /* falls through to 'found' with item at the zero-mask
               sentinel; the OR below is then a harmless no-op */
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
bellardea041c02003-06-25 16:16:50 +00001829
/* Print a fatal error message and the CPU state to stderr (and to the
   qemu log, if enabled), then abort().  Never returns. */
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    /* The argument list is consumed twice (stderr + log), so copy it. */
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        /* Restore the default SIGABRT handler so abort() below really
           terminates instead of entering guest signal emulation. */
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
1869
thsc5be9f02007-02-28 20:20:53 +00001870CPUState *cpu_copy(CPUState *env)
1871{
ths01ba9812007-12-09 02:22:57 +00001872 CPUState *new_env = cpu_init(env->cpu_model_str);
thsc5be9f02007-02-28 20:20:53 +00001873 CPUState *next_cpu = new_env->next_cpu;
1874 int cpu_index = new_env->cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001875#if defined(TARGET_HAS_ICE)
1876 CPUBreakpoint *bp;
1877 CPUWatchpoint *wp;
1878#endif
1879
thsc5be9f02007-02-28 20:20:53 +00001880 memcpy(new_env, env, sizeof(CPUState));
aliguori5a38f082009-01-15 20:16:51 +00001881
1882 /* Preserve chaining and index. */
thsc5be9f02007-02-28 20:20:53 +00001883 new_env->next_cpu = next_cpu;
1884 new_env->cpu_index = cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001885
1886 /* Clone all break/watchpoints.
1887 Note: Once we support ptrace with hw-debug register access, make sure
1888 BP_CPU break/watchpoints are handled correctly on clone. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00001889 QTAILQ_INIT(&env->breakpoints);
1890 QTAILQ_INIT(&env->watchpoints);
aliguori5a38f082009-01-15 20:16:51 +00001891#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001892 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001893 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1894 }
Blue Swirl72cf2d42009-09-12 07:36:22 +00001895 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001896 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1897 wp->flags, NULL);
1898 }
1899#endif
1900
thsc5be9f02007-02-28 20:20:53 +00001901 return new_env;
1902}
1903
bellard01243112004-01-04 15:48:17 +00001904#if !defined(CONFIG_USER_ONLY)
1905
/* Clear the tb_jmp_cache buckets that may reference TBs touching the
   flushed page. */
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    /* Bucket for the preceding page: a TB starting there may extend into
       'addr''s page — presumably why it is cleared too; confirm against
       tb_jmp_cache_hash_page semantics. */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    /* Bucket for the page itself. */
    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
1920
/* Canonical invalid TLB entry: -1 in every comparator can never match a
   page-aligned address, so lookups on it always miss. */
static CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read = -1,
    .addr_write = -1,
    .addr_code = -1,
    .addend = -1,
};
1927
/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
/* Invalidate the entire software TLB and jump cache of 'env', and reset
   the large-page tracking range. */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
        }
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    /* Reset large-page tracking (see tlb_add_large_page). */
    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
    tlb_flush_count++;
}
1954
bellard274da6b2004-05-20 21:56:27 +00001955static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
bellard61382a52003-10-27 21:22:23 +00001956{
ths5fafdf22007-09-16 21:08:06 +00001957 if (addr == (tlb_entry->addr_read &
bellard84b7b8e2005-11-28 21:19:04 +00001958 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00001959 addr == (tlb_entry->addr_write &
bellard84b7b8e2005-11-28 21:19:04 +00001960 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00001961 addr == (tlb_entry->addr_code &
bellard84b7b8e2005-11-28 21:19:04 +00001962 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
Igor Kovalenko08738982009-07-12 02:15:40 +04001963 *tlb_entry = s_cputlb_empty_entry;
bellard84b7b8e2005-11-28 21:19:04 +00001964 }
bellard61382a52003-10-27 21:22:23 +00001965}
1966
/* Invalidate all TLB entries for the page containing 'addr', in every MMU
   mode, plus the related jump-cache buckets.  Falls back to a full flush
   if the address lies in a tracked large-page region. */
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        tlb_flush(env, 1);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    /* All MMU modes share the same index for a given virtual page. */
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

    tlb_flush_jmp_cache(env, addr);
}
1996
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
/* Clearing CODE_DIRTY_FLAG forces subsequent writes to this RAM page
   through the slow path, where self-modifying code is caught. */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}
2005
/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
/* Note: 'env' and 'vaddr' are currently unused; only the dirty flag on
   the RAM page is restored. */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
}
2013
/* If this TLB entry is a direct-RAM write mapping whose backing host
   address falls inside [start, start+length), mark it TLB_NOTDIRTY so the
   next write goes through the slow path and re-dirties the page. */
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        /* Host virtual address this entry writes to. */
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        /* Unsigned subtraction doubles as a range check. */
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}
2025
/* Note: start and end must be within the same ram block. */
/* Clear the given dirty flags for the RAM range [start, end) and patch
   every CPU's TLB so writes into the range are trapped again. */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_get_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
        != (end - 1) - start) {
        abort();
    }

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for(i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
        }
    }
}
2061
aliguori74576192008-10-06 14:02:03 +00002062int cpu_physical_memory_set_dirty_tracking(int enable)
2063{
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002064 int ret = 0;
aliguori74576192008-10-06 14:02:03 +00002065 in_migration = enable;
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002066 ret = cpu_notify_migration_log(!!enable);
2067 return ret;
aliguori74576192008-10-06 14:02:03 +00002068}
2069
/* Return nonzero if dirty-page tracking is currently enabled. */
int cpu_physical_memory_get_dirty_tracking(void)
{
    return in_migration;
}
2074
Anthony Liguoric227f092009-10-01 16:12:16 -05002075int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
2076 target_phys_addr_t end_addr)
aliguori2bec46d2008-11-24 20:21:41 +00002077{
Michael S. Tsirkin7b8f3b72010-01-27 22:07:21 +02002078 int ret;
Jan Kiszka151f7742009-05-01 20:52:47 +02002079
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002080 ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
Jan Kiszka151f7742009-05-01 20:52:47 +02002081 return ret;
aliguori2bec46d2008-11-24 20:21:41 +00002082}
2083
/* If this entry is a direct-RAM write mapping whose backing page is no
   longer marked dirty, set TLB_NOTDIRTY so the next write is trapped. */
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;
    void *p;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        /* Recover the host pointer, then map it back to a ram_addr. */
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
            + tlb_entry->addend);
        ram_addr = qemu_ram_addr_from_host(p);
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}
/* update the TLB according to the current state of the dirty bits */
/* Walk every entry in every MMU mode and re-arm write trapping for pages
   that have become non-dirty. */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    int mmu_idx;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
    }
}
2109
pbrook0f459d12008-06-09 00:20:13 +00002110static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002111{
pbrook0f459d12008-06-09 00:20:13 +00002112 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2113 tlb_entry->addr_write = vaddr;
bellard1ccde1c2004-02-06 19:46:14 +00002114}
2115
/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
/* Applies tlb_set_dirty1 to the matching slot in every MMU mode. */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
}
2128
/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
/* 'size' must be a power of two (mask = ~(size - 1) relies on it). */
static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    /* First large page: start tracking exactly this region. */
    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB.  */
    mask &= env->tlb_flush_mask;
    /* Widen the mask until the tracked region covers both pages. */
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}
2151
/* Add a new TLB entry.  At most one entry for a given virtual address
   is permitted.  Only a single TARGET_PAGE_SIZE region is mapped; the
   supplied size is only used by tlb_flush_page (large pages are tracked
   via tlb_add_large_page so the flush can widen itself accordingly).

   env     - CPU whose TLB is being filled
   vaddr   - guest virtual address (page-aligned)
   paddr   - guest physical address the page maps to
   prot    - PAGE_READ/PAGE_WRITE/PAGE_EXEC permission bits
   mmu_idx - which of the per-mode TLB tables to fill
   size    - mapping size; must be >= TARGET_PAGE_SIZE */
void tlb_set_page(CPUState *env, target_ulong vaddr,
                  target_phys_addr_t paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    unsigned long addend;
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    target_phys_addr_t iotlb;

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        /* Remember the large-page extent so tlb_flush_page can flush it. */
        tlb_add_large_page(env, vaddr, size);
    }
    /* Look up the physical page descriptor; unmapped pages get the
       IO_MEM_UNASSIGNED handler. */
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    /* NOTE(review): this printf references is_softmmu, which is not a
       parameter or local of this function -- looks stale; confirm it
       still compiles with DEBUG_TLB defined. */
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    /* Host address corresponding to the guest page; te->addend below is
       the delta to add to a guest vaddr to reach host memory. */
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM.  The iotlb entry routes dirty-tracking writes
           through IO_MEM_NOTDIRTY and ROM writes through IO_MEM_ROM. */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        if (p) {
            iotlb += p->region_offset;
        } else {
            iotlb += paddr;
        }
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            iotlb = io_mem_watch + paddr;
            /* TODO: The memory case can be optimized by not trapping
               reads of pages with a write breakpoint.  */
            address |= TLB_MMIO;
        }
    }

    /* Fill the TLB slot for this page; -1 marks a permission as absent. */
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            /* Clean RAM page: trap the first write to update dirty bits. */
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}
2254
bellard01243112004-01-04 15:48:17 +00002255#else
2256
bellardee8b7022004-02-03 23:35:10 +00002257void tlb_flush(CPUState *env, int flush_global)
bellard01243112004-01-04 15:48:17 +00002258{
2259}
2260
bellard2e126692004-04-25 21:28:44 +00002261void tlb_flush_page(CPUState *env, target_ulong addr)
bellard01243112004-01-04 15:48:17 +00002262{
2263}
2264
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002265/*
2266 * Walks guest process memory "regions" one by one
2267 * and calls callback function 'fn' for each region.
2268 */
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002269
2270struct walk_memory_regions_data
bellard9fa3e852004-01-04 18:06:42 +00002271{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002272 walk_memory_regions_fn fn;
2273 void *priv;
2274 unsigned long start;
2275 int prot;
2276};
bellard9fa3e852004-01-04 18:06:42 +00002277
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002278static int walk_memory_regions_end(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00002279 abi_ulong end, int new_prot)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002280{
2281 if (data->start != -1ul) {
2282 int rc = data->fn(data->priv, data->start, end, data->prot);
2283 if (rc != 0) {
2284 return rc;
bellard9fa3e852004-01-04 18:06:42 +00002285 }
bellard33417e72003-08-10 21:47:01 +00002286 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002287
2288 data->start = (new_prot ? end : -1ul);
2289 data->prot = new_prot;
2290
2291 return 0;
2292}
2293
/* Recursively walk one level of the page-descriptor radix tree rooted at
   *LP, whose pages cover guest addresses starting at BASE.  LEVEL 0 is a
   leaf array of PageDesc; higher levels are arrays of child pointers.
   Region boundaries (protection changes and holes) are reported through
   walk_memory_regions_end.  Returns the first non-zero callback result,
   or 0. */
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        /* Hole in the address space: close any open region at BASE. */
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                /* Protection changed: emit the previous region. */
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            /* Each child at this level covers L2_BITS * level extra bits. */
            pa = base | ((abi_ulong)i <<
                (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}
2331
2332int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2333{
2334 struct walk_memory_regions_data data;
2335 unsigned long i;
2336
2337 data.fn = fn;
2338 data.priv = priv;
2339 data.start = -1ul;
2340 data.prot = 0;
2341
2342 for (i = 0; i < V_L1_SIZE; i++) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002343 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002344 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2345 if (rc != 0) {
2346 return rc;
2347 }
2348 }
2349
2350 return walk_memory_regions_end(&data, 0, 0);
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002351}
2352
Paul Brookb480d9b2010-03-12 23:23:29 +00002353static int dump_region(void *priv, abi_ulong start,
2354 abi_ulong end, unsigned long prot)
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002355{
2356 FILE *f = (FILE *)priv;
2357
Paul Brookb480d9b2010-03-12 23:23:29 +00002358 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2359 " "TARGET_ABI_FMT_lx" %c%c%c\n",
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002360 start, end, end - start,
2361 ((prot & PAGE_READ) ? 'r' : '-'),
2362 ((prot & PAGE_WRITE) ? 'w' : '-'),
2363 ((prot & PAGE_EXEC) ? 'x' : '-'));
2364
2365 return (0);
2366}
2367
2368/* dump memory mappings */
2369void page_dump(FILE *f)
2370{
2371 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2372 "start", "end", "size", "prot");
2373 walk_memory_regions(f, dump_region);
bellard33417e72003-08-10 21:47:01 +00002374}
2375
pbrook53a59602006-03-25 19:31:22 +00002376int page_get_flags(target_ulong address)
bellard33417e72003-08-10 21:47:01 +00002377{
bellard9fa3e852004-01-04 18:06:42 +00002378 PageDesc *p;
2379
2380 p = page_find(address >> TARGET_PAGE_BITS);
bellard33417e72003-08-10 21:47:01 +00002381 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00002382 return 0;
2383 return p->flags;
bellard33417e72003-08-10 21:47:01 +00002384}
2385
Richard Henderson376a7902010-03-10 15:57:04 -08002386/* Modify the flags of a page and invalidate the code if necessary.
2387 The flag PAGE_WRITE_ORG is positioned automatically depending
2388 on PAGE_WRITE. The mmap_lock should already be held. */
pbrook53a59602006-03-25 19:31:22 +00002389void page_set_flags(target_ulong start, target_ulong end, int flags)
bellard9fa3e852004-01-04 18:06:42 +00002390{
Richard Henderson376a7902010-03-10 15:57:04 -08002391 target_ulong addr, len;
bellard9fa3e852004-01-04 18:06:42 +00002392
Richard Henderson376a7902010-03-10 15:57:04 -08002393 /* This function should never be called with addresses outside the
2394 guest address space. If this assert fires, it probably indicates
2395 a missing call to h2g_valid. */
Paul Brookb480d9b2010-03-12 23:23:29 +00002396#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2397 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002398#endif
2399 assert(start < end);
2400
bellard9fa3e852004-01-04 18:06:42 +00002401 start = start & TARGET_PAGE_MASK;
2402 end = TARGET_PAGE_ALIGN(end);
Richard Henderson376a7902010-03-10 15:57:04 -08002403
2404 if (flags & PAGE_WRITE) {
bellard9fa3e852004-01-04 18:06:42 +00002405 flags |= PAGE_WRITE_ORG;
Richard Henderson376a7902010-03-10 15:57:04 -08002406 }
2407
2408 for (addr = start, len = end - start;
2409 len != 0;
2410 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2411 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2412
2413 /* If the write protection bit is set, then we invalidate
2414 the code inside. */
ths5fafdf22007-09-16 21:08:06 +00002415 if (!(p->flags & PAGE_WRITE) &&
bellard9fa3e852004-01-04 18:06:42 +00002416 (flags & PAGE_WRITE) &&
2417 p->first_tb) {
bellardd720b932004-04-25 17:57:43 +00002418 tb_invalidate_phys_page(addr, 0, NULL);
bellard9fa3e852004-01-04 18:06:42 +00002419 }
2420 p->flags = flags;
2421 }
bellard9fa3e852004-01-04 18:06:42 +00002422}
2423
ths3d97b402007-11-02 19:02:07 +00002424int page_check_range(target_ulong start, target_ulong len, int flags)
2425{
2426 PageDesc *p;
2427 target_ulong end;
2428 target_ulong addr;
2429
Richard Henderson376a7902010-03-10 15:57:04 -08002430 /* This function should never be called with addresses outside the
2431 guest address space. If this assert fires, it probably indicates
2432 a missing call to h2g_valid. */
Blue Swirl338e9e62010-03-13 09:48:08 +00002433#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2434 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002435#endif
2436
Richard Henderson3e0650a2010-03-29 10:54:42 -07002437 if (len == 0) {
2438 return 0;
2439 }
Richard Henderson376a7902010-03-10 15:57:04 -08002440 if (start + len - 1 < start) {
2441 /* We've wrapped around. */
balrog55f280c2008-10-28 10:24:11 +00002442 return -1;
Richard Henderson376a7902010-03-10 15:57:04 -08002443 }
balrog55f280c2008-10-28 10:24:11 +00002444
ths3d97b402007-11-02 19:02:07 +00002445 end = TARGET_PAGE_ALIGN(start+len); /* must do before we loose bits in the next step */
2446 start = start & TARGET_PAGE_MASK;
2447
Richard Henderson376a7902010-03-10 15:57:04 -08002448 for (addr = start, len = end - start;
2449 len != 0;
2450 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
ths3d97b402007-11-02 19:02:07 +00002451 p = page_find(addr >> TARGET_PAGE_BITS);
2452 if( !p )
2453 return -1;
2454 if( !(p->flags & PAGE_VALID) )
2455 return -1;
2456
bellarddae32702007-11-14 10:51:00 +00002457 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
ths3d97b402007-11-02 19:02:07 +00002458 return -1;
bellarddae32702007-11-14 10:51:00 +00002459 if (flags & PAGE_WRITE) {
2460 if (!(p->flags & PAGE_WRITE_ORG))
2461 return -1;
2462 /* unprotect the page if it was put read-only because it
2463 contains translated code */
2464 if (!(p->flags & PAGE_WRITE)) {
2465 if (!page_unprotect(addr, 0, NULL))
2466 return -1;
2467 }
2468 return 0;
2469 }
ths3d97b402007-11-02 19:02:07 +00002470 }
2471 return 0;
2472}
2473
bellard9fa3e852004-01-04 18:06:42 +00002474/* called from signal handler: invalidate the code and unprotect the
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002475 page. Return TRUE if the fault was successfully handled. */
pbrook53a59602006-03-25 19:31:22 +00002476int page_unprotect(target_ulong address, unsigned long pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00002477{
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002478 unsigned int prot;
2479 PageDesc *p;
pbrook53a59602006-03-25 19:31:22 +00002480 target_ulong host_start, host_end, addr;
bellard9fa3e852004-01-04 18:06:42 +00002481
pbrookc8a706f2008-06-02 16:16:42 +00002482 /* Technically this isn't safe inside a signal handler. However we
2483 know this only ever happens in a synchronous SEGV handler, so in
2484 practice it seems to be ok. */
2485 mmap_lock();
2486
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002487 p = page_find(address >> TARGET_PAGE_BITS);
2488 if (!p) {
pbrookc8a706f2008-06-02 16:16:42 +00002489 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002490 return 0;
pbrookc8a706f2008-06-02 16:16:42 +00002491 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002492
bellard9fa3e852004-01-04 18:06:42 +00002493 /* if the page was really writable, then we change its
2494 protection back to writable */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002495 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2496 host_start = address & qemu_host_page_mask;
2497 host_end = host_start + qemu_host_page_size;
2498
2499 prot = 0;
2500 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2501 p = page_find(addr >> TARGET_PAGE_BITS);
2502 p->flags |= PAGE_WRITE;
2503 prot |= p->flags;
2504
bellard9fa3e852004-01-04 18:06:42 +00002505 /* and since the content will be modified, we must invalidate
2506 the corresponding translated code. */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002507 tb_invalidate_phys_page(addr, pc, puc);
bellard9fa3e852004-01-04 18:06:42 +00002508#ifdef DEBUG_TB_CHECK
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002509 tb_invalidate_check(addr);
bellard9fa3e852004-01-04 18:06:42 +00002510#endif
bellard9fa3e852004-01-04 18:06:42 +00002511 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002512 mprotect((void *)g2h(host_start), qemu_host_page_size,
2513 prot & PAGE_BITS);
2514
2515 mmap_unlock();
2516 return 1;
bellard9fa3e852004-01-04 18:06:42 +00002517 }
pbrookc8a706f2008-06-02 16:16:42 +00002518 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002519 return 0;
2520}
2521
bellard6a00d602005-11-21 23:25:50 +00002522static inline void tlb_set_dirty(CPUState *env,
2523 unsigned long addr, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002524{
2525}
bellard9fa3e852004-01-04 18:06:42 +00002526#endif /* defined(CONFIG_USER_ONLY) */
2527
pbrooke2eef172008-06-08 01:09:01 +00002528#if !defined(CONFIG_USER_ONLY)
pbrook8da3ff12008-12-01 18:59:50 +00002529
Paul Brookc04b2b72010-03-01 03:31:14 +00002530#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2531typedef struct subpage_t {
2532 target_phys_addr_t base;
Richard Hendersonf6405242010-04-22 16:47:31 -07002533 ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2534 ram_addr_t region_offset[TARGET_PAGE_SIZE];
Paul Brookc04b2b72010-03-01 03:31:14 +00002535} subpage_t;
2536
Anthony Liguoric227f092009-10-01 16:12:16 -05002537static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2538 ram_addr_t memory, ram_addr_t region_offset);
Richard Hendersonf6405242010-04-22 16:47:31 -07002539static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2540 ram_addr_t orig_memory,
2541 ram_addr_t region_offset);
blueswir1db7b5422007-05-26 17:36:03 +00002542#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2543 need_subpage) \
2544 do { \
2545 if (addr > start_addr) \
2546 start_addr2 = 0; \
2547 else { \
2548 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2549 if (start_addr2 > 0) \
2550 need_subpage = 1; \
2551 } \
2552 \
blueswir149e9fba2007-05-30 17:25:06 +00002553 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
blueswir1db7b5422007-05-26 17:36:03 +00002554 end_addr2 = TARGET_PAGE_SIZE - 1; \
2555 else { \
2556 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2557 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2558 need_subpage = 1; \
2559 } \
2560 } while (0)
2561
Michael S. Tsirkin8f2498f2009-09-29 18:53:16 +02002562/* register physical memory.
2563 For RAM, 'size' must be a multiple of the target page size.
2564 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
pbrook8da3ff12008-12-01 18:59:50 +00002565 io memory page. The address used when calling the IO function is
2566 the offset from the start of the region, plus region_offset. Both
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002567 start_addr and region_offset are rounded down to a page boundary
pbrook8da3ff12008-12-01 18:59:50 +00002568 before calculating this offset. This should not be a problem unless
2569 the low bits of start_addr and region_offset differ. */
Anthony Liguoric227f092009-10-01 16:12:16 -05002570void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2571 ram_addr_t size,
2572 ram_addr_t phys_offset,
2573 ram_addr_t region_offset)
bellard33417e72003-08-10 21:47:01 +00002574{
Anthony Liguoric227f092009-10-01 16:12:16 -05002575 target_phys_addr_t addr, end_addr;
bellard92e873b2004-05-21 14:52:29 +00002576 PhysPageDesc *p;
bellard9d420372006-06-25 22:25:22 +00002577 CPUState *env;
Anthony Liguoric227f092009-10-01 16:12:16 -05002578 ram_addr_t orig_size = size;
Richard Hendersonf6405242010-04-22 16:47:31 -07002579 subpage_t *subpage;
bellard33417e72003-08-10 21:47:01 +00002580
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002581 cpu_notify_set_memory(start_addr, size, phys_offset);
2582
pbrook67c4d232009-02-23 13:16:07 +00002583 if (phys_offset == IO_MEM_UNASSIGNED) {
2584 region_offset = start_addr;
2585 }
pbrook8da3ff12008-12-01 18:59:50 +00002586 region_offset &= TARGET_PAGE_MASK;
bellard5fd386f2004-05-23 21:11:22 +00002587 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
Anthony Liguoric227f092009-10-01 16:12:16 -05002588 end_addr = start_addr + (target_phys_addr_t)size;
blueswir149e9fba2007-05-30 17:25:06 +00002589 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
blueswir1db7b5422007-05-26 17:36:03 +00002590 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2591 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
Anthony Liguoric227f092009-10-01 16:12:16 -05002592 ram_addr_t orig_memory = p->phys_offset;
2593 target_phys_addr_t start_addr2, end_addr2;
blueswir1db7b5422007-05-26 17:36:03 +00002594 int need_subpage = 0;
2595
2596 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2597 need_subpage);
Richard Hendersonf6405242010-04-22 16:47:31 -07002598 if (need_subpage) {
blueswir1db7b5422007-05-26 17:36:03 +00002599 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2600 subpage = subpage_init((addr & TARGET_PAGE_MASK),
pbrook8da3ff12008-12-01 18:59:50 +00002601 &p->phys_offset, orig_memory,
2602 p->region_offset);
blueswir1db7b5422007-05-26 17:36:03 +00002603 } else {
2604 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2605 >> IO_MEM_SHIFT];
2606 }
pbrook8da3ff12008-12-01 18:59:50 +00002607 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2608 region_offset);
2609 p->region_offset = 0;
blueswir1db7b5422007-05-26 17:36:03 +00002610 } else {
2611 p->phys_offset = phys_offset;
2612 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2613 (phys_offset & IO_MEM_ROMD))
2614 phys_offset += TARGET_PAGE_SIZE;
2615 }
2616 } else {
2617 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2618 p->phys_offset = phys_offset;
pbrook8da3ff12008-12-01 18:59:50 +00002619 p->region_offset = region_offset;
blueswir1db7b5422007-05-26 17:36:03 +00002620 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
pbrook8da3ff12008-12-01 18:59:50 +00002621 (phys_offset & IO_MEM_ROMD)) {
blueswir1db7b5422007-05-26 17:36:03 +00002622 phys_offset += TARGET_PAGE_SIZE;
pbrook0e8f0962008-12-02 09:02:15 +00002623 } else {
Anthony Liguoric227f092009-10-01 16:12:16 -05002624 target_phys_addr_t start_addr2, end_addr2;
blueswir1db7b5422007-05-26 17:36:03 +00002625 int need_subpage = 0;
2626
2627 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2628 end_addr2, need_subpage);
2629
Richard Hendersonf6405242010-04-22 16:47:31 -07002630 if (need_subpage) {
blueswir1db7b5422007-05-26 17:36:03 +00002631 subpage = subpage_init((addr & TARGET_PAGE_MASK),
pbrook8da3ff12008-12-01 18:59:50 +00002632 &p->phys_offset, IO_MEM_UNASSIGNED,
pbrook67c4d232009-02-23 13:16:07 +00002633 addr & TARGET_PAGE_MASK);
blueswir1db7b5422007-05-26 17:36:03 +00002634 subpage_register(subpage, start_addr2, end_addr2,
pbrook8da3ff12008-12-01 18:59:50 +00002635 phys_offset, region_offset);
2636 p->region_offset = 0;
blueswir1db7b5422007-05-26 17:36:03 +00002637 }
2638 }
2639 }
pbrook8da3ff12008-12-01 18:59:50 +00002640 region_offset += TARGET_PAGE_SIZE;
bellard33417e72003-08-10 21:47:01 +00002641 }
ths3b46e622007-09-17 08:09:54 +00002642
bellard9d420372006-06-25 22:25:22 +00002643 /* since each CPU stores ram addresses in its TLB cache, we must
2644 reset the modified entries */
2645 /* XXX: slow ! */
2646 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2647 tlb_flush(env, 1);
2648 }
bellard33417e72003-08-10 21:47:01 +00002649}
2650
bellardba863452006-09-24 18:41:10 +00002651/* XXX: temporary until new memory mapping API */
Anthony Liguoric227f092009-10-01 16:12:16 -05002652ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
bellardba863452006-09-24 18:41:10 +00002653{
2654 PhysPageDesc *p;
2655
2656 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2657 if (!p)
2658 return IO_MEM_UNASSIGNED;
2659 return p->phys_offset;
2660}
2661
Anthony Liguoric227f092009-10-01 16:12:16 -05002662void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002663{
2664 if (kvm_enabled())
2665 kvm_coalesce_mmio_region(addr, size);
2666}
2667
Anthony Liguoric227f092009-10-01 16:12:16 -05002668void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002669{
2670 if (kvm_enabled())
2671 kvm_uncoalesce_mmio_region(addr, size);
2672}
2673
Sheng Yang62a27442010-01-26 19:21:16 +08002674void qemu_flush_coalesced_mmio_buffer(void)
2675{
2676 if (kvm_enabled())
2677 kvm_flush_coalesced_mmio_buffer();
2678}
2679
Marcelo Tosattic9027602010-03-01 20:25:08 -03002680#if defined(__linux__) && !defined(TARGET_S390X)
2681
2682#include <sys/vfs.h>
2683
#define HUGETLBFS_MAGIC 0x958458f6

/* Return the filesystem block size for PATH -- the huge page size when
   PATH lives on hugetlbfs.  Returns 0 if statfs fails (after reporting
   via perror); prints a warning to stderr when PATH is not on
   hugetlbfs but still returns the block size. */
static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int err;

    /* statfs can be interrupted by a signal; retry on EINTR. */
    do {
        err = statfs(path, &fs);
    } while (err != 0 && errno == EINTR);

    if (err != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC) {
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
    }

    return fs.f_bsize;
}
2705
/* Allocate MEMORY bytes of guest RAM backed by a (deleted) temporary file
   created under PATH -- intended for hugetlbfs mounts given via -mem-path.
   Returns the mapped area, or NULL on any failure so the caller can fall
   back to a normal allocation.
   NOTE(review): the file descriptor is not closed on the success path;
   the mapping stays valid without it, so this looks like an fd leak per
   allocation -- confirm whether it is kept open deliberately. */
static void *file_ram_alloc(ram_addr_t memory, const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    /* Request smaller than one huge page: not worth backing this way. */
    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    /* Unlink immediately: the file vanishes when the fd is released. */
    unlink(filename);
    free(filename);

    /* Round the size up to a whole number of huge pages. */
    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }
    return area;
}
2772
Anthony Liguoric227f092009-10-01 16:12:16 -05002773ram_addr_t qemu_ram_alloc(ram_addr_t size)
pbrook94a6b542009-04-11 17:15:54 +00002774{
2775 RAMBlock *new_block;
2776
pbrook94a6b542009-04-11 17:15:54 +00002777 size = TARGET_PAGE_ALIGN(size);
2778 new_block = qemu_malloc(sizeof(*new_block));
2779
Marcelo Tosattic9027602010-03-01 20:25:08 -03002780 if (mem_path) {
2781#if defined (__linux__) && !defined(TARGET_S390X)
2782 new_block->host = file_ram_alloc(size, mem_path);
Marcelo Tosatti618a5682010-05-03 18:12:23 -03002783 if (!new_block->host) {
2784 new_block->host = qemu_vmalloc(size);
2785#ifdef MADV_MERGEABLE
2786 madvise(new_block->host, size, MADV_MERGEABLE);
2787#endif
2788 }
Alexander Graf6b024942009-12-05 12:44:25 +01002789#else
Marcelo Tosattic9027602010-03-01 20:25:08 -03002790 fprintf(stderr, "-mem-path option unsupported\n");
2791 exit(1);
2792#endif
2793 } else {
2794#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2795 /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
2796 new_block->host = mmap((void*)0x1000000, size,
2797 PROT_EXEC|PROT_READ|PROT_WRITE,
2798 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
2799#else
2800 new_block->host = qemu_vmalloc(size);
Alexander Graf6b024942009-12-05 12:44:25 +01002801#endif
Izik Eidusccb167e2009-10-08 16:39:39 +02002802#ifdef MADV_MERGEABLE
Marcelo Tosattic9027602010-03-01 20:25:08 -03002803 madvise(new_block->host, size, MADV_MERGEABLE);
Izik Eidusccb167e2009-10-08 16:39:39 +02002804#endif
Marcelo Tosattic9027602010-03-01 20:25:08 -03002805 }
pbrook94a6b542009-04-11 17:15:54 +00002806 new_block->offset = last_ram_offset;
2807 new_block->length = size;
2808
2809 new_block->next = ram_blocks;
2810 ram_blocks = new_block;
2811
2812 phys_ram_dirty = qemu_realloc(phys_ram_dirty,
2813 (last_ram_offset + size) >> TARGET_PAGE_BITS);
2814 memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
2815 0xff, size >> TARGET_PAGE_BITS);
2816
2817 last_ram_offset += size;
2818
Jan Kiszka6f0437e2009-04-26 18:03:40 +02002819 if (kvm_enabled())
2820 kvm_setup_guest_memory(new_block->host, size);
2821
pbrook94a6b542009-04-11 17:15:54 +00002822 return new_block->offset;
2823}
bellarde9a1ab12007-02-08 23:08:38 +00002824
Anthony Liguoric227f092009-10-01 16:12:16 -05002825void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00002826{
pbrook94a6b542009-04-11 17:15:54 +00002827 /* TODO: implement this. */
bellarde9a1ab12007-02-08 23:08:38 +00002828}
2829
pbrookdc828ca2009-04-09 22:21:07 +00002830/* Return a host pointer to ram allocated with qemu_ram_alloc.
pbrook5579c7f2009-04-11 14:47:08 +00002831 With the exception of the softmmu code in this file, this should
2832 only be used for local memory (e.g. video ram) that the device owns,
2833 and knows it isn't going to access beyond the end of the block.
2834
2835 It should not be used for general purpose DMA.
2836 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2837 */
Anthony Liguoric227f092009-10-01 16:12:16 -05002838void *qemu_get_ram_ptr(ram_addr_t addr)
pbrookdc828ca2009-04-09 22:21:07 +00002839{
pbrook94a6b542009-04-11 17:15:54 +00002840 RAMBlock *prev;
2841 RAMBlock **prevp;
2842 RAMBlock *block;
2843
pbrook94a6b542009-04-11 17:15:54 +00002844 prev = NULL;
2845 prevp = &ram_blocks;
2846 block = ram_blocks;
2847 while (block && (block->offset > addr
2848 || block->offset + block->length <= addr)) {
2849 if (prev)
2850 prevp = &prev->next;
2851 prev = block;
2852 block = block->next;
2853 }
2854 if (!block) {
2855 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2856 abort();
2857 }
2858 /* Move this entry to to start of the list. */
2859 if (prev) {
2860 prev->next = block->next;
2861 block->next = *prevp;
2862 *prevp = block;
2863 }
2864 return block->host + (addr - block->offset);
pbrookdc828ca2009-04-09 22:21:07 +00002865}
2866
pbrook5579c7f2009-04-11 14:47:08 +00002867/* Some of the softmmu routines need to translate from a host pointer
2868 (typically a TLB entry) back to a ram offset. */
Anthony Liguoric227f092009-10-01 16:12:16 -05002869ram_addr_t qemu_ram_addr_from_host(void *ptr)
pbrook5579c7f2009-04-11 14:47:08 +00002870{
pbrook94a6b542009-04-11 17:15:54 +00002871 RAMBlock *block;
2872 uint8_t *host = ptr;
2873
pbrook94a6b542009-04-11 17:15:54 +00002874 block = ram_blocks;
2875 while (block && (block->host > host
2876 || block->host + block->length <= host)) {
pbrook94a6b542009-04-11 17:15:54 +00002877 block = block->next;
2878 }
2879 if (!block) {
2880 fprintf(stderr, "Bad ram pointer %p\n", ptr);
2881 abort();
2882 }
2883 return block->offset + (host - block->host);
pbrook5579c7f2009-04-11 14:47:08 +00002884}
2885
Anthony Liguoric227f092009-10-01 16:12:16 -05002886static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
bellard33417e72003-08-10 21:47:01 +00002887{
pbrook67d3b952006-12-18 05:03:52 +00002888#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00002889 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
pbrook67d3b952006-12-18 05:03:52 +00002890#endif
Edgar E. Iglesiasfaed1c22009-09-03 13:25:09 +02002891#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
blueswir1e18231a2008-10-06 18:46:28 +00002892 do_unassigned_access(addr, 0, 0, 0, 1);
2893#endif
2894 return 0;
2895}
2896
Anthony Liguoric227f092009-10-01 16:12:16 -05002897static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
blueswir1e18231a2008-10-06 18:46:28 +00002898{
2899#ifdef DEBUG_UNASSIGNED
2900 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2901#endif
Edgar E. Iglesiasfaed1c22009-09-03 13:25:09 +02002902#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
blueswir1e18231a2008-10-06 18:46:28 +00002903 do_unassigned_access(addr, 0, 0, 0, 2);
2904#endif
2905 return 0;
2906}
2907
Anthony Liguoric227f092009-10-01 16:12:16 -05002908static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
blueswir1e18231a2008-10-06 18:46:28 +00002909{
2910#ifdef DEBUG_UNASSIGNED
2911 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2912#endif
Edgar E. Iglesiasfaed1c22009-09-03 13:25:09 +02002913#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
blueswir1e18231a2008-10-06 18:46:28 +00002914 do_unassigned_access(addr, 0, 0, 0, 4);
blueswir1b4f0a312007-05-06 17:59:24 +00002915#endif
bellard33417e72003-08-10 21:47:01 +00002916 return 0;
2917}
2918
Anthony Liguoric227f092009-10-01 16:12:16 -05002919static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
bellard33417e72003-08-10 21:47:01 +00002920{
pbrook67d3b952006-12-18 05:03:52 +00002921#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00002922 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
pbrook67d3b952006-12-18 05:03:52 +00002923#endif
Edgar E. Iglesiasfaed1c22009-09-03 13:25:09 +02002924#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
blueswir1e18231a2008-10-06 18:46:28 +00002925 do_unassigned_access(addr, 1, 0, 0, 1);
2926#endif
2927}
2928
/* 16-bit write to an address with no device mapped behind it.  The
   value is discarded, except on targets that trap unassigned accesses. */
static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    /* size 2; flag arguments mirror the writeb/writel variants */
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}
2938
/* 32-bit write to an address with no device mapped behind it.  The
   value is discarded, except on targets that trap unassigned accesses. */
static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    /* size 4; flag arguments mirror the writeb/writew variants */
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}
2948
/* Read-handler triplet (byte/word/long) for unassigned memory. */
static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};
2954
/* Write-handler triplet (byte/word/long) for unassigned memory. */
static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};
2960
/* Byte store into a RAM page whose dirty bits indicate it may contain
   translated code: invalidate the affected TBs first, then perform the
   store and update the dirty bitmap. */
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        /* page still caches translated code: flush TBs covering this
           1-byte access, then re-read the flags the flush may change */
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stb_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
2980
/* 16-bit variant of notdirty_mem_writeb: flush translated code for the
   page if needed, store the value, and update the dirty bitmap. */
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        /* 2-byte access; flags may change as a result of the flush */
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stw_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
3000
/* 32-bit variant of notdirty_mem_writeb: flush translated code for the
   page if needed, store the value, and update the dirty bitmap. */
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        /* 4-byte access; flags may change as a result of the flush */
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stl_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
3020
/* Placeholder read table for slots that are only ever written through
   (io_mem_init() installs it for the ROM and notdirty slots). */
static CPUReadMemoryFunc * const error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};
3026
/* Write-handler triplet routing stores through the code-dirty logic. */
static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
3032
/* Generate a debug exception if a watchpoint has been hit.
   Called from the watch_mem_* handlers with the in-page offset of the
   access, a mask selecting the access size, and BP_MEM_READ or
   BP_MEM_WRITE. */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    /* reconstruct the guest virtual address of the access */
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                /* throw away the TB containing the access, then either
                   stop immediately or retranslate so the access can
                   complete before the debug exception fires */
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            /* clear stale hit marks on non-matching watchpoints */
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
3077
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines. */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    /* ~0x0: mask for a 1-byte access at any alignment */
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}
3086
/* 16-bit watchpoint-checked read (mask ~0x1 for 2-byte alignment). */
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}
3092
/* 32-bit watchpoint-checked read (mask ~0x3 for 4-byte alignment). */
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}
3098
/* 8-bit watchpoint-checked write. */
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}
3105
/* 16-bit watchpoint-checked write. */
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}
3112
/* 32-bit watchpoint-checked write. */
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}
3119
/* Read-handler triplet for pages carrying watchpoints. */
static CPUReadMemoryFunc * const watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};
3125
/* Write-handler triplet for pages carrying watchpoints. */
static CPUWriteMemoryFunc * const watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
pbrook6658ffb2007-03-16 23:58:11 +00003131
Richard Hendersonf6405242010-04-22 16:47:31 -07003132static inline uint32_t subpage_readlen (subpage_t *mmio,
3133 target_phys_addr_t addr,
3134 unsigned int len)
blueswir1db7b5422007-05-26 17:36:03 +00003135{
Richard Hendersonf6405242010-04-22 16:47:31 -07003136 unsigned int idx = SUBPAGE_IDX(addr);
blueswir1db7b5422007-05-26 17:36:03 +00003137#if defined(DEBUG_SUBPAGE)
3138 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3139 mmio, len, addr, idx);
3140#endif
blueswir1db7b5422007-05-26 17:36:03 +00003141
Richard Hendersonf6405242010-04-22 16:47:31 -07003142 addr += mmio->region_offset[idx];
3143 idx = mmio->sub_io_index[idx];
3144 return io_mem_read[idx][len](io_mem_opaque[idx], addr);
blueswir1db7b5422007-05-26 17:36:03 +00003145}
3146
Anthony Liguoric227f092009-10-01 16:12:16 -05003147static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
Richard Hendersonf6405242010-04-22 16:47:31 -07003148 uint32_t value, unsigned int len)
blueswir1db7b5422007-05-26 17:36:03 +00003149{
Richard Hendersonf6405242010-04-22 16:47:31 -07003150 unsigned int idx = SUBPAGE_IDX(addr);
blueswir1db7b5422007-05-26 17:36:03 +00003151#if defined(DEBUG_SUBPAGE)
Richard Hendersonf6405242010-04-22 16:47:31 -07003152 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
3153 __func__, mmio, len, addr, idx, value);
blueswir1db7b5422007-05-26 17:36:03 +00003154#endif
Richard Hendersonf6405242010-04-22 16:47:31 -07003155
3156 addr += mmio->region_offset[idx];
3157 idx = mmio->sub_io_index[idx];
3158 io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00003159}
3160
/* Byte-sized sub-page read: size index 0. */
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 0);
}
3165
/* Byte-sized sub-page write: size index 0. */
static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 0);
}
3171
/* Word-sized sub-page read: size index 1. */
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 1);
}
3176
/* Word-sized sub-page write: size index 1. */
static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 1);
}
3182
/* Long-sized sub-page read: size index 2. */
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 2);
}
3187
/* Long-sized sub-page write: size index 2. */
static void subpage_writel (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 2);
}
3193
/* Read-handler triplet installed for sub-page I/O slots. */
static CPUReadMemoryFunc * const subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};
3199
/* Write-handler triplet installed for sub-page I/O slots. */
static CPUWriteMemoryFunc * const subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
3205
Anthony Liguoric227f092009-10-01 16:12:16 -05003206static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3207 ram_addr_t memory, ram_addr_t region_offset)
blueswir1db7b5422007-05-26 17:36:03 +00003208{
3209 int idx, eidx;
3210
3211 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3212 return -1;
3213 idx = SUBPAGE_IDX(start);
3214 eidx = SUBPAGE_IDX(end);
3215#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00003216 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
blueswir1db7b5422007-05-26 17:36:03 +00003217 mmio, start, end, idx, eidx, memory);
3218#endif
Richard Hendersonf6405242010-04-22 16:47:31 -07003219 memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
blueswir1db7b5422007-05-26 17:36:03 +00003220 for (; idx <= eidx; idx++) {
Richard Hendersonf6405242010-04-22 16:47:31 -07003221 mmio->sub_io_index[idx] = memory;
3222 mmio->region_offset[idx] = region_offset;
blueswir1db7b5422007-05-26 17:36:03 +00003223 }
3224
3225 return 0;
3226}
3227
/* Allocate a sub-page container for the page at 'base', register it as
   an I/O slot, and initialise every slot to 'orig_memory'.  Writes the
   encoded slot (tagged IO_MEM_SUBPAGE) into *phys and returns the new
   container. */
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    /* whole page initially maps to the original backing memory */
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);

    return mmio;
}
3248
aliguori88715652009-02-11 15:20:58 +00003249static int get_free_io_mem_idx(void)
3250{
3251 int i;
3252
3253 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3254 if (!io_mem_used[i]) {
3255 io_mem_used[i] = 1;
3256 return i;
3257 }
Riku Voipioc6703b42009-12-03 15:56:05 +02003258 fprintf(stderr, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
aliguori88715652009-02-11 15:20:58 +00003259 return -1;
3260}
3261
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer.
   If io_index is non-zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned on error. */
Avi Kivity1eed09c2009-06-14 11:38:51 +03003269static int cpu_register_io_memory_fixed(int io_index,
Blue Swirld60efc62009-08-25 18:29:31 +00003270 CPUReadMemoryFunc * const *mem_read,
3271 CPUWriteMemoryFunc * const *mem_write,
Avi Kivity1eed09c2009-06-14 11:38:51 +03003272 void *opaque)
bellard33417e72003-08-10 21:47:01 +00003273{
Richard Henderson3cab7212010-05-07 09:52:51 -07003274 int i;
3275
bellard33417e72003-08-10 21:47:01 +00003276 if (io_index <= 0) {
aliguori88715652009-02-11 15:20:58 +00003277 io_index = get_free_io_mem_idx();
3278 if (io_index == -1)
3279 return io_index;
bellard33417e72003-08-10 21:47:01 +00003280 } else {
Avi Kivity1eed09c2009-06-14 11:38:51 +03003281 io_index >>= IO_MEM_SHIFT;
bellard33417e72003-08-10 21:47:01 +00003282 if (io_index >= IO_MEM_NB_ENTRIES)
3283 return -1;
3284 }
bellardb5ff1b32005-11-26 10:38:39 +00003285
Richard Henderson3cab7212010-05-07 09:52:51 -07003286 for (i = 0; i < 3; ++i) {
3287 io_mem_read[io_index][i]
3288 = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
3289 }
3290 for (i = 0; i < 3; ++i) {
3291 io_mem_write[io_index][i]
3292 = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
3293 }
bellarda4193c82004-06-03 14:01:43 +00003294 io_mem_opaque[io_index] = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003295
3296 return (io_index << IO_MEM_SHIFT);
bellard33417e72003-08-10 21:47:01 +00003297}
bellard61382a52003-10-27 21:22:23 +00003298
/* Public wrapper: always allocates a fresh I/O slot (io_index 0) for
   the given handler triplets.  See the comment above
   cpu_register_io_memory_fixed() for the return-value contract. */
int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
                           CPUWriteMemoryFunc * const *mem_write,
                           void *opaque)
{
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
}
3305
aliguori88715652009-02-11 15:20:58 +00003306void cpu_unregister_io_memory(int io_table_address)
3307{
3308 int i;
3309 int io_index = io_table_address >> IO_MEM_SHIFT;
3310
3311 for (i=0;i < 3; i++) {
3312 io_mem_read[io_index][i] = unassigned_mem_read[i];
3313 io_mem_write[io_index][i] = unassigned_mem_write[i];
3314 }
3315 io_mem_opaque[io_index] = NULL;
3316 io_mem_used[io_index] = 0;
3317}
3318
/* One-time setup of the fixed I/O slots (ROM, unassigned, notdirty)
   and the shared watchpoint slot. */
static void io_mem_init(void)
{
    int i;

    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
    /* NOTE(review): reserves the first 5 slots — presumably covers the
       fixed IO_MEM_* entries registered above; confirm against the
       IO_MEM_* constant values. */
    for (i=0; i<5; i++)
        io_mem_used[i] = 1;

    io_mem_watch = cpu_register_io_memory(watch_mem_read,
                                          watch_mem_write, NULL);
}
3332
pbrooke2eef172008-06-08 01:09:01 +00003333#endif /* !defined(CONFIG_USER_ONLY) */
3334
bellard13eb76e2004-01-24 15:23:36 +00003335/* physical memory access (slow version, mainly for debug) */
3336#if defined(CONFIG_USER_ONLY)
/* User-mode debugger access: copy 'len' bytes between guest address
   'addr' and host buffer 'buf', page by page, honouring the page
   protection flags.  Returns 0 on success, -1 if any page is invalid
   or lacks the required permission. */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* clamp the chunk to the end of the current page */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
bellard8df1cd02005-01-28 22:37:22 +00003375
bellard13eb76e2004-01-24 15:23:36 +00003376#else
/* Copy 'len' bytes between guest physical memory at 'addr' and host
   buffer 'buf'.  RAM pages are memcpy'd directly (keeping the dirty
   bitmap and translated code in sync on writes); other pages are split
   into the widest naturally-aligned 1/2/4-byte device accesses. */
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* clamp the chunk to the end of the current page */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                /* I/O write: dispatch through the slot's handlers */
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
bellard8df1cd02005-01-28 22:37:22 +00003473
/* used for ROM loading : can write in RAM and ROM.  Writes targeting
   pages that are neither RAM nor ROM are silently dropped. */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* clamp the chunk to the end of the current page */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
3512
/* Temporary buffer used by cpu_physical_memory_map() when the target
   region is not plain RAM; only one bounce mapping can be live at a
   time (map() bails out if bounce.buffer is already set). */
typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;
3520
/* Registration record for callers waiting to retry a failed
   cpu_physical_memory_map(); see cpu_register_map_client(). */
typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003529
3530void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3531{
3532 MapClient *client = qemu_malloc(sizeof(*client));
3533
3534 client->opaque = opaque;
3535 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003536 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00003537 return client;
3538}
3539
3540void cpu_unregister_map_client(void *_client)
3541{
3542 MapClient *client = (MapClient *)_client;
3543
Blue Swirl72cf2d42009-09-12 07:36:22 +00003544 QLIST_REMOVE(client, link);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09003545 qemu_free(client);
aliguoriba223c22009-01-22 16:59:16 +00003546}
3547
/* Drain the map-client list: invoke each registered callback (so its
   owner can retry cpu_physical_memory_map()) and unregister it. */
static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
3558
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    int l;
    uint8_t *ret = NULL;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* clamp the chunk to the end of the current page */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            /* non-RAM page: fall back to the single bounce buffer; can
               only be the first chunk, and only if the buffer is free */
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                /* pre-fill the buffer for a read mapping */
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            /* host addresses stopped being contiguous: stop growing
               the mapping here */
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}
3620
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        /* Direct RAM mapping: nothing to copy back, but written pages must
         * have their TBs invalidated and dirty flags updated. */
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    /* Bounce-buffer case: write data back to the guest if needed, release
     * the buffer, and wake up any clients waiting to map. */
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
bellardd0ecd2a2006-04-23 17:14:48 +00003656
bellard8df1cd02005-01-28 22:37:22 +00003657/* warning: addr must be aligned */
Anthony Liguoric227f092009-10-01 16:12:16 -05003658uint32_t ldl_phys(target_phys_addr_t addr)
bellard8df1cd02005-01-28 22:37:22 +00003659{
3660 int io_index;
3661 uint8_t *ptr;
3662 uint32_t val;
3663 unsigned long pd;
3664 PhysPageDesc *p;
3665
3666 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3667 if (!p) {
3668 pd = IO_MEM_UNASSIGNED;
3669 } else {
3670 pd = p->phys_offset;
3671 }
ths3b46e622007-09-17 08:09:54 +00003672
ths5fafdf22007-09-16 21:08:06 +00003673 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
bellard2a4188a2006-06-25 21:54:59 +00003674 !(pd & IO_MEM_ROMD)) {
bellard8df1cd02005-01-28 22:37:22 +00003675 /* I/O case */
3676 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00003677 if (p)
3678 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bellard8df1cd02005-01-28 22:37:22 +00003679 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3680 } else {
3681 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003682 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bellard8df1cd02005-01-28 22:37:22 +00003683 (addr & ~TARGET_PAGE_MASK);
3684 val = ldl_p(ptr);
3685 }
3686 return val;
3687}
3688
/* Load a 64-bit value from guest physical memory.
 * warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case: the io_mem tables only have 32-bit handlers, so a
         * 64-bit access is split into two 32-bit reads whose halves are
         * combined according to the target's byte order. */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
3726
bellardaab33092005-10-30 20:48:42 +00003727/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05003728uint32_t ldub_phys(target_phys_addr_t addr)
bellardaab33092005-10-30 20:48:42 +00003729{
3730 uint8_t val;
3731 cpu_physical_memory_read(addr, &val, 1);
3732 return val;
3733}
3734
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003735/* warning: addr must be aligned */
Anthony Liguoric227f092009-10-01 16:12:16 -05003736uint32_t lduw_phys(target_phys_addr_t addr)
bellardaab33092005-10-30 20:48:42 +00003737{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003738 int io_index;
3739 uint8_t *ptr;
3740 uint64_t val;
3741 unsigned long pd;
3742 PhysPageDesc *p;
3743
3744 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3745 if (!p) {
3746 pd = IO_MEM_UNASSIGNED;
3747 } else {
3748 pd = p->phys_offset;
3749 }
3750
3751 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3752 !(pd & IO_MEM_ROMD)) {
3753 /* I/O case */
3754 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3755 if (p)
3756 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3757 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
3758 } else {
3759 /* RAM case */
3760 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3761 (addr & ~TARGET_PAGE_MASK);
3762 val = lduw_p(ptr);
3763 }
3764 return val;
bellardaab33092005-10-30 20:48:42 +00003765}
3766
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* I/O (or ROM) case: dispatch to the region's 32-bit write handler */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        /* During migration the write must still be tracked so the page is
         * re-sent; code invalidation is performed here too in that case. */
        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
3805
/* Store a 64-bit value without updating dirty state; see
 * stl_phys_notdirty() for the rationale.
 * NOTE(review): presumably addr must be aligned, like the other
 * _notdirty/_phys accessors — confirm at call sites. */
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* I/O case: split the 64-bit store into two 32-bit writes,
         * ordered according to the target's byte order. */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        /* RAM case: write directly, without dirty tracking */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
3837
/* Store a 32-bit value to guest physical memory, with full dirty tracking.
 * warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* I/O (or ROM) case: dispatch to the region's 32-bit write handler */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}
3873
bellardaab33092005-10-30 20:48:42 +00003874/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05003875void stb_phys(target_phys_addr_t addr, uint32_t val)
bellardaab33092005-10-30 20:48:42 +00003876{
3877 uint8_t v = val;
3878 cpu_physical_memory_write(addr, &v, 1);
3879}
3880
/* Store a 16-bit value to guest physical memory, with full dirty tracking.
 * warning: addr must be aligned */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* I/O (or ROM) case: dispatch to the region's 16-bit write handler */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stw_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}
3916
3917/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05003918void stq_phys(target_phys_addr_t addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00003919{
3920 val = tswap64(val);
3921 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3922}
3923
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;                       /* bytes handled in the current page */
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* translate through the CPU's MMU state */
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        /* writes go through the ROM-capable path so debuggers can patch
         * read-only memory (e.g. to plant breakpoints) */
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
Paul Brooka68fe892010-03-01 00:08:59 +00003952#endif
bellard13eb76e2004-01-24 15:23:36 +00003953
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    /* locate the TB containing the host return address of the I/O access */
    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    /* restore guest CPU state to the faulting instruction */
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred. */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn. */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB. If this is not
       the first instruction in a TB then re-execute the preceding
       branch. */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    /* re-translate the same code with an instruction budget of n, forcing
       the I/O insn to be the last one in the block */
    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB. */
    cpu_resume_from_signal(env, NULL);
}
4012
Paul Brookb3755a92010-03-12 16:54:58 +00004013#if !defined(CONFIG_USER_ONLY)
4014
/* Print translation-cache statistics (TB counts, sizes, jump-chaining
 * ratios and flush counters) to f via cpu_fprintf; used by monitor
 * commands and debugging. */
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    /* accumulate statistics over all currently translated blocks */
    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
4067
bellard61382a52003-10-27 21:22:23 +00004068#define MMUSUFFIX _cmmu
4069#define GETPC() NULL
4070#define env cpu_single_env
bellardb769d8f2004-10-03 15:07:13 +00004071#define SOFTMMU_CODE_ACCESS
bellard61382a52003-10-27 21:22:23 +00004072
4073#define SHIFT 0
4074#include "softmmu_template.h"
4075
4076#define SHIFT 1
4077#include "softmmu_template.h"
4078
4079#define SHIFT 2
4080#include "softmmu_template.h"
4081
4082#define SHIFT 3
4083#include "softmmu_template.h"
4084
4085#undef env
4086
4087#endif