blob: 723ace48402e8c379136b7c6a834504ca4655582 [file] [log] [blame]
bellard54936002003-05-13 00:25:15 +00001/*
bellardfd6ce8f2003-05-14 19:00:11 +00002 * virtual page mapping and translated block handling
ths5fafdf22007-09-16 21:08:06 +00003 *
bellard54936002003-05-13 00:25:15 +00004 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
Blue Swirl8167ee82009-07-16 20:47:01 +000017 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
bellard54936002003-05-13 00:25:15 +000018 */
bellard67b915a2004-03-31 23:37:16 +000019#include "config.h"
bellardd5a8f072004-09-29 21:15:28 +000020#ifdef _WIN32
21#include <windows.h>
22#else
bellarda98d49b2004-11-14 16:22:05 +000023#include <sys/types.h>
bellardd5a8f072004-09-29 21:15:28 +000024#include <sys/mman.h>
25#endif
bellard54936002003-05-13 00:25:15 +000026
Stefan Weil055403b2010-10-22 23:03:32 +020027#include "qemu-common.h"
bellard6180a182003-09-30 21:04:53 +000028#include "cpu.h"
29#include "exec-all.h"
bellardb67d9a52008-05-23 09:57:34 +000030#include "tcg.h"
pbrookb3c77242008-06-30 16:31:04 +000031#include "hw/hw.h"
Alex Williamsoncc9e98c2010-06-25 11:09:43 -060032#include "hw/qdev.h"
aliguori74576192008-10-06 14:02:03 +000033#include "osdep.h"
aliguori7ba1e612008-11-05 16:04:33 +000034#include "kvm.h"
Blue Swirl29e922b2010-03-29 19:24:00 +000035#include "qemu-timer.h"
pbrook53a59602006-03-25 19:31:22 +000036#if defined(CONFIG_USER_ONLY)
37#include <qemu.h>
Riku Voipiofd052bf2010-01-25 14:30:49 +020038#include <signal.h>
Juergen Lockf01576f2010-03-25 22:32:16 +010039#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
40#include <sys/param.h>
41#if __FreeBSD_version >= 700104
42#define HAVE_KINFO_GETVMMAP
43#define sigqueue sigqueue_freebsd /* avoid redefinition */
44#include <sys/time.h>
45#include <sys/proc.h>
46#include <machine/profile.h>
47#define _KERNEL
48#include <sys/user.h>
49#undef _KERNEL
50#undef sigqueue
51#include <libutil.h>
52#endif
53#endif
pbrook53a59602006-03-25 19:31:22 +000054#endif
bellard54936002003-05-13 00:25:15 +000055
bellardfd6ce8f2003-05-14 19:00:11 +000056//#define DEBUG_TB_INVALIDATE
bellard66e85a22003-06-24 13:28:12 +000057//#define DEBUG_FLUSH
bellard9fa3e852004-01-04 18:06:42 +000058//#define DEBUG_TLB
pbrook67d3b952006-12-18 05:03:52 +000059//#define DEBUG_UNASSIGNED
bellardfd6ce8f2003-05-14 19:00:11 +000060
61/* make various TB consistency checks */
ths5fafdf22007-09-16 21:08:06 +000062//#define DEBUG_TB_CHECK
63//#define DEBUG_TLB_CHECK
bellardfd6ce8f2003-05-14 19:00:11 +000064
ths1196be32007-03-17 15:17:58 +000065//#define DEBUG_IOPORT
blueswir1db7b5422007-05-26 17:36:03 +000066//#define DEBUG_SUBPAGE
ths1196be32007-03-17 15:17:58 +000067
pbrook99773bd2006-04-16 15:14:59 +000068#if !defined(CONFIG_USER_ONLY)
69/* TB consistency checks only implemented for usermode emulation. */
70#undef DEBUG_TB_CHECK
71#endif
72
bellard9fa3e852004-01-04 18:06:42 +000073#define SMC_BITMAP_USE_THRESHOLD 10
74
blueswir1bdaf78e2008-10-04 07:24:27 +000075static TranslationBlock *tbs;
Stefan Weil24ab68a2010-07-19 18:23:17 +020076static int code_gen_max_blocks;
bellard9fa3e852004-01-04 18:06:42 +000077TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
blueswir1bdaf78e2008-10-04 07:24:27 +000078static int nb_tbs;
bellardeb51d102003-05-14 21:51:13 +000079/* any access to the tbs or the page table must use this lock */
Anthony Liguoric227f092009-10-01 16:12:16 -050080spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
bellardfd6ce8f2003-05-14 19:00:11 +000081
blueswir1141ac462008-07-26 15:05:57 +000082#if defined(__arm__) || defined(__sparc_v9__)
83/* The prologue must be reachable with a direct jump. ARM and Sparc64
84 have limited branch ranges (possibly also PPC) so place it in a
blueswir1d03d8602008-07-10 17:21:31 +000085 section close to code segment. */
86#define code_gen_section \
87 __attribute__((__section__(".gen_code"))) \
88 __attribute__((aligned (32)))
Stefan Weilf8e2af12009-06-18 23:04:48 +020089#elif defined(_WIN32)
90/* Maximum alignment for Win32 is 16. */
91#define code_gen_section \
92 __attribute__((aligned (16)))
blueswir1d03d8602008-07-10 17:21:31 +000093#else
94#define code_gen_section \
95 __attribute__((aligned (32)))
96#endif
97
98uint8_t code_gen_prologue[1024] code_gen_section;
blueswir1bdaf78e2008-10-04 07:24:27 +000099static uint8_t *code_gen_buffer;
100static unsigned long code_gen_buffer_size;
bellard26a5f132008-05-28 12:30:31 +0000101/* threshold to flush the translated code buffer */
blueswir1bdaf78e2008-10-04 07:24:27 +0000102static unsigned long code_gen_buffer_max_size;
Stefan Weil24ab68a2010-07-19 18:23:17 +0200103static uint8_t *code_gen_ptr;
bellardfd6ce8f2003-05-14 19:00:11 +0000104
pbrooke2eef172008-06-08 01:09:01 +0000105#if !defined(CONFIG_USER_ONLY)
bellard9fa3e852004-01-04 18:06:42 +0000106int phys_ram_fd;
aliguori74576192008-10-06 14:02:03 +0000107static int in_migration;
pbrook94a6b542009-04-11 17:15:54 +0000108
Alex Williamsonf471a172010-06-11 11:11:42 -0600109RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list) };
pbrooke2eef172008-06-08 01:09:01 +0000110#endif
bellard9fa3e852004-01-04 18:06:42 +0000111
bellard6a00d602005-11-21 23:25:50 +0000112CPUState *first_cpu;
113/* current CPU in the current thread. It is only valid inside
114 cpu_exec() */
ths5fafdf22007-09-16 21:08:06 +0000115CPUState *cpu_single_env;
pbrook2e70f6e2008-06-29 01:03:05 +0000116/* 0 = Do not count executed instructions.
thsbf20dc02008-06-30 17:22:19 +0000117 1 = Precise instruction counting.
pbrook2e70f6e2008-06-29 01:03:05 +0000118 2 = Adaptive rate instruction counting. */
119int use_icount = 0;
120/* Current instruction counter. While executing translated code this may
121 include some instructions that have not yet been executed. */
122int64_t qemu_icount;
bellard6a00d602005-11-21 23:25:50 +0000123
/* Per-guest-page bookkeeping stored in the leaves of the l1_map
   radix tree; tracks the TBs derived from the page and the state used
   to detect self-modifying code (SMC). */
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;   /* allocated on demand; freed by invalidate_page_bitmap() */
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;    /* PAGE_* flags (user-mode only) */
#endif
} PageDesc;
135
Paul Brook41c1b1c2010-03-12 16:54:58 +0000136/* In system mode we want L1_MAP to be based on ram offsets,
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800137 while in user mode we want it to be based on virtual addresses. */
138#if !defined(CONFIG_USER_ONLY)
Paul Brook41c1b1c2010-03-12 16:54:58 +0000139#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
140# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
141#else
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800142# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
Paul Brook41c1b1c2010-03-12 16:54:58 +0000143#endif
j_mayerbedb69e2007-04-05 20:08:21 +0000144#else
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800145# define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
j_mayerbedb69e2007-04-05 20:08:21 +0000146#endif
bellard54936002003-05-13 00:25:15 +0000147
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800148/* Size of the L2 (and L3, etc) page tables. */
149#define L2_BITS 10
bellard54936002003-05-13 00:25:15 +0000150#define L2_SIZE (1 << L2_BITS)
151
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800152/* The bits remaining after N lower levels of page tables. */
153#define P_L1_BITS_REM \
154 ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
155#define V_L1_BITS_REM \
156 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
157
158/* Size of the L1 page table. Avoid silly small sizes. */
159#if P_L1_BITS_REM < 4
160#define P_L1_BITS (P_L1_BITS_REM + L2_BITS)
161#else
162#define P_L1_BITS P_L1_BITS_REM
163#endif
164
165#if V_L1_BITS_REM < 4
166#define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
167#else
168#define V_L1_BITS V_L1_BITS_REM
169#endif
170
171#define P_L1_SIZE ((target_phys_addr_t)1 << P_L1_BITS)
172#define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
173
174#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
175#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
176
bellard83fb7ad2004-07-05 21:25:26 +0000177unsigned long qemu_real_host_page_size;
178unsigned long qemu_host_page_bits;
179unsigned long qemu_host_page_size;
180unsigned long qemu_host_page_mask;
bellard54936002003-05-13 00:25:15 +0000181
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800182/* This is a multi-level map on the virtual address space.
183 The bottom level has pointers to PageDesc. */
184static void *l1_map[V_L1_SIZE];
bellard54936002003-05-13 00:25:15 +0000185
pbrooke2eef172008-06-08 01:09:01 +0000186#if !defined(CONFIG_USER_ONLY)
Paul Brook41c1b1c2010-03-12 16:54:58 +0000187typedef struct PhysPageDesc {
188 /* offset in host memory of the page + io_index in the low bits */
189 ram_addr_t phys_offset;
190 ram_addr_t region_offset;
191} PhysPageDesc;
192
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800193/* This is a multi-level map on the physical address space.
194 The bottom level has pointers to PhysPageDesc. */
195static void *l1_phys_map[P_L1_SIZE];
Paul Brook6d9a1302010-02-28 23:55:53 +0000196
pbrooke2eef172008-06-08 01:09:01 +0000197static void io_mem_init(void);
198
bellard33417e72003-08-10 21:47:01 +0000199/* io memory support */
bellard33417e72003-08-10 21:47:01 +0000200CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
201CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
bellarda4193c82004-06-03 14:01:43 +0000202void *io_mem_opaque[IO_MEM_NB_ENTRIES];
blueswir1511d2b12009-03-07 15:32:56 +0000203static char io_mem_used[IO_MEM_NB_ENTRIES];
pbrook6658ffb2007-03-16 23:58:11 +0000204static int io_mem_watch;
205#endif
bellard33417e72003-08-10 21:47:01 +0000206
bellard34865132003-10-05 14:28:56 +0000207/* log support */
Juha Riihimäki1e8b27c2009-12-03 15:56:02 +0200208#ifdef WIN32
209static const char *logfilename = "qemu.log";
210#else
blueswir1d9b630f2008-10-05 09:57:08 +0000211static const char *logfilename = "/tmp/qemu.log";
Juha Riihimäki1e8b27c2009-12-03 15:56:02 +0200212#endif
bellard34865132003-10-05 14:28:56 +0000213FILE *logfile;
214int loglevel;
pbrooke735b912007-06-30 13:53:24 +0000215static int log_append = 0;
bellard34865132003-10-05 14:28:56 +0000216
bellarde3db7222005-01-26 22:00:47 +0000217/* statistics */
Paul Brookb3755a92010-03-12 16:54:58 +0000218#if !defined(CONFIG_USER_ONLY)
bellarde3db7222005-01-26 22:00:47 +0000219static int tlb_flush_count;
Paul Brookb3755a92010-03-12 16:54:58 +0000220#endif
bellarde3db7222005-01-26 22:00:47 +0000221static int tb_flush_count;
222static int tb_phys_invalidate_count;
223
#ifdef _WIN32
/* Make [addr, addr+size) executable (Win32 flavour). */
static void map_exec(void *addr, long size)
{
    DWORD old_protect;

    /* Result deliberately ignored: a failure will surface as a fault
       when the generated code is first executed. */
    VirtualProtect(addr, size, PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
/* Make [addr, addr+size) executable (POSIX flavour): round the range
   out to whole host pages and mprotect it RWX. */
static void map_exec(void *addr, long size)
{
    unsigned long page_size = getpagesize();
    unsigned long first = (unsigned long)addr & ~(page_size - 1);
    unsigned long last = ((unsigned long)addr + size + page_size - 1)
                         & ~(page_size - 1);

    /* Result deliberately ignored: a failure will surface as a fault
       when the generated code is first executed. */
    mprotect((void *)first, last - first,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
249
/* Initialize the host-page-size globals and, for BSD user-mode
   emulation, mark every region already mapped in the host process as
   PAGE_RESERVED so guest mappings are not placed on top of it. */
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    /* qemu_host_page_size may have been forced by the caller; default
       it to the real host page size, but never below the target's. */
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        /* FreeBSD >= 7.0.104: enumerate the host mappings directly. */
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
                        /* End lies outside the guest address space:
                           reserve to the top, but only when the guest
                           space is no wider than our page map. */
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        /* Fallback: parse the Linux-compat maps file. */
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}
338
Paul Brook41c1b1c2010-03-12 16:54:58 +0000339static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
bellard54936002003-05-13 00:25:15 +0000340{
Paul Brook41c1b1c2010-03-12 16:54:58 +0000341 PageDesc *pd;
342 void **lp;
343 int i;
344
pbrook17e23772008-06-09 13:47:45 +0000345#if defined(CONFIG_USER_ONLY)
Paul Brook2e9a5712010-05-05 16:32:59 +0100346 /* We can't use qemu_malloc because it may recurse into a locked mutex. */
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800347# define ALLOC(P, SIZE) \
348 do { \
349 P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \
350 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800351 } while (0)
pbrook17e23772008-06-09 13:47:45 +0000352#else
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800353# define ALLOC(P, SIZE) \
354 do { P = qemu_mallocz(SIZE); } while (0)
pbrook17e23772008-06-09 13:47:45 +0000355#endif
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800356
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800357 /* Level 1. Always allocated. */
358 lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
359
360 /* Level 2..N-1. */
361 for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
362 void **p = *lp;
363
364 if (p == NULL) {
365 if (!alloc) {
366 return NULL;
367 }
368 ALLOC(p, sizeof(void *) * L2_SIZE);
369 *lp = p;
370 }
371
372 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
bellard54936002003-05-13 00:25:15 +0000373 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800374
375 pd = *lp;
376 if (pd == NULL) {
377 if (!alloc) {
378 return NULL;
379 }
380 ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
381 *lp = pd;
382 }
383
384#undef ALLOC
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800385
386 return pd + (index & (L2_SIZE - 1));
bellard54936002003-05-13 00:25:15 +0000387}
388
/* Non-allocating lookup: return the PageDesc for guest page 'index',
   or NULL if the page has never been touched. */
static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
393
Paul Brook6d9a1302010-02-28 23:55:53 +0000394#if !defined(CONFIG_USER_ONLY)
Anthony Liguoric227f092009-10-01 16:12:16 -0500395static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
bellard92e873b2004-05-21 14:52:29 +0000396{
pbrooke3f4e2a2006-04-08 20:02:06 +0000397 PhysPageDesc *pd;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800398 void **lp;
399 int i;
bellard92e873b2004-05-21 14:52:29 +0000400
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800401 /* Level 1. Always allocated. */
402 lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));
bellard108c49b2005-07-24 12:55:09 +0000403
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800404 /* Level 2..N-1. */
405 for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
406 void **p = *lp;
407 if (p == NULL) {
408 if (!alloc) {
409 return NULL;
410 }
411 *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
412 }
413 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
bellard108c49b2005-07-24 12:55:09 +0000414 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800415
pbrooke3f4e2a2006-04-08 20:02:06 +0000416 pd = *lp;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800417 if (pd == NULL) {
pbrooke3f4e2a2006-04-08 20:02:06 +0000418 int i;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800419
420 if (!alloc) {
bellard108c49b2005-07-24 12:55:09 +0000421 return NULL;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800422 }
423
424 *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);
425
pbrook67c4d232009-02-23 13:16:07 +0000426 for (i = 0; i < L2_SIZE; i++) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800427 pd[i].phys_offset = IO_MEM_UNASSIGNED;
428 pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
pbrook67c4d232009-02-23 13:16:07 +0000429 }
bellard92e873b2004-05-21 14:52:29 +0000430 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800431
432 return pd + (index & (L2_SIZE - 1));
bellard92e873b2004-05-21 14:52:29 +0000433}
434
/* Non-allocating lookup: return the PhysPageDesc for physical page
   'index', or NULL if no descriptor has been created yet. */
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
439
/* Forward declarations for the TLB code-write-protection helpers
   defined later in this file (system emulation only). */
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
/* In system emulation there is no host mmap state to serialize, so the
   user-mode mmap lock degenerates to a no-op. */
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif
bellardfd6ce8f2003-05-14 19:00:11 +0000446
bellard43694152008-05-29 09:35:57 +0000447#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
448
449#if defined(CONFIG_USER_ONLY)
Stuart Bradyccbb4d42009-05-03 12:15:06 +0100450/* Currently it is not recommended to allocate big chunks of data in
bellard43694152008-05-29 09:35:57 +0000451 user mode. It will change when a dedicated libc will be used */
452#define USE_STATIC_CODE_GEN_BUFFER
453#endif
454
455#ifdef USE_STATIC_CODE_GEN_BUFFER
Aurelien Jarnoebf50fb2010-03-29 02:12:51 +0200456static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
457 __attribute__((aligned (CODE_GEN_ALIGN)));
bellard43694152008-05-29 09:35:57 +0000458#endif
459
/* Allocate (or adopt the static) buffer that will hold TCG-generated
   host code, make it and the prologue executable, and size the global
   TB array accordingly.  'tb_size' of zero selects a default size. */
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    /* Generic host: plain allocation plus an mprotect to make it
       executable. */
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    /* Keep headroom at the end of the buffer so one maximally-sized TB
       can always be generated past the flush threshold. */
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_MAX_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
561
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);        /* sets code_gen_buffer; must precede */
    code_gen_ptr = code_gen_buffer; /* generation starts at buffer start */
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}
580
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

/* vmstate post-load hook: sanitize common CPU state loaded from a
   snapshot/migration stream.  Always returns 0 (success). */
static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    /* Drop all cached translations/mappings; they are not migrated. */
    tlb_flush(env, 1);

    return 0;
}

/* Migration description of the target-independent part of CPUState. */
static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif
608
Glauber Costa950f1472009-06-09 12:15:18 -0400609CPUState *qemu_get_cpu(int cpu)
610{
611 CPUState *env = first_cpu;
612
613 while (env) {
614 if (env->cpu_index == cpu)
615 break;
616 env = env->next_cpu;
617 }
618
619 return env;
620}
621
/* Register a newly created CPU: append 'env' to the global CPU list,
   assign it the next free cpu_index, initialize its breakpoint and
   watchpoint lists, and hook up vmstate/savevm handling. */
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();    /* protect the CPU list while we modify it */
#endif
    env->next_cpu = NULL;
    /* Walk to the tail of the singly-linked CPU list, counting entries
       so cpu_index ends up as the new CPU's position. */
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}
651
Tristan Gingoldd1a1eb72011-02-10 10:04:57 +0100652/* Allocate a new translation block. Flush the translation buffer if
653 too many translation blocks or too much generated code. */
654static TranslationBlock *tb_alloc(target_ulong pc)
655{
656 TranslationBlock *tb;
657
658 if (nb_tbs >= code_gen_max_blocks ||
659 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
660 return NULL;
661 tb = &tbs[nb_tbs++];
662 tb->pc = pc;
663 tb->cflags = 0;
664 return tb;
665}
666
667void tb_free(TranslationBlock *tb)
668{
669 /* In practice this is mostly used for single use temporary TB
670 Ignore the hard cases and just back up if this TB happens to
671 be the last one generated. */
672 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
673 code_gen_ptr = tb->tc_ptr;
674 nb_tbs--;
675 }
676}
677
bellard9fa3e852004-01-04 18:06:42 +0000678static inline void invalidate_page_bitmap(PageDesc *p)
679{
680 if (p->code_bitmap) {
bellard59817cc2004-02-16 22:01:13 +0000681 qemu_free(p->code_bitmap);
bellard9fa3e852004-01-04 18:06:42 +0000682 p->code_bitmap = NULL;
683 }
684 p->code_write_count = 0;
685}
686
/* Set to NULL all the 'first_tb' fields in all PageDescs. */

/* Recursive helper for page_flush_tb: 'lp' is one slot of a radix-tree
   node sitting 'level' levels above the leaves; at level 0, *lp is a
   leaf array of L2_SIZE PageDescs. */
static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;     /* this subtree was never allocated */
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}
709
/* Clear the TB links and SMC bitmaps of every PageDesc by walking all
   top-level slots of the l1_map radix tree. */
static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}
717
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    /* Generated code must never have run past the end of the buffer. */
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    /* Clear every CPU's direct TB-lookup cache. */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    /* Clear the physical-address hash and all per-page TB lists. */
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    /* All generated code is reclaimed; restart at the buffer start. */
    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
746
747#ifdef DEBUG_TB_CHECK
748
j_mayerbc98a7e2007-04-04 07:55:12 +0000749static void tb_invalidate_check(target_ulong address)
bellardfd6ce8f2003-05-14 19:00:11 +0000750{
751 TranslationBlock *tb;
752 int i;
753 address &= TARGET_PAGE_MASK;
pbrook99773bd2006-04-16 15:14:59 +0000754 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
755 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
bellardfd6ce8f2003-05-14 19:00:11 +0000756 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
757 address >= tb->pc + tb->size)) {
Blue Swirl0bf9e312009-07-20 17:19:25 +0000758 printf("ERROR invalidate: address=" TARGET_FMT_lx
759 " PC=%08lx size=%04x\n",
pbrook99773bd2006-04-16 15:14:59 +0000760 address, (long)tb->pc, tb->size);
bellardfd6ce8f2003-05-14 19:00:11 +0000761 }
762 }
763 }
764}
765
766/* verify that all the pages have correct rights for code */
767static void tb_page_check(void)
768{
769 TranslationBlock *tb;
770 int i, flags1, flags2;
ths3b46e622007-09-17 08:09:54 +0000771
pbrook99773bd2006-04-16 15:14:59 +0000772 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
773 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
bellardfd6ce8f2003-05-14 19:00:11 +0000774 flags1 = page_get_flags(tb->pc);
775 flags2 = page_get_flags(tb->pc + tb->size - 1);
776 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
777 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
pbrook99773bd2006-04-16 15:14:59 +0000778 (long)tb->pc, tb->size, flags1, flags2);
bellardfd6ce8f2003-05-14 19:00:11 +0000779 }
780 }
781 }
782}
783
784#endif
785
786/* invalidate one TB */
787static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
788 int next_offset)
789{
790 TranslationBlock *tb1;
791 for(;;) {
792 tb1 = *ptb;
793 if (tb1 == tb) {
794 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
795 break;
796 }
797 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
798 }
799}
800
bellard9fa3e852004-01-04 18:06:42 +0000801static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
802{
803 TranslationBlock *tb1;
804 unsigned int n1;
805
806 for(;;) {
807 tb1 = *ptb;
808 n1 = (long)tb1 & 3;
809 tb1 = (TranslationBlock *)((long)tb1 & ~3);
810 if (tb1 == tb) {
811 *ptb = tb1->page_next[n1];
812 break;
813 }
814 ptb = &tb1->page_next[n1];
815 }
816}
817
/* Remove the record of jump slot 'n' of 'tb' from the circular list of
   incoming jumps threaded through the destination TB. */
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            /* low 2 bits of each link: jump slot index, or 2 = list head */
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
845
846/* reset the jump entry 'n' of a TB so that it is not chained to
847 another TB */
848static inline void tb_reset_jump(TranslationBlock *tb, int n)
849{
850 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
851}
852
/* Remove 'tb' from every lookup structure: the physical hash table, the
   per-page TB lists, the per-CPU jump caches, and the jump-chaining
   lists.  'page_addr' names a page whose PageDesc the caller is already
   handling itself (or -1 for none). */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list (skipping the caller's page) */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from each CPU's virtual-PC jump cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        /* low 2 bits: jump slot of the jumping TB; 2 marks the list head */
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
908
/* Set bits [start, start+len) in the bitmap 'tab' (LSB-first within
   each byte).  Used to mark which bytes of a page hold translated code. */
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end = start + len;
    uint8_t *p = tab + (start >> 3);
    int first_mask = 0xff << (start & 7);

    if ((start & ~7) == (end & ~7)) {
        /* The whole range falls within a single byte. */
        if (start < end) {
            *p |= first_mask & ~(0xff << (end & 7));
        }
        return;
    }
    /* Leading partial byte. */
    *p++ |= first_mask;
    /* Whole bytes in the middle. */
    int pos;
    for (pos = (start + 8) & ~7; pos < (end & ~7); pos += 8) {
        *p++ = 0xff;
    }
    /* Trailing partial byte, if any. */
    if (pos < end) {
        *p |= ~(0xff << (end & 7));
    }
}
935
936static void build_page_bitmap(PageDesc *p)
937{
938 int n, tb_start, tb_end;
939 TranslationBlock *tb;
ths3b46e622007-09-17 08:09:54 +0000940
pbrookb2a70812008-06-09 13:57:23 +0000941 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
bellard9fa3e852004-01-04 18:06:42 +0000942
943 tb = p->first_tb;
944 while (tb != NULL) {
945 n = (long)tb & 3;
946 tb = (TranslationBlock *)((long)tb & ~3);
947 /* NOTE: this is subtle as a TB may span two physical pages */
948 if (n == 0) {
949 /* NOTE: tb_end may be after the end of the page, but
950 it is not a problem */
951 tb_start = tb->pc & ~TARGET_PAGE_MASK;
952 tb_end = tb_start + tb->size;
953 if (tb_end > TARGET_PAGE_SIZE)
954 tb_end = TARGET_PAGE_SIZE;
955 } else {
956 tb_start = 0;
957 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
958 }
959 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
960 tb = tb->page_next[n];
961 }
962}
963
/* Translate the guest code at (pc, cs_base, flags) into a new TB and
   link it into the hash/page structures.  If the TB pool is exhausted,
   everything is flushed first, so allocation cannot ultimately fail. */
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    /* Advance the buffer pointer past the emitted code, rounded up to
       CODE_GEN_ALIGN. */
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed (the TB may span two guest pages) */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
ths3b46e622007-09-17 08:09:54 +00001001
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    /* After enough write faults on a code page, build the fine-grained
       bitmap so future small writes can be filtered cheaply. */
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        /* low 2 bits of the link select which of the TB's pages this is */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
1112
/* len must be <= 8 and start must be a multiple of len */
/* Fast path for small writes: consult the page's code bitmap (when one
   exists) and only fall back to the full range invalidation when the
   written bytes actually overlap translated code. */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        /* Shift the relevant bits down and test the low 'len' of them. */
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
1139
bellard9fa3e852004-01-04 18:06:42 +00001140#if !defined(CONFIG_SOFTMMU)
/* Invalidate every TB on the page containing 'addr' (user-mode only).
   'pc'/'puc' describe the faulting write so that, with precise SMC, the
   currently executing TB can be restarted correctly. */
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        /* identify the TB the faulting host pc belongs to */
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        /* low 2 bits of the link select which of the TB's pages this is */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
bellard9fa3e852004-01-04 18:06:42 +00001199#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001200
/* add the tb in the target page and protect it if necessary */
/* 'n' is the TB's page slot (0 or 1) and is encoded into the low bits of
   the per-page list link so walkers know which slot each entry uses. */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
    /* remember whether the page previously held any TB at all */
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        /* a host page may cover several target pages; accumulate their
           flags and clear PAGE_WRITE on each */
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
1254
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done. */
    mmap_lock();
    /* add in the physical hash table (push at head of bucket) */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    /* tag 2 marks the head of the (empty) incoming-jump circular list */
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses (0xffff means the slot is unused) */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
1294
bellarda513fe12003-05-27 23:29:48 +00001295/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1296 tb[1].tc_ptr. Return NULL if not found */
1297TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1298{
1299 int m_min, m_max, m;
1300 unsigned long v;
1301 TranslationBlock *tb;
1302
1303 if (nb_tbs <= 0)
1304 return NULL;
1305 if (tc_ptr < (unsigned long)code_gen_buffer ||
1306 tc_ptr >= (unsigned long)code_gen_ptr)
1307 return NULL;
1308 /* binary search (cf Knuth) */
1309 m_min = 0;
1310 m_max = nb_tbs - 1;
1311 while (m_min <= m_max) {
1312 m = (m_min + m_max) >> 1;
1313 tb = &tbs[m];
1314 v = (unsigned long)tb->tc_ptr;
1315 if (v == tc_ptr)
1316 return tb;
1317 else if (tc_ptr < v) {
1318 m_max = m - 1;
1319 } else {
1320 m_min = m + 1;
1321 }
ths5fafdf22007-09-16 21:08:06 +00001322 }
bellarda513fe12003-05-27 23:29:48 +00001323 return &tbs[m_max];
1324}
bellard75012672003-06-21 13:11:07 +00001325
bellardea041c02003-06-25 16:16:50 +00001326static void tb_reset_jump_recursive(TranslationBlock *tb);
1327
/* Detach jump slot 'n' of 'tb': remove it from the destination TB's
   circular incoming-jump list, un-patch the branch, then recursively
   reset the destination TB's own jumps. */
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list (links are tagged: low bits = slot, 2 = head) */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure now that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}
1366
1367static void tb_reset_jump_recursive(TranslationBlock *tb)
1368{
1369 tb_reset_jump_recursive2(tb, 0);
1370 tb_reset_jump_recursive2(tb, 1);
1371}
1372
bellard1fddef42005-04-17 19:16:13 +00001373#if defined(TARGET_HAS_ICE)
Paul Brook94df27f2010-02-28 23:47:45 +00001374#if defined(CONFIG_USER_ONLY)
/* Invalidate any TB containing 'pc' so it is retranslated with the
   breakpoint check.  In user mode the pc can be used directly as the
   page address to invalidate. */
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
1379#else
/* Invalidate any TB containing 'pc' so it is retranslated with the
   breakpoint check.  With softmmu the guest-virtual pc must first be
   translated to a RAM address. */
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    /* combine the page's ram offset with the offset of pc in its page */
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
bellardc27004e2005-01-03 23:35:10 +00001397#endif
Paul Brook94df27f2010-02-28 23:47:45 +00001398#endif /* TARGET_HAS_ICE */
bellardd720b932004-04-25 17:57:43 +00001399
Paul Brookc527ee82010-03-01 03:31:14 +00001400#if defined(CONFIG_USER_ONLY)
1401void cpu_watchpoint_remove_all(CPUState *env, int mask)
1402
1403{
1404}
1405
1406int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1407 int flags, CPUWatchpoint **watchpoint)
1408{
1409 return -ENOSYS;
1410}
1411#else
/* Add a watchpoint. */
/* Returns 0 on success (optionally storing the new watchpoint through
   'watchpoint'), -EINVAL for an unsupported length or misaligned address. */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    /* len_mask is valid only for power-of-2 len, which is checked below */
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = qemu_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    /* force the slow path so the watched address is actually checked */
    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
1443
/* Remove a specific watchpoint. */
/* Matches on address, length and flags; returns 0 if found and removed,
   -ENOENT otherwise. */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        /* ignore the transient BP_WATCHPOINT_HIT bit when comparing flags */
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}
1460
/* Remove a specific watchpoint by reference. */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    /* flush before freeing: the TLB flush reads watchpoint->vaddr */
    tlb_flush_page(env, watchpoint->vaddr);

    qemu_free(watchpoint);
}
1470
/* Remove all matching watchpoints. */
/* 'mask' selects which flag classes to drop (e.g. BP_GDB, BP_CPU). */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    /* _SAFE variant required: removal frees the current element */
    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
Paul Brookc527ee82010-03-01 03:31:14 +00001481#endif
aliguoria1d1bb32008-11-18 20:07:32 +00001482
1483/* Add a breakpoint. */
1484int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1485 CPUBreakpoint **breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001486{
bellard1fddef42005-04-17 19:16:13 +00001487#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001488 CPUBreakpoint *bp;
ths3b46e622007-09-17 08:09:54 +00001489
aliguoria1d1bb32008-11-18 20:07:32 +00001490 bp = qemu_malloc(sizeof(*bp));
aliguoria1d1bb32008-11-18 20:07:32 +00001491
1492 bp->pc = pc;
1493 bp->flags = flags;
1494
aliguori2dc9f412008-11-18 20:56:59 +00001495 /* keep all GDB-injected breakpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001496 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001497 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001498 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001499 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001500
1501 breakpoint_invalidate(env, pc);
1502
1503 if (breakpoint)
1504 *breakpoint = bp;
1505 return 0;
1506#else
1507 return -ENOSYS;
1508#endif
1509}
1510
1511/* Remove a specific breakpoint. */
1512int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1513{
1514#if defined(TARGET_HAS_ICE)
1515 CPUBreakpoint *bp;
1516
Blue Swirl72cf2d42009-09-12 07:36:22 +00001517 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00001518 if (bp->pc == pc && bp->flags == flags) {
1519 cpu_breakpoint_remove_by_ref(env, bp);
bellard4c3a88a2003-07-26 12:06:08 +00001520 return 0;
aliguoria1d1bb32008-11-18 20:07:32 +00001521 }
bellard4c3a88a2003-07-26 12:06:08 +00001522 }
aliguoria1d1bb32008-11-18 20:07:32 +00001523 return -ENOENT;
bellard4c3a88a2003-07-26 12:06:08 +00001524#else
aliguoria1d1bb32008-11-18 20:07:32 +00001525 return -ENOSYS;
bellard4c3a88a2003-07-26 12:06:08 +00001526#endif
1527}
1528
aliguoria1d1bb32008-11-18 20:07:32 +00001529/* Remove a specific breakpoint by reference. */
1530void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001531{
bellard1fddef42005-04-17 19:16:13 +00001532#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001533 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
bellardd720b932004-04-25 17:57:43 +00001534
aliguoria1d1bb32008-11-18 20:07:32 +00001535 breakpoint_invalidate(env, breakpoint->pc);
1536
1537 qemu_free(breakpoint);
1538#endif
1539}
1540
1541/* Remove all matching breakpoints. */
1542void cpu_breakpoint_remove_all(CPUState *env, int mask)
1543{
1544#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001545 CPUBreakpoint *bp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001546
Blue Swirl72cf2d42009-09-12 07:36:22 +00001547 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001548 if (bp->flags & mask)
1549 cpu_breakpoint_remove_by_ref(env, bp);
aliguoric0ce9982008-11-25 22:13:57 +00001550 }
bellard4c3a88a2003-07-26 12:06:08 +00001551#endif
1552}
1553
bellardc33a3462003-07-29 20:50:33 +00001554/* enable or disable single step mode. EXCP_DEBUG is returned by the
1555 CPU loop after each instruction */
1556void cpu_single_step(CPUState *env, int enabled)
1557{
bellard1fddef42005-04-17 19:16:13 +00001558#if defined(TARGET_HAS_ICE)
bellardc33a3462003-07-29 20:50:33 +00001559 if (env->singlestep_enabled != enabled) {
1560 env->singlestep_enabled = enabled;
aliguorie22a25c2009-03-12 20:12:48 +00001561 if (kvm_enabled())
1562 kvm_update_guest_debug(env, 0);
1563 else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01001564 /* must flush all the translated code to avoid inconsistencies */
aliguorie22a25c2009-03-12 20:12:48 +00001565 /* XXX: only flush what is necessary */
1566 tb_flush(env);
1567 }
bellardc33a3462003-07-29 20:50:33 +00001568 }
1569#endif
1570}
1571
/* enable or disable low levels log */
/* Set the global log mask.  Opens the log file on the first enable
 * (append mode once log_append has been set) and closes it when the
 * mask is cleared.  Exits the process if the log file cannot be opened. */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif !defined(_WIN32)
        /* Win32 doesn't support line-buffering and requires size >= 2 */
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        /* Subsequent reopens append so earlier output is preserved. */
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}
1599
/* Change the log file path, close any open log, and re-open it via
 * cpu_set_log() with the current log level.
 *
 * NOTE(review): the previous strdup()'d logfilename is never freed;
 * since the initial value may point at a static default string, an
 * unconditional free would be wrong, so the small leak is tolerated. */
void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}
bellardc33a3462003-07-29 20:50:33 +00001609
/* Force the CPU out of its current translation-block chain: clear
 * env->current_tb and recursively unlink the jumps of the TB that may
 * currently be executing.  interrupt_lock serialises concurrent
 * unlink attempts. */
static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}
1629
/* Post the interrupt bits in @mask to @env's interrupt_request. */
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

#ifndef CONFIG_USER_ONLY
    /*
     * If called from iothread context, wake the target cpu in
     * case its halted.
     */
    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
        return;
    }
#endif

    if (use_icount) {
        /* Saturate the icount decrementer so the executing TB stops at
           the next instruction-count check. */
        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
        /* Raising a new interrupt (bit not already pending) outside an
           I/O instruction would break deterministic execution. */
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
#endif
    } else {
        /* Break TB chaining so the interrupt is noticed soon. */
        cpu_unlink_tb(env);
    }
}
1661
/* Clear the bits in @mask from the CPU's pending interrupt_request. */
void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}
1666
/* Request that the CPU leave its execution loop: raise exit_request and
 * break any TB chaining currently in flight. */
void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}
1672
/* Table of recognised log categories: mask bit, user-visible name and
   help text, terminated by a zero-mask sentinel.  Consumed by
   cpu_str_to_log_mask() below. */
const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
1704
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001705#ifndef CONFIG_USER_ONLY
/* List of registered CPUPhysMemoryClient callbacks; notified of mapping
   and dirty-logging changes by the cpu_notify_* helpers below. */
static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
    = QLIST_HEAD_INITIALIZER(memory_client_list);
1708
1709static void cpu_notify_set_memory(target_phys_addr_t start_addr,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001710 ram_addr_t size,
1711 ram_addr_t phys_offset)
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001712{
1713 CPUPhysMemoryClient *client;
1714 QLIST_FOREACH(client, &memory_client_list, list) {
1715 client->set_memory(client, start_addr, size, phys_offset);
1716 }
1717}
1718
1719static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001720 target_phys_addr_t end)
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001721{
1722 CPUPhysMemoryClient *client;
1723 QLIST_FOREACH(client, &memory_client_list, list) {
1724 int r = client->sync_dirty_bitmap(client, start, end);
1725 if (r < 0)
1726 return r;
1727 }
1728 return 0;
1729}
1730
1731static int cpu_notify_migration_log(int enable)
1732{
1733 CPUPhysMemoryClient *client;
1734 QLIST_FOREACH(client, &memory_client_list, list) {
1735 int r = client->migration_log(client, enable);
1736 if (r < 0)
1737 return r;
1738 }
1739 return 0;
1740}
1741
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001742static void phys_page_for_each_1(CPUPhysMemoryClient *client,
1743 int level, void **lp)
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001744{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001745 int i;
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001746
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001747 if (*lp == NULL) {
1748 return;
1749 }
1750 if (level == 0) {
1751 PhysPageDesc *pd = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00001752 for (i = 0; i < L2_SIZE; ++i) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001753 if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
1754 client->set_memory(client, pd[i].region_offset,
1755 TARGET_PAGE_SIZE, pd[i].phys_offset);
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001756 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001757 }
1758 } else {
1759 void **pp = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00001760 for (i = 0; i < L2_SIZE; ++i) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001761 phys_page_for_each_1(client, level - 1, pp + i);
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001762 }
1763 }
1764}
1765
1766static void phys_page_for_each(CPUPhysMemoryClient *client)
1767{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001768 int i;
1769 for (i = 0; i < P_L1_SIZE; ++i) {
1770 phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
1771 l1_phys_map + 1);
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001772 }
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001773}
1774
/* Register @client and immediately replay the current physical memory
 * map to it via phys_page_for_each(). */
void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_INSERT_HEAD(&memory_client_list, client, list);
    phys_page_for_each(client);
}
1780
/* Remove a previously registered physical-memory client from the
 * notification list. */
void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_REMOVE(client, list);
}
1785#endif
1786
/* Return non-zero iff the n-byte prefix s1[0..n) equals the whole
   NUL-terminated string s2. */
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != (size_t)n) {
        return 0;
    }
    return memcmp(s1, s2, n) == 0;
}
ths3b46e622007-09-17 08:09:54 +00001793
bellardf193c792004-03-21 17:06:25 +00001794/* takes a comma separated list of log masks. Return 0 if error. */
1795int cpu_str_to_log_mask(const char *str)
1796{
blueswir1c7cd6a32008-10-02 18:27:46 +00001797 const CPULogItem *item;
bellardf193c792004-03-21 17:06:25 +00001798 int mask;
1799 const char *p, *p1;
1800
1801 p = str;
1802 mask = 0;
1803 for(;;) {
1804 p1 = strchr(p, ',');
1805 if (!p1)
1806 p1 = p + strlen(p);
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001807 if(cmp1(p,p1-p,"all")) {
1808 for(item = cpu_log_items; item->mask != 0; item++) {
1809 mask |= item->mask;
1810 }
1811 } else {
1812 for(item = cpu_log_items; item->mask != 0; item++) {
1813 if (cmp1(p, p1 - p, item->name))
1814 goto found;
1815 }
1816 return 0;
bellardf193c792004-03-21 17:06:25 +00001817 }
bellardf193c792004-03-21 17:06:25 +00001818 found:
1819 mask |= item->mask;
1820 if (*p1 != ',')
1821 break;
1822 p = p1 + 1;
1823 }
1824 return mask;
1825}
bellardea041c02003-06-25 16:16:50 +00001826
/* Print a fatal error to stderr (and to the qemu log, if enabled),
 * dump the CPU state, and abort().  Never returns. */
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);   /* second copy: the list is consumed twice */
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        /* NOTE(review): presumably restores the default SIGABRT action so
           abort() terminates even if the guest installed a handler —
           confirm against linux-user signal emulation. */
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
1866
thsc5be9f02007-02-28 20:20:53 +00001867CPUState *cpu_copy(CPUState *env)
1868{
ths01ba9812007-12-09 02:22:57 +00001869 CPUState *new_env = cpu_init(env->cpu_model_str);
thsc5be9f02007-02-28 20:20:53 +00001870 CPUState *next_cpu = new_env->next_cpu;
1871 int cpu_index = new_env->cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001872#if defined(TARGET_HAS_ICE)
1873 CPUBreakpoint *bp;
1874 CPUWatchpoint *wp;
1875#endif
1876
thsc5be9f02007-02-28 20:20:53 +00001877 memcpy(new_env, env, sizeof(CPUState));
aliguori5a38f082009-01-15 20:16:51 +00001878
1879 /* Preserve chaining and index. */
thsc5be9f02007-02-28 20:20:53 +00001880 new_env->next_cpu = next_cpu;
1881 new_env->cpu_index = cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001882
1883 /* Clone all break/watchpoints.
1884 Note: Once we support ptrace with hw-debug register access, make sure
1885 BP_CPU break/watchpoints are handled correctly on clone. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00001886 QTAILQ_INIT(&env->breakpoints);
1887 QTAILQ_INIT(&env->watchpoints);
aliguori5a38f082009-01-15 20:16:51 +00001888#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001889 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001890 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1891 }
Blue Swirl72cf2d42009-09-12 07:36:22 +00001892 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001893 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1894 wp->flags, NULL);
1895 }
1896#endif
1897
thsc5be9f02007-02-28 20:20:53 +00001898 return new_env;
1899}
1900
bellard01243112004-01-04 15:48:17 +00001901#if !defined(CONFIG_USER_ONLY)
1902
edgar_igl5c751e92008-05-06 08:44:21 +00001903static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1904{
1905 unsigned int i;
1906
1907 /* Discard jump cache entries for any tb which might potentially
1908 overlap the flushed page. */
1909 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1910 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001911 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001912
1913 i = tb_jmp_cache_hash_page(addr);
1914 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001915 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001916}
1917
/* Template for an invalid TLB entry: all fields are -1 so no address
   comparison can match; used by the flush helpers below. */
static CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read  = -1,
    .addr_write = -1,
    .addr_code  = -1,
    .addend     = -1,
};
1924
bellardee8b7022004-02-03 23:35:10 +00001925/* NOTE: if flush_global is true, also flush global entries (not
1926 implemented yet) */
1927void tlb_flush(CPUState *env, int flush_global)
bellard33417e72003-08-10 21:47:01 +00001928{
bellard33417e72003-08-10 21:47:01 +00001929 int i;
bellard01243112004-01-04 15:48:17 +00001930
bellard9fa3e852004-01-04 18:06:42 +00001931#if defined(DEBUG_TLB)
1932 printf("tlb_flush:\n");
1933#endif
bellard01243112004-01-04 15:48:17 +00001934 /* must reset current TB so that interrupts cannot modify the
1935 links while we are modifying them */
1936 env->current_tb = NULL;
1937
bellard33417e72003-08-10 21:47:01 +00001938 for(i = 0; i < CPU_TLB_SIZE; i++) {
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001939 int mmu_idx;
1940 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
Igor Kovalenko08738982009-07-12 02:15:40 +04001941 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001942 }
bellard33417e72003-08-10 21:47:01 +00001943 }
bellard9fa3e852004-01-04 18:06:42 +00001944
bellard8a40a182005-11-20 10:35:40 +00001945 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
bellard9fa3e852004-01-04 18:06:42 +00001946
Paul Brookd4c430a2010-03-17 02:14:28 +00001947 env->tlb_flush_addr = -1;
1948 env->tlb_flush_mask = 0;
bellarde3db7222005-01-26 22:00:47 +00001949 tlb_flush_count++;
bellard33417e72003-08-10 21:47:01 +00001950}
1951
bellard274da6b2004-05-20 21:56:27 +00001952static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
bellard61382a52003-10-27 21:22:23 +00001953{
ths5fafdf22007-09-16 21:08:06 +00001954 if (addr == (tlb_entry->addr_read &
bellard84b7b8e2005-11-28 21:19:04 +00001955 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00001956 addr == (tlb_entry->addr_write &
bellard84b7b8e2005-11-28 21:19:04 +00001957 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00001958 addr == (tlb_entry->addr_code &
bellard84b7b8e2005-11-28 21:19:04 +00001959 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
Igor Kovalenko08738982009-07-12 02:15:40 +04001960 *tlb_entry = s_cputlb_empty_entry;
bellard84b7b8e2005-11-28 21:19:04 +00001961 }
bellard61382a52003-10-27 21:22:23 +00001962}
1963
/* Flush all TLB entries (in every MMU mode) that map the virtual page
 * containing @addr, plus the matching jump-cache entries.  Falls back
 * to a full flush when @addr lies inside a recorded large-page range. */
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* Check if we need to flush due to large pages. */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        tlb_flush(env, 1);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    /* All modes share the same index for a given page. */
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

    tlb_flush_jmp_cache(env, addr);
}
1993
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    /* Clearing CODE_DIRTY_FLAG rearms the TLB write-protection (see
       cpu_physical_memory_reset_dirty), forcing writes to this page
       through the slow path where self-modifying code is caught. */
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}
2002
/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    /* NOTE(review): env and vaddr are unused here; only the dirty flag
       for the physical page is touched. */
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
}
2010
ths5fafdf22007-09-16 21:08:06 +00002011static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
bellard1ccde1c2004-02-06 19:46:14 +00002012 unsigned long start, unsigned long length)
2013{
2014 unsigned long addr;
bellard84b7b8e2005-11-28 21:19:04 +00002015 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2016 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
bellard1ccde1c2004-02-06 19:46:14 +00002017 if ((addr - start) < length) {
pbrook0f459d12008-06-09 00:20:13 +00002018 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
bellard1ccde1c2004-02-06 19:46:14 +00002019 }
2020 }
2021}
2022
/* Note: start and end must be within the same ram block. */
/* Clear @dirty_flags for the page-aligned range [start, end) and rearm
 * the TLB write-protection on every CPU so the pages get marked dirty
 * again on the next write. */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below. */
    if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
        != (end - 1) - start) {
        abort();
    }

    /* Rearm TLB_NOTDIRTY in every mode of every CPU's TLB. */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for(i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
        }
    }
}
2058
aliguori74576192008-10-06 14:02:03 +00002059int cpu_physical_memory_set_dirty_tracking(int enable)
2060{
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002061 int ret = 0;
aliguori74576192008-10-06 14:02:03 +00002062 in_migration = enable;
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002063 ret = cpu_notify_migration_log(!!enable);
2064 return ret;
aliguori74576192008-10-06 14:02:03 +00002065}
2066
/* Return non-zero when dirty tracking (migration) is enabled. */
int cpu_physical_memory_get_dirty_tracking(void)
{
    return in_migration;
}
2071
Anthony Liguoric227f092009-10-01 16:12:16 -05002072int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
2073 target_phys_addr_t end_addr)
aliguori2bec46d2008-11-24 20:21:41 +00002074{
Michael S. Tsirkin7b8f3b72010-01-27 22:07:21 +02002075 int ret;
Jan Kiszka151f7742009-05-01 20:52:47 +02002076
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002077 ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
Jan Kiszka151f7742009-05-01 20:52:47 +02002078 return ret;
aliguori2bec46d2008-11-24 20:21:41 +00002079}
2080
Anthony PERARDe5896b12011-02-07 12:19:23 +01002081int cpu_physical_log_start(target_phys_addr_t start_addr,
2082 ram_addr_t size)
2083{
2084 CPUPhysMemoryClient *client;
2085 QLIST_FOREACH(client, &memory_client_list, list) {
2086 if (client->log_start) {
2087 int r = client->log_start(client, start_addr, size);
2088 if (r < 0) {
2089 return r;
2090 }
2091 }
2092 }
2093 return 0;
2094}
2095
2096int cpu_physical_log_stop(target_phys_addr_t start_addr,
2097 ram_addr_t size)
2098{
2099 CPUPhysMemoryClient *client;
2100 QLIST_FOREACH(client, &memory_client_list, list) {
2101 if (client->log_stop) {
2102 int r = client->log_stop(client, start_addr, size);
2103 if (r < 0) {
2104 return r;
2105 }
2106 }
2107 }
2108 return 0;
2109}
2110
bellard3a7d9292005-08-21 09:26:42 +00002111static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2112{
Anthony Liguoric227f092009-10-01 16:12:16 -05002113 ram_addr_t ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00002114 void *p;
bellard3a7d9292005-08-21 09:26:42 +00002115
bellard84b7b8e2005-11-28 21:19:04 +00002116 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
pbrook5579c7f2009-04-11 14:47:08 +00002117 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2118 + tlb_entry->addend);
Marcelo Tosattie8902612010-10-11 15:31:19 -03002119 ram_addr = qemu_ram_addr_from_host_nofail(p);
bellard3a7d9292005-08-21 09:26:42 +00002120 if (!cpu_physical_memory_is_dirty(ram_addr)) {
pbrook0f459d12008-06-09 00:20:13 +00002121 tlb_entry->addr_write |= TLB_NOTDIRTY;
bellard3a7d9292005-08-21 09:26:42 +00002122 }
2123 }
2124}
2125
2126/* update the TLB according to the current state of the dirty bits */
2127void cpu_tlb_update_dirty(CPUState *env)
2128{
2129 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002130 int mmu_idx;
2131 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2132 for(i = 0; i < CPU_TLB_SIZE; i++)
2133 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2134 }
bellard3a7d9292005-08-21 09:26:42 +00002135}
2136
pbrook0f459d12008-06-09 00:20:13 +00002137static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002138{
pbrook0f459d12008-06-09 00:20:13 +00002139 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2140 tlb_entry->addr_write = vaddr;
bellard1ccde1c2004-02-06 19:46:14 +00002141}
2142
pbrook0f459d12008-06-09 00:20:13 +00002143/* update the TLB corresponding to virtual page vaddr
2144 so that it is no longer dirty */
2145static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002146{
bellard1ccde1c2004-02-06 19:46:14 +00002147 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002148 int mmu_idx;
bellard1ccde1c2004-02-06 19:46:14 +00002149
pbrook0f459d12008-06-09 00:20:13 +00002150 vaddr &= TARGET_PAGE_MASK;
bellard1ccde1c2004-02-06 19:46:14 +00002151 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002152 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2153 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
bellard9fa3e852004-01-04 18:06:42 +00002154}
2155
Paul Brookd4c430a2010-03-17 02:14:28 +00002156/* Our TLB does not support large pages, so remember the area covered by
2157 large pages and trigger a full TLB flush if these are invalidated. */
2158static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2159 target_ulong size)
2160{
2161 target_ulong mask = ~(size - 1);
2162
2163 if (env->tlb_flush_addr == (target_ulong)-1) {
2164 env->tlb_flush_addr = vaddr & mask;
2165 env->tlb_flush_mask = mask;
2166 return;
2167 }
2168 /* Extend the existing region to include the new page.
2169 This is a compromise between unnecessary flushes and the cost
2170 of maintaining a full variable size TLB. */
2171 mask &= env->tlb_flush_mask;
2172 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2173 mask <<= 1;
2174 }
2175 env->tlb_flush_addr &= mask;
2176 env->tlb_flush_mask = mask;
2177}
2178
/* Add a new TLB entry. At most one entry for a given virtual address
   is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
   supplied size is only used by tlb_flush_page. */
void tlb_set_page(CPUState *env, target_ulong vaddr,
                  target_phys_addr_t paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    unsigned long addend;
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    target_phys_addr_t iotlb;

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }
    /* Look up the physical page descriptor; unmapped pages are treated
       as unassigned I/O memory. */
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
           " prot=%x idx=%d pd=0x%08lx\n",
           vaddr, paddr, prot, mmu_idx, pd);
#endif

    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM: the iotlb entry is the ram offset, tagged so that
           writes go through the NOTDIRTY/ROM slow paths. */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address. */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        if (p) {
            iotlb += p->region_offset;
        } else {
            iotlb += paddr;
        }
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines. */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = io_mem_watch + paddr;
                address |= TLB_MMIO;
                break;
            }
        }
    }

    /* Fill in the TLB slot; addend maps guest vaddr to host pointer. */
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback. */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            /* Clean RAM page: trap writes so dirty tracking sees them. */
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}
2284
bellard01243112004-01-04 15:48:17 +00002285#else
2286
/* User-mode emulation: guest addresses map directly to host memory,
   so there is no softmmu TLB to flush -- this is a no-op. */
void tlb_flush(CPUState *env, int flush_global)
{
}
2290
/* No-op in user-mode emulation (no softmmu TLB). */
void tlb_flush_page(CPUState *env, target_ulong addr)
{
}
2294
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */

/* Accumulator state threaded through the recursive page-table walk. */
struct walk_memory_regions_data
{
    walk_memory_regions_fn fn;  /* callback invoked once per region */
    void *priv;                 /* opaque argument forwarded to fn */
    unsigned long start;        /* start of the open region, or -1ul if none */
    int prot;                   /* protection flags of the open region */
};
bellard9fa3e852004-01-04 18:06:42 +00002307
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002308static int walk_memory_regions_end(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00002309 abi_ulong end, int new_prot)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002310{
2311 if (data->start != -1ul) {
2312 int rc = data->fn(data->priv, data->start, end, data->prot);
2313 if (rc != 0) {
2314 return rc;
bellard9fa3e852004-01-04 18:06:42 +00002315 }
bellard33417e72003-08-10 21:47:01 +00002316 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002317
2318 data->start = (new_prot ? end : -1ul);
2319 data->prot = new_prot;
2320
2321 return 0;
2322}
2323
/* Recursively walk one level of the page-table radix tree rooted at *lp,
   which covers guest addresses starting at 'base'.  Level 0 entries are
   PageDesc leaves; higher levels are tables of pointers.  Contiguous runs
   of identically-protected pages are reported via walk_memory_regions_end.
   Returns the first non-zero callback result, or 0. */
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        /* Hole in the address space: close any open region here. */
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                /* Protection changed: flush the previous run. */
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            /* Each child at this level spans L2_BITS * level extra bits. */
            pa = base | ((abi_ulong)i <<
                (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}
2361
/* Walk the guest address space, invoking fn(priv, start, end, prot) for
   each maximal run of pages with identical protection.  Stops early and
   returns fn's result if it returns non-zero; otherwise returns 0. */
int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    unsigned long i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;    /* -1ul means "no region currently open" */
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    /* Flush the final open region, if any. */
    return walk_memory_regions_end(&data, 0, 0);
}
2382
Paul Brookb480d9b2010-03-12 23:23:29 +00002383static int dump_region(void *priv, abi_ulong start,
2384 abi_ulong end, unsigned long prot)
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002385{
2386 FILE *f = (FILE *)priv;
2387
Paul Brookb480d9b2010-03-12 23:23:29 +00002388 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2389 " "TARGET_ABI_FMT_lx" %c%c%c\n",
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002390 start, end, end - start,
2391 ((prot & PAGE_READ) ? 'r' : '-'),
2392 ((prot & PAGE_WRITE) ? 'w' : '-'),
2393 ((prot & PAGE_EXEC) ? 'x' : '-'));
2394
2395 return (0);
2396}
2397
2398/* dump memory mappings */
2399void page_dump(FILE *f)
2400{
2401 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2402 "start", "end", "size", "prot");
2403 walk_memory_regions(f, dump_region);
bellard33417e72003-08-10 21:47:01 +00002404}
2405
pbrook53a59602006-03-25 19:31:22 +00002406int page_get_flags(target_ulong address)
bellard33417e72003-08-10 21:47:01 +00002407{
bellard9fa3e852004-01-04 18:06:42 +00002408 PageDesc *p;
2409
2410 p = page_find(address >> TARGET_PAGE_BITS);
bellard33417e72003-08-10 21:47:01 +00002411 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00002412 return 0;
2413 return p->flags;
bellard33417e72003-08-10 21:47:01 +00002414}
2415
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held. */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid. */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        /* Remember original writability so the page can be temporarily
           write-protected for translated code and restored later. */
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside. */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}
2453
ths3d97b402007-11-02 19:02:07 +00002454int page_check_range(target_ulong start, target_ulong len, int flags)
2455{
2456 PageDesc *p;
2457 target_ulong end;
2458 target_ulong addr;
2459
Richard Henderson376a7902010-03-10 15:57:04 -08002460 /* This function should never be called with addresses outside the
2461 guest address space. If this assert fires, it probably indicates
2462 a missing call to h2g_valid. */
Blue Swirl338e9e62010-03-13 09:48:08 +00002463#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2464 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002465#endif
2466
Richard Henderson3e0650a2010-03-29 10:54:42 -07002467 if (len == 0) {
2468 return 0;
2469 }
Richard Henderson376a7902010-03-10 15:57:04 -08002470 if (start + len - 1 < start) {
2471 /* We've wrapped around. */
balrog55f280c2008-10-28 10:24:11 +00002472 return -1;
Richard Henderson376a7902010-03-10 15:57:04 -08002473 }
balrog55f280c2008-10-28 10:24:11 +00002474
ths3d97b402007-11-02 19:02:07 +00002475 end = TARGET_PAGE_ALIGN(start+len); /* must do before we loose bits in the next step */
2476 start = start & TARGET_PAGE_MASK;
2477
Richard Henderson376a7902010-03-10 15:57:04 -08002478 for (addr = start, len = end - start;
2479 len != 0;
2480 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
ths3d97b402007-11-02 19:02:07 +00002481 p = page_find(addr >> TARGET_PAGE_BITS);
2482 if( !p )
2483 return -1;
2484 if( !(p->flags & PAGE_VALID) )
2485 return -1;
2486
bellarddae32702007-11-14 10:51:00 +00002487 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
ths3d97b402007-11-02 19:02:07 +00002488 return -1;
bellarddae32702007-11-14 10:51:00 +00002489 if (flags & PAGE_WRITE) {
2490 if (!(p->flags & PAGE_WRITE_ORG))
2491 return -1;
2492 /* unprotect the page if it was put read-only because it
2493 contains translated code */
2494 if (!(p->flags & PAGE_WRITE)) {
2495 if (!page_unprotect(addr, 0, NULL))
2496 return -1;
2497 }
2498 return 0;
2499 }
ths3d97b402007-11-02 19:02:07 +00002500 }
2501 return 0;
2502}
2503
/* called from signal handler: invalidate the code and unprotect the
   page.  Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok. */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        /* One host page can span several target pages; process the
           whole host page so mprotect covers all of them. */
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;   /* accumulate the union of protections */

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
2551
/* No-op in user-mode emulation: dirty tracking only exists for the
   softmmu TLB. */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
bellard9fa3e852004-01-04 18:06:42 +00002556#endif /* defined(CONFIG_USER_ONLY) */
2557
pbrooke2eef172008-06-08 01:09:01 +00002558#if !defined(CONFIG_USER_ONLY)
pbrook8da3ff12008-12-01 18:59:50 +00002559
/* Byte offset of addr within its page; used to index the subpage
   tables below. */
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;    /* guest-physical base of the page */
    /* NOTE(review): indexed by SUBPAGE_IDX, i.e. one entry per BYTE of
       the page rather than per sub-region -- memory hungry but simple. */
    ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
    ram_addr_t region_offset[TARGET_PAGE_SIZE];
} subpage_t;
2566
Anthony Liguoric227f092009-10-01 16:12:16 -05002567static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2568 ram_addr_t memory, ram_addr_t region_offset);
Richard Hendersonf6405242010-04-22 16:47:31 -07002569static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2570 ram_addr_t orig_memory,
2571 ram_addr_t region_offset);
/* Compute the sub-page byte range [start_addr2, end_addr2] that the
   mapping [start_addr, start_addr + orig_size) occupies within the page
   at 'addr', setting need_subpage when the mapping does not cover the
   whole page.  NOTE: silently reads 'orig_size' from the caller's
   scope in addition to the named parameters. */
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
2591
/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ. */
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
                                         ram_addr_t size,
                                         ram_addr_t phys_offset,
                                         ram_addr_t region_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;    /* also read implicitly by CHECK_SUBPAGE */
    subpage_t *subpage;

    /* Notify listeners (e.g. KVM) of the mapping change. */
    cpu_notify_set_memory(start_addr, size, phys_offset);

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            /* Page already mapped: may need to split it into a subpage. */
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    /* Page is already a subpage container: reuse it. */
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                /* RAM/ROM backing advances page by page through the region. */
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            /* Fresh mapping for this page. */
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
2680
bellardba863452006-09-24 18:41:10 +00002681/* XXX: temporary until new memory mapping API */
Anthony Liguoric227f092009-10-01 16:12:16 -05002682ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
bellardba863452006-09-24 18:41:10 +00002683{
2684 PhysPageDesc *p;
2685
2686 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2687 if (!p)
2688 return IO_MEM_UNASSIGNED;
2689 return p->phys_offset;
2690}
2691
Anthony Liguoric227f092009-10-01 16:12:16 -05002692void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002693{
2694 if (kvm_enabled())
2695 kvm_coalesce_mmio_region(addr, size);
2696}
2697
Anthony Liguoric227f092009-10-01 16:12:16 -05002698void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002699{
2700 if (kvm_enabled())
2701 kvm_uncoalesce_mmio_region(addr, size);
2702}
2703
/* Flush any MMIO writes buffered by KVM's coalescing machinery. */
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (!kvm_enabled()) {
        return;
    }
    kvm_flush_coalesced_mmio_buffer();
}
2709
Marcelo Tosattic9027602010-03-01 20:25:08 -03002710#if defined(__linux__) && !defined(TARGET_S390X)
2711
#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

/* Return the filesystem block size (== huge page size on hugetlbfs) of
   the mount containing 'path', or 0 on statfs failure (error printed).
   Warns when the path is not actually on hugetlbfs. */
static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int err;

    /* statfs may be interrupted by a signal; retry on EINTR. */
    for (;;) {
        err = statfs(path, &fs);
        if (err == 0 || errno != EINTR) {
            break;
        }
    }

    if (err != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC) {
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
    }

    return fs.f_bsize;
}
2735
/* Allocate 'memory' bytes backed by a temporary hugetlbfs file created
   under 'path'.  On success returns the mmap'ed area and stores the
   backing fd in block->fd; on any failure returns NULL so the caller
   can fall back to ordinary allocation. */
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    /* Refuse requests smaller than a single huge page. */
    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    /* Unlink immediately: the open fd / mapping keeps the file alive. */
    unlink(filename);
    free(filename);

    /* Round the size up to a whole number of huge pages. */
    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }
    block->fd = fd;
    return area;
}
2804#endif
2805
/* Find the tightest gap between registered RAM blocks large enough to
   hold 'size' bytes and return its start offset (0 if no blocks exist).
   NOTE(review): 'mingap'/'next' are initialized from ULONG_MAX although
   they are ram_addr_t -- assumes ram_addr_t is no wider than unsigned
   long; verify on hosts where the two differ. */
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = 0, mingap = ULONG_MAX;

    if (QLIST_EMPTY(&ram_list.blocks))
        return 0;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = ULONG_MAX;

        end = block->offset + block->length;

        /* Find the start of the nearest block at or after this one's end. */
        QLIST_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        /* Keep the smallest gap that still fits the request. */
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }
    return offset;
}
2831
2832static ram_addr_t last_ram_offset(void)
2833{
Alex Williamsond17b5282010-06-25 11:08:38 -06002834 RAMBlock *block;
2835 ram_addr_t last = 0;
2836
2837 QLIST_FOREACH(block, &ram_list.blocks, next)
2838 last = MAX(last, block->offset + block->length);
2839
2840 return last;
2841}
2842
/* Register a new RAM block of 'size' bytes under the given device/name
   id.  If 'host' is non-NULL it is used as caller-provided backing
   storage (marked RAM_PREALLOC_MASK so it is never freed/remapped
   here); otherwise backing memory is allocated: hugetlbfs via
   -mem-path when available, else anonymous memory.  Returns the new
   block's ram offset.  Aborts on duplicate block ids. */
ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
                                   ram_addr_t size, void *host)
{
    RAMBlock *new_block, *block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = qemu_mallocz(sizeof(*new_block));

    /* Prefix the name with the qdev path to make the id unique. */
    if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
        char *id = dev->parent_bus->info->get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            qemu_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* Duplicate block ids are a programming error. */
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (!strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }

    if (host) {
        new_block->host = host;
        /* Caller owns this memory: never free or remap it ourselves. */
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                /* hugetlbfs failed: fall back to anonymous memory. */
                new_block->host = qemu_vmalloc(size);
                qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
            /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
            new_block->host = mmap((void*)0x1000000, size,
                                   PROT_EXEC|PROT_READ|PROT_WRITE,
                                   MAP_SHARED | MAP_ANONYMOUS, -1, 0);
#else
            new_block->host = qemu_vmalloc(size);
#endif
            qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
        }
    }

    new_block->offset = find_ram_offset(size);
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    /* Grow the dirty bitmap and mark the new range fully dirty. */
    ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
                                       last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}
2911
/* Convenience wrapper around qemu_ram_alloc_from_ptr: allocate a RAM
   block letting QEMU provide the backing storage. */
ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
{
    return qemu_ram_alloc_from_ptr(dev, name, size, NULL);
}
bellarde9a1ab12007-02-08 23:08:38 +00002916
/* Unregister and release the RAM block whose offset is 'addr'.  Does
   nothing if no block matches.  The backing memory is released
   according to how it was allocated; caller-preallocated blocks are
   left untouched. */
void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (block->flags & RAM_PREALLOC_MASK) {
                /* Caller-provided memory: not ours to free. */
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    /* hugetlbfs-backed: unmap and close the backing fd. */
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    /* file_ram_alloc fallback path used qemu_vmalloc. */
                    qemu_vfree(block->host);
                }
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                qemu_vfree(block->host);
#endif
            }
            qemu_free(block);
            return;
        }
    }

}
2948
#ifndef _WIN32
/* Re-create the host mapping backing guest RAM range [addr, addr+length)
 * in place (MAP_FIXED), discarding the old page contents — used e.g. to
 * recover a page poisoned by a hardware memory error.  Aborts the process
 * if the kernel does not give back the same virtual address. */
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                /* Caller-owned memory: nothing we can safely remap. */
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
                        /* Remap from the same backing file at the same
                           offset the block was originally mapped from. */
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    /* Match the PROT_EXEC|MAP_SHARED mapping used at
                       allocation time on S390X/KVM. */
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    /* NOTE(review): "%lx" assumes ram_addr_t is unsigned
                     * long; on hosts where ram_addr_t is wider than long
                     * this format is wrong — confirm against RAM_ADDR_FMT. */
                    fprintf(stderr, "Could not remap addr: %lx@%lx\n",
                            length, addr);
                    exit(1);
                }
                qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */
3006
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            /* Move this entry to the start of the list (MRU ordering
               speeds up subsequent lookups of the same block). */
            if (block != QLIST_FIRST(&ram_list.blocks)) {
                QLIST_REMOVE(block, next);
                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
            }
            return block->host + (addr - block->offset);
        }
    }

    /* An address outside every registered block is a programming error. */
    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}
3035
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02003036/* Return a host pointer to ram allocated with qemu_ram_alloc.
3037 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3038 */
3039void *qemu_safe_ram_ptr(ram_addr_t addr)
3040{
3041 RAMBlock *block;
3042
3043 QLIST_FOREACH(block, &ram_list.blocks, next) {
3044 if (addr - block->offset < block->length) {
3045 return block->host + (addr - block->offset);
3046 }
3047 }
3048
3049 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3050 abort();
3051
3052 return NULL;
3053}
3054
/* Translate a host pointer back to a guest ram_addr_t.  On success stores
 * the offset in *ram_addr and returns 0; returns -1 if the pointer lies in
 * no registered RAM block. */
int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* NOTE(review): host - block->host is a signed ptrdiff_t; for
         * pointers below block->host the test relies on the comparison
         * with the unsigned length rejecting negative values — confirm. */
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }
    return -1;
}
Alex Williamsonf471a172010-06-11 11:11:42 -06003068
Marcelo Tosattie8902612010-10-11 15:31:19 -03003069/* Some of the softmmu routines need to translate from a host pointer
3070 (typically a TLB entry) back to a ram offset. */
3071ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3072{
3073 ram_addr_t ram_addr;
Alex Williamsonf471a172010-06-11 11:11:42 -06003074
Marcelo Tosattie8902612010-10-11 15:31:19 -03003075 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3076 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3077 abort();
3078 }
3079 return ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00003080}
3081
/* Read handlers for guest accesses to unassigned physical memory: return 0,
 * optionally log, and on SPARC/MicroBlaze raise the architectural
 * unassigned-access fault (size 1, 2 or 4 bytes respectively). */
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 1);
#endif
    return 0;
}

static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 2);
#endif
    return 0;
}

static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 4);
#endif
    return 0;
}
3114
/* Write handlers for guest stores to unassigned physical memory: discard
 * the value, optionally log, and on SPARC/MicroBlaze raise the
 * architectural unassigned-access fault (is_write=1). */
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}

static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}

static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}
3144
/* Dispatch tables for the IO_MEM_UNASSIGNED slot: index 0 = byte,
 * 1 = word, 2 = dword accessors. */
static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};

static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};
3156
/* Write handlers for the IO_MEM_NOTDIRTY slot: RAM pages containing
 * translated code take writes through here so the write can invalidate any
 * TBs on the page (tb_invalidate_phys_page_fast), perform the store, and
 * then mark the page dirty.  Once all dirty bits are set (0xff) the slow
 * path is no longer needed and the TLB entry is switched back to plain RAM
 * via tlb_set_dirty. */
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        /* Invalidation may have updated the flags; re-read them. */
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stb_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

/* 2-byte variant of notdirty_mem_writeb. */
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stw_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

/* 4-byte variant of notdirty_mem_writeb. */
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stl_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
3216
/* Read table for slots that must never be read through the I/O path
 * (reads go straight to RAM); kept NULL so a misuse faults fast. */
static CPUReadMemoryFunc * const error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

/* Write table for the IO_MEM_NOTDIRTY slot (see handlers above). */
static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
3228
/* Generate a debug exception if a watchpoint has been hit.
 * @offset: offset of the access within the current page
 * @len_mask: mask selecting which low address bits to ignore when matching
 * @flags: BP_MEM_READ / BP_MEM_WRITE, matched against each watchpoint.
 * On a hit, invalidates the current TB and restarts execution via
 * cpu_resume_from_signal() — i.e. this function may not return normally. */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                /* Recover precise guest CPU state at the faulting insn. */
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    /* Regenerate a single-insn TB so the access completes
                       before the debug exception is delivered. */
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
3273
/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines. */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}

/* Dispatch tables for the io_mem_watch slot (byte/word/dword). */
static CPUReadMemoryFunc * const watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc * const watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
pbrook6658ffb2007-03-16 23:58:11 +00003327
/* Forward a read of size 2^len bytes (len: 0=byte, 1=word, 2=dword) within
 * a subpage to the io-mem handler registered for that sub-region, applying
 * the per-entry region offset first. */
static inline uint32_t subpage_readlen (subpage_t *mmio,
                                        target_phys_addr_t addr,
                                        unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    return io_mem_read[idx][len](io_mem_opaque[idx], addr);
}

/* Write counterpart of subpage_readlen. */
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
           __func__, mmio, len, addr, idx, value);
#endif

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
}
3356
/* Per-size subpage accessors: thin adapters binding the generic
 * subpage_readlen/subpage_writelen to the CPU{Read,Write}MemoryFunc
 * signatures (len 0 = byte, 1 = word, 2 = dword). */
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 2);
}

/* Dispatch tables registered for each subpage io-mem slot. */
static CPUReadMemoryFunc * const subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc * const subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
3401
/* Map the byte range [start, end] (inclusive, both within one target page)
 * of subpage @mmio to io-mem slot @memory with per-entry @region_offset.
 * Returns 0 on success, -1 if the range exceeds the page. */
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    /* RAM cannot be accessed through the subpage slow path; treat it as
       unassigned instead. */
    if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
        memory = IO_MEM_UNASSIGNED;
    /* Reduce the io-mem token to a bare table index. */
    memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    for (; idx <= eidx; idx++) {
        mmio->sub_io_index[idx] = memory;
        mmio->region_offset[idx] = region_offset;
    }

    return 0;
}
3425
/* Allocate a new subpage for the page at @base, register it as an io-mem
 * region, store the resulting token (with IO_MEM_SUBPAGE set) in *phys,
 * and initialise the whole page to @orig_memory/@region_offset. */
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio,
                                            DEVICE_NATIVE_ENDIAN);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);

    return mmio;
}
3447
aliguori88715652009-02-11 15:20:58 +00003448static int get_free_io_mem_idx(void)
3449{
3450 int i;
3451
3452 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3453 if (!io_mem_used[i]) {
3454 io_mem_used[i] = 1;
3455 return i;
3456 }
Riku Voipioc6703b42009-12-03 15:56:05 +02003457 fprintf(stderr, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
aliguori88715652009-02-11 15:20:58 +00003458 return -1;
3459}
3460
/*
 * Usually, devices operate in little endian mode. There are devices out
 * there that operate in big endian too. Each device gets byte swapped
 * mmio if plugged onto a CPU that does the other endianness.
 *
 * CPU Device swap?
 *
 * little little no
 * little big yes
 * big little yes
 * big big no
 */

/* Wraps a device's original mmio callbacks so byte-swapping shims can be
 * interposed; `opaque` is the device's original opaque pointer. */
typedef struct SwapEndianContainer {
    CPUReadMemoryFunc *read[3];   /* original byte/word/dword readers */
    CPUWriteMemoryFunc *write[3]; /* original byte/word/dword writers */
    void *opaque;
} SwapEndianContainer;
3479
3480static uint32_t swapendian_mem_readb (void *opaque, target_phys_addr_t addr)
3481{
3482 uint32_t val;
3483 SwapEndianContainer *c = opaque;
3484 val = c->read[0](c->opaque, addr);
3485 return val;
3486}
3487
3488static uint32_t swapendian_mem_readw(void *opaque, target_phys_addr_t addr)
3489{
3490 uint32_t val;
3491 SwapEndianContainer *c = opaque;
3492 val = bswap16(c->read[1](c->opaque, addr));
3493 return val;
3494}
3495
3496static uint32_t swapendian_mem_readl(void *opaque, target_phys_addr_t addr)
3497{
3498 uint32_t val;
3499 SwapEndianContainer *c = opaque;
3500 val = bswap32(c->read[2](c->opaque, addr));
3501 return val;
3502}
3503
3504static CPUReadMemoryFunc * const swapendian_readfn[3]={
3505 swapendian_mem_readb,
3506 swapendian_mem_readw,
3507 swapendian_mem_readl
3508};
3509
/* Byte-swapping write shims: swap the value where wider than one byte,
 * then forward to the wrapped device callback. */
static void swapendian_mem_writeb(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    SwapEndianContainer *c = opaque;
    c->write[0](c->opaque, addr, val);
}

static void swapendian_mem_writew(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    SwapEndianContainer *c = opaque;
    c->write[1](c->opaque, addr, bswap16(val));
}

static void swapendian_mem_writel(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    SwapEndianContainer *c = opaque;
    c->write[2](c->opaque, addr, bswap32(val));
}

static CPUWriteMemoryFunc * const swapendian_writefn[3]={
    swapendian_mem_writeb,
    swapendian_mem_writew,
    swapendian_mem_writel
};
3536
/* Interpose the byte-swapping shims on io-mem slot @io_index: the original
 * callbacks and opaque are captured in a SwapEndianContainer, which then
 * becomes the slot's opaque (freed again by swapendian_del). */
static void swapendian_init(int io_index)
{
    SwapEndianContainer *c = qemu_malloc(sizeof(SwapEndianContainer));
    int i;

    /* Swap mmio for big endian targets */
    c->opaque = io_mem_opaque[io_index];
    for (i = 0; i < 3; i++) {
        c->read[i] = io_mem_read[io_index][i];
        c->write[i] = io_mem_write[io_index][i];

        io_mem_read[io_index][i] = swapendian_readfn[i];
        io_mem_write[io_index][i] = swapendian_writefn[i];
    }
    io_mem_opaque[io_index] = c;
}
3553
/* Free the SwapEndianContainer of slot @io_index, if one was installed
 * (detected by the slot's byte reader being the swapendian shim). */
static void swapendian_del(int io_index)
{
    if (io_mem_read[io_index][0] == swapendian_readfn[0]) {
        qemu_free(io_mem_opaque[io_index]);
    }
}
3560
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer.
   If io_index is non zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned if error. */
static int cpu_register_io_memory_fixed(int io_index,
                                        CPUReadMemoryFunc * const *mem_read,
                                        CPUWriteMemoryFunc * const *mem_write,
                                        void *opaque, enum device_endian endian)
{
    int i;

    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        /* Caller passed a pre-shifted io-mem token; reduce to an index. */
        io_index >>= IO_MEM_SHIFT;
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    /* Missing accessors fall back to the unassigned-memory handlers. */
    for (i = 0; i < 3; ++i) {
        io_mem_read[io_index][i]
            = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
    }
    for (i = 0; i < 3; ++i) {
        io_mem_write[io_index][i]
            = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
    }
    io_mem_opaque[io_index] = opaque;

    /* Interpose byte-swapping shims when the device endianness differs
       from the target's (see the swap table comment above). */
    switch (endian) {
    case DEVICE_BIG_ENDIAN:
#ifndef TARGET_WORDS_BIGENDIAN
        swapendian_init(io_index);
#endif
        break;
    case DEVICE_LITTLE_ENDIAN:
#ifdef TARGET_WORDS_BIGENDIAN
        swapendian_init(io_index);
#endif
        break;
    case DEVICE_NATIVE_ENDIAN:
    default:
        break;
    }

    return (io_index << IO_MEM_SHIFT);
}
bellard61382a52003-10-27 21:22:23 +00003613
/* Public wrapper: allocate a fresh io-mem slot (io_index 0 means
 * "allocate") and register the given accessors on it. */
int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
                           CPUWriteMemoryFunc * const *mem_write,
                           void *opaque, enum device_endian endian)
{
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque, endian);
}
3620
/* Release the io-mem slot identified by @io_table_address (a token as
 * returned by cpu_register_io_memory): free any endian-swap shim, reset
 * the accessors to the unassigned handlers and mark the slot free. */
void cpu_unregister_io_memory(int io_table_address)
{
    int i;
    int io_index = io_table_address >> IO_MEM_SHIFT;

    swapendian_del(io_index);

    for (i=0;i < 3; i++) {
        io_mem_read[io_index][i] = unassigned_mem_read[i];
        io_mem_write[io_index][i] = unassigned_mem_write[i];
    }
    io_mem_opaque[io_index] = NULL;
    io_mem_used[io_index] = 0;
}
3635
/* One-time setup of the io-mem dispatch tables: install the fixed
 * ROM/UNASSIGNED/NOTDIRTY slots, then allocate the watchpoint slot. */
static void io_mem_init(void)
{
    int i;

    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
                                 unassigned_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
                                 unassigned_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
                                 notdirty_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    /* Reserve the low slots so get_free_io_mem_idx never hands them out.
     * NOTE(review): the magic 5 presumably covers the fixed IO_MEM_*
     * entries above — confirm it tracks their count. */
    for (i=0; i<5; i++)
        io_mem_used[i] = 1;

    io_mem_watch = cpu_register_io_memory(watch_mem_read,
                                          watch_mem_write, NULL,
                                          DEVICE_NATIVE_ENDIAN);
}
3656
pbrooke2eef172008-06-08 01:09:01 +00003657#endif /* !defined(CONFIG_USER_ONLY) */
3658
bellard13eb76e2004-01-24 15:23:36 +00003659/* physical memory access (slow version, mainly for debug) */
3660#if defined(CONFIG_USER_ONLY)
Paul Brooka68fe892010-03-01 00:08:59 +00003661int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3662 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003663{
3664 int l, flags;
3665 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00003666 void * p;
bellard13eb76e2004-01-24 15:23:36 +00003667
3668 while (len > 0) {
3669 page = addr & TARGET_PAGE_MASK;
3670 l = (page + TARGET_PAGE_SIZE) - addr;
3671 if (l > len)
3672 l = len;
3673 flags = page_get_flags(page);
3674 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00003675 return -1;
bellard13eb76e2004-01-24 15:23:36 +00003676 if (is_write) {
3677 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00003678 return -1;
bellard579a97f2007-11-11 14:26:47 +00003679 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003680 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00003681 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003682 memcpy(p, buf, l);
3683 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00003684 } else {
3685 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00003686 return -1;
bellard579a97f2007-11-11 14:26:47 +00003687 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003688 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00003689 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003690 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00003691 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00003692 }
3693 len -= l;
3694 buf += l;
3695 addr += l;
3696 }
Paul Brooka68fe892010-03-01 00:08:59 +00003697 return 0;
bellard13eb76e2004-01-24 15:23:36 +00003698}
bellard8df1cd02005-01-28 22:37:22 +00003699
bellard13eb76e2004-01-24 15:23:36 +00003700#else
/* Read from or write to guest physical memory (slow path).
 * The transfer is split at page granularity; each page is either
 * dispatched to an MMIO handler (with the widest naturally aligned
 * access size available) or copied directly to/from host RAM.
 *
 * addr: guest physical start address
 * buf: host buffer copied from (is_write != 0) or into (is_write == 0)
 * len: number of bytes to transfer
 * is_write: non-zero writes guest memory, zero reads it
 */
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* l = bytes left in the current page, clamped to len */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                /* MMIO write: dispatch through the io_mem tables,
                   using the widest access the alignment allows. */
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                /* Writing over translated code: drop the stale TBs and
                   re-mark the page dirty (except the code-dirty flag). */
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case (includes ROM and ROMD regions) */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
bellard8df1cd02005-01-28 22:37:22 +00003797
bellardd0ecd2a2006-04-23 17:14:48 +00003798/* used for ROM loading : can write in RAM and ROM */
Anthony Liguoric227f092009-10-01 16:12:16 -05003799void cpu_physical_memory_write_rom(target_phys_addr_t addr,
bellardd0ecd2a2006-04-23 17:14:48 +00003800 const uint8_t *buf, int len)
3801{
3802 int l;
3803 uint8_t *ptr;
Anthony Liguoric227f092009-10-01 16:12:16 -05003804 target_phys_addr_t page;
bellardd0ecd2a2006-04-23 17:14:48 +00003805 unsigned long pd;
3806 PhysPageDesc *p;
ths3b46e622007-09-17 08:09:54 +00003807
bellardd0ecd2a2006-04-23 17:14:48 +00003808 while (len > 0) {
3809 page = addr & TARGET_PAGE_MASK;
3810 l = (page + TARGET_PAGE_SIZE) - addr;
3811 if (l > len)
3812 l = len;
3813 p = phys_page_find(page >> TARGET_PAGE_BITS);
3814 if (!p) {
3815 pd = IO_MEM_UNASSIGNED;
3816 } else {
3817 pd = p->phys_offset;
3818 }
ths3b46e622007-09-17 08:09:54 +00003819
bellardd0ecd2a2006-04-23 17:14:48 +00003820 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
bellard2a4188a2006-06-25 21:54:59 +00003821 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3822 !(pd & IO_MEM_ROMD)) {
bellardd0ecd2a2006-04-23 17:14:48 +00003823 /* do nothing */
3824 } else {
3825 unsigned long addr1;
3826 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3827 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003828 ptr = qemu_get_ram_ptr(addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00003829 memcpy(ptr, buf, l);
3830 }
3831 len -= l;
3832 buf += l;
3833 addr += l;
3834 }
3835}
3836
/* Fallback buffer used by cpu_physical_memory_map() when the requested
 * region is not plain RAM and so cannot be exposed as a direct host
 * pointer. */
typedef struct {
    void *buffer;             /* host allocation; NULL when not in use */
    target_phys_addr_t addr;  /* guest physical address being shadowed */
    target_phys_addr_t len;   /* length of the shadowed region */
} BounceBuffer;

/* A single global bounce buffer: only one such mapping may be live at
 * a time (see the bounce.buffer checks in cpu_physical_memory_map). */
static BounceBuffer bounce;
3844
/* Registration record for callers waiting to retry a failed
 * cpu_physical_memory_map(); see cpu_register_map_client(). */
typedef struct MapClient {
    void *opaque;                    /* passed back to the callback */
    void (*callback)(void *opaque);  /* invoked when a retry may succeed */
    QLIST_ENTRY(MapClient) link;
} MapClient;

/* List of clients to notify when the bounce buffer is released. */
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003853
3854void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3855{
3856 MapClient *client = qemu_malloc(sizeof(*client));
3857
3858 client->opaque = opaque;
3859 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003860 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00003861 return client;
3862}
3863
3864void cpu_unregister_map_client(void *_client)
3865{
3866 MapClient *client = (MapClient *)_client;
3867
Blue Swirl72cf2d42009-09-12 07:36:22 +00003868 QLIST_REMOVE(client, link);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09003869 qemu_free(client);
aliguoriba223c22009-01-22 16:59:16 +00003870}
3871
3872static void cpu_notify_map_clients(void)
3873{
3874 MapClient *client;
3875
Blue Swirl72cf2d42009-09-12 07:36:22 +00003876 while (!QLIST_EMPTY(&map_client_list)) {
3877 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003878 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09003879 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00003880 }
3881}
3882
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;   /* bytes successfully mapped so far */
    int l;
    uint8_t *ret = NULL;           /* host pointer for the first page */
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* Clamp each step to the end of the current page. */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            /* Not plain RAM: fall back to the (single, global) bounce
               buffer.  Only usable as the first and only page of a
               mapping, and only when no other mapping holds it. */
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                /* Pre-fill the bounce buffer for a read mapping. */
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            /* Host pointers are no longer contiguous: stop here and
               return the shorter contiguous prefix. */
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}
3944
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        /* Direct RAM mapping: on write, update dirty tracking and
           invalidate any translated code page by page. */
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    /* Bounce-buffer mapping: flush written data back to the guest,
       release the buffer, and wake anyone waiting to map. */
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
bellardd0ecd2a2006-04-23 17:14:48 +00003980
bellard8df1cd02005-01-28 22:37:22 +00003981/* warning: addr must be aligned */
Anthony Liguoric227f092009-10-01 16:12:16 -05003982uint32_t ldl_phys(target_phys_addr_t addr)
bellard8df1cd02005-01-28 22:37:22 +00003983{
3984 int io_index;
3985 uint8_t *ptr;
3986 uint32_t val;
3987 unsigned long pd;
3988 PhysPageDesc *p;
3989
3990 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3991 if (!p) {
3992 pd = IO_MEM_UNASSIGNED;
3993 } else {
3994 pd = p->phys_offset;
3995 }
ths3b46e622007-09-17 08:09:54 +00003996
ths5fafdf22007-09-16 21:08:06 +00003997 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
bellard2a4188a2006-06-25 21:54:59 +00003998 !(pd & IO_MEM_ROMD)) {
bellard8df1cd02005-01-28 22:37:22 +00003999 /* I/O case */
4000 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00004001 if (p)
4002 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bellard8df1cd02005-01-28 22:37:22 +00004003 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
4004 } else {
4005 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00004006 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bellard8df1cd02005-01-28 22:37:22 +00004007 (addr & ~TARGET_PAGE_MASK);
4008 val = ldl_p(ptr);
4009 }
4010 return val;
4011}
4012
bellard84b7b8e2005-11-28 21:19:04 +00004013/* warning: addr must be aligned */
Anthony Liguoric227f092009-10-01 16:12:16 -05004014uint64_t ldq_phys(target_phys_addr_t addr)
bellard84b7b8e2005-11-28 21:19:04 +00004015{
4016 int io_index;
4017 uint8_t *ptr;
4018 uint64_t val;
4019 unsigned long pd;
4020 PhysPageDesc *p;
4021
4022 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4023 if (!p) {
4024 pd = IO_MEM_UNASSIGNED;
4025 } else {
4026 pd = p->phys_offset;
4027 }
ths3b46e622007-09-17 08:09:54 +00004028
bellard2a4188a2006-06-25 21:54:59 +00004029 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
4030 !(pd & IO_MEM_ROMD)) {
bellard84b7b8e2005-11-28 21:19:04 +00004031 /* I/O case */
4032 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00004033 if (p)
4034 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bellard84b7b8e2005-11-28 21:19:04 +00004035#ifdef TARGET_WORDS_BIGENDIAN
4036 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
4037 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
4038#else
4039 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
4040 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
4041#endif
4042 } else {
4043 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00004044 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bellard84b7b8e2005-11-28 21:19:04 +00004045 (addr & ~TARGET_PAGE_MASK);
4046 val = ldq_p(ptr);
4047 }
4048 return val;
4049}
4050
bellardaab33092005-10-30 20:48:42 +00004051/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05004052uint32_t ldub_phys(target_phys_addr_t addr)
bellardaab33092005-10-30 20:48:42 +00004053{
4054 uint8_t val;
4055 cpu_physical_memory_read(addr, &val, 1);
4056 return val;
4057}
4058
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004059/* warning: addr must be aligned */
Anthony Liguoric227f092009-10-01 16:12:16 -05004060uint32_t lduw_phys(target_phys_addr_t addr)
bellardaab33092005-10-30 20:48:42 +00004061{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004062 int io_index;
4063 uint8_t *ptr;
4064 uint64_t val;
4065 unsigned long pd;
4066 PhysPageDesc *p;
4067
4068 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4069 if (!p) {
4070 pd = IO_MEM_UNASSIGNED;
4071 } else {
4072 pd = p->phys_offset;
4073 }
4074
4075 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
4076 !(pd & IO_MEM_ROMD)) {
4077 /* I/O case */
4078 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4079 if (p)
4080 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4081 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
4082 } else {
4083 /* RAM case */
4084 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4085 (addr & ~TARGET_PAGE_MASK);
4086 val = lduw_p(ptr);
4087 }
4088 return val;
bellardaab33092005-10-30 20:48:42 +00004089}
4090
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* I/O case: forward to the region's 32-bit write handler. */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        /* RAM case: write through the host mapping without the normal
           dirty-bit update. */
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        /* During migration the page must still be tracked so the new
           contents get transferred; invalidate code and mark dirty. */
        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
4129
Anthony Liguoric227f092009-10-01 16:12:16 -05004130void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
j_mayerbc98a7e2007-04-04 07:55:12 +00004131{
4132 int io_index;
4133 uint8_t *ptr;
4134 unsigned long pd;
4135 PhysPageDesc *p;
4136
4137 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4138 if (!p) {
4139 pd = IO_MEM_UNASSIGNED;
4140 } else {
4141 pd = p->phys_offset;
4142 }
ths3b46e622007-09-17 08:09:54 +00004143
j_mayerbc98a7e2007-04-04 07:55:12 +00004144 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4145 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00004146 if (p)
4147 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
j_mayerbc98a7e2007-04-04 07:55:12 +00004148#ifdef TARGET_WORDS_BIGENDIAN
4149 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
4150 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
4151#else
4152 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4153 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
4154#endif
4155 } else {
pbrook5579c7f2009-04-11 14:47:08 +00004156 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
j_mayerbc98a7e2007-04-04 07:55:12 +00004157 (addr & ~TARGET_PAGE_MASK);
4158 stq_p(ptr, val);
4159 }
4160}
4161
bellard8df1cd02005-01-28 22:37:22 +00004162/* warning: addr must be aligned */
Anthony Liguoric227f092009-10-01 16:12:16 -05004163void stl_phys(target_phys_addr_t addr, uint32_t val)
bellard8df1cd02005-01-28 22:37:22 +00004164{
4165 int io_index;
4166 uint8_t *ptr;
4167 unsigned long pd;
4168 PhysPageDesc *p;
4169
4170 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4171 if (!p) {
4172 pd = IO_MEM_UNASSIGNED;
4173 } else {
4174 pd = p->phys_offset;
4175 }
ths3b46e622007-09-17 08:09:54 +00004176
bellard3a7d9292005-08-21 09:26:42 +00004177 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
bellard8df1cd02005-01-28 22:37:22 +00004178 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00004179 if (p)
4180 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bellard8df1cd02005-01-28 22:37:22 +00004181 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4182 } else {
4183 unsigned long addr1;
4184 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4185 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00004186 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00004187 stl_p(ptr, val);
bellard3a7d9292005-08-21 09:26:42 +00004188 if (!cpu_physical_memory_is_dirty(addr1)) {
4189 /* invalidate code */
4190 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4191 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09004192 cpu_physical_memory_set_dirty_flags(addr1,
4193 (0xff & ~CODE_DIRTY_FLAG));
bellard3a7d9292005-08-21 09:26:42 +00004194 }
bellard8df1cd02005-01-28 22:37:22 +00004195 }
4196}
4197
bellardaab33092005-10-30 20:48:42 +00004198/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05004199void stb_phys(target_phys_addr_t addr, uint32_t val)
bellardaab33092005-10-30 20:48:42 +00004200{
4201 uint8_t v = val;
4202 cpu_physical_memory_write(addr, &v, 1);
4203}
4204
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004205/* warning: addr must be aligned */
Anthony Liguoric227f092009-10-01 16:12:16 -05004206void stw_phys(target_phys_addr_t addr, uint32_t val)
bellardaab33092005-10-30 20:48:42 +00004207{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004208 int io_index;
4209 uint8_t *ptr;
4210 unsigned long pd;
4211 PhysPageDesc *p;
4212
4213 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4214 if (!p) {
4215 pd = IO_MEM_UNASSIGNED;
4216 } else {
4217 pd = p->phys_offset;
4218 }
4219
4220 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4221 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4222 if (p)
4223 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4224 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
4225 } else {
4226 unsigned long addr1;
4227 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4228 /* RAM case */
4229 ptr = qemu_get_ram_ptr(addr1);
4230 stw_p(ptr, val);
4231 if (!cpu_physical_memory_is_dirty(addr1)) {
4232 /* invalidate code */
4233 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4234 /* set dirty bit */
4235 cpu_physical_memory_set_dirty_flags(addr1,
4236 (0xff & ~CODE_DIRTY_FLAG));
4237 }
4238 }
bellardaab33092005-10-30 20:48:42 +00004239}
4240
4241/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05004242void stq_phys(target_phys_addr_t addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00004243{
4244 val = tswap64(val);
4245 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
4246}
4247
aliguori5e2972f2009-03-28 17:51:36 +00004248/* virtual memory access for debug (includes writing to ROM) */
ths5fafdf22007-09-16 21:08:06 +00004249int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00004250 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00004251{
4252 int l;
Anthony Liguoric227f092009-10-01 16:12:16 -05004253 target_phys_addr_t phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00004254 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00004255
4256 while (len > 0) {
4257 page = addr & TARGET_PAGE_MASK;
4258 phys_addr = cpu_get_phys_page_debug(env, page);
4259 /* if no physical page mapped, return an error */
4260 if (phys_addr == -1)
4261 return -1;
4262 l = (page + TARGET_PAGE_SIZE) - addr;
4263 if (l > len)
4264 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00004265 phys_addr += (addr & ~TARGET_PAGE_MASK);
aliguori5e2972f2009-03-28 17:51:36 +00004266 if (is_write)
4267 cpu_physical_memory_write_rom(phys_addr, buf, l);
4268 else
aliguori5e2972f2009-03-28 17:51:36 +00004269 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
bellard13eb76e2004-01-24 15:23:36 +00004270 len -= l;
4271 buf += l;
4272 addr += l;
4273 }
4274 return 0;
4275}
Paul Brooka68fe892010-03-01 00:08:59 +00004276#endif
bellard13eb76e2004-01-24 15:23:36 +00004277
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    /* Locate the TB containing the host return address of the
       faulting I/O access. */
    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    /* Restore guest CPU state to the point of the faulting insn. */
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred. */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn. */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB. If this is not
       the first instruction in a TB then re-execute the preceding
       branch. */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        /* Rewind to the branch and credit back one instruction. */
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        /* Rewind to the branch and credit back one instruction. */
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    /* CF_LAST_IO makes the regenerated TB stop right after the I/O
       instruction, as deterministic execution requires. */
    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB. */
    cpu_resume_from_signal(env, NULL);
}
4336
Paul Brookb3755a92010-03-12 16:54:58 +00004337#if !defined(CONFIG_USER_ONLY)
4338
Stefan Weil055403b2010-10-22 23:03:32 +02004339void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
bellarde3db7222005-01-26 22:00:47 +00004340{
4341 int i, target_code_size, max_target_code_size;
4342 int direct_jmp_count, direct_jmp2_count, cross_page;
4343 TranslationBlock *tb;
ths3b46e622007-09-17 08:09:54 +00004344
bellarde3db7222005-01-26 22:00:47 +00004345 target_code_size = 0;
4346 max_target_code_size = 0;
4347 cross_page = 0;
4348 direct_jmp_count = 0;
4349 direct_jmp2_count = 0;
4350 for(i = 0; i < nb_tbs; i++) {
4351 tb = &tbs[i];
4352 target_code_size += tb->size;
4353 if (tb->size > max_target_code_size)
4354 max_target_code_size = tb->size;
4355 if (tb->page_addr[1] != -1)
4356 cross_page++;
4357 if (tb->tb_next_offset[0] != 0xffff) {
4358 direct_jmp_count++;
4359 if (tb->tb_next_offset[1] != 0xffff) {
4360 direct_jmp2_count++;
4361 }
4362 }
4363 }
4364 /* XXX: avoid using doubles ? */
bellard57fec1f2008-02-01 10:50:11 +00004365 cpu_fprintf(f, "Translation buffer state:\n");
Stefan Weil055403b2010-10-22 23:03:32 +02004366 cpu_fprintf(f, "gen code size %td/%ld\n",
bellard26a5f132008-05-28 12:30:31 +00004367 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4368 cpu_fprintf(f, "TB count %d/%d\n",
4369 nb_tbs, code_gen_max_blocks);
ths5fafdf22007-09-16 21:08:06 +00004370 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
bellarde3db7222005-01-26 22:00:47 +00004371 nb_tbs ? target_code_size / nb_tbs : 0,
4372 max_target_code_size);
Stefan Weil055403b2010-10-22 23:03:32 +02004373 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
bellarde3db7222005-01-26 22:00:47 +00004374 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4375 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
ths5fafdf22007-09-16 21:08:06 +00004376 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4377 cross_page,
bellarde3db7222005-01-26 22:00:47 +00004378 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4379 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
ths5fafdf22007-09-16 21:08:06 +00004380 direct_jmp_count,
bellarde3db7222005-01-26 22:00:47 +00004381 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4382 direct_jmp2_count,
4383 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
bellard57fec1f2008-02-01 10:50:11 +00004384 cpu_fprintf(f, "\nStatistics:\n");
bellarde3db7222005-01-26 22:00:47 +00004385 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4386 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4387 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
bellardb67d9a52008-05-23 09:57:34 +00004388 tcg_dump_info(f, cpu_fprintf);
bellarde3db7222005-01-26 22:00:47 +00004389}
4390
/* Instantiate the "_cmmu" softmmu helpers used for code accesses (the
 * translator reading guest instructions through the TLB).  The template
 * header is expanded once per access size (SHIFT 0..3 = 1/2/4/8 bytes);
 * `env` is temporarily aliased to cpu_single_env for these expansions. */
#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif
4410#endif