/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"
//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

#define P_L2_LEVELS \
    (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to PhysPageDesc.  */
static void *phys_map;

static void io_mem_init(void);
static void memory_map_init(void);

/* io memory support */
MemoryRegion *io_mem_region[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static MemoryRegion io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}
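/* Look up the PageDesc for virtual page 'index', walking the l1_map
   radix tree one level at a time.  If 'alloc' is set, missing
   intermediate tables and the final PageDesc array are allocated on
   the way down; otherwise NULL is returned as soon as a level is
   missing. */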
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
#if !defined(CONFIG_USER_ONLY)
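/* Look up the PhysPageDesc for physical page 'index' in the phys_map
   radix tree.  With 'alloc' set, missing levels are allocated and new
   leaf entries are initialized to unassigned memory. */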
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageDesc *pd;
    void **lp;
    int i;

    lp = &phys_map;

    /* Level 1..N-1.  */
    for (i = P_L2_LEVELS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            *lp = p = g_malloc0(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        int i;
        int first_index = index & ~(L2_SIZE - 1);

        if (!alloc) {
            return NULL;
        }

        *lp = pd = g_malloc(sizeof(PhysPageDesc) * L2_SIZE);

        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = io_mem_unassigned.ram_addr;
            pd[i].region_offset = (first_index + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}
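/* Non-allocating lookup; returns an unassigned-memory descriptor by
   value when the page has no mapping yet. */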
static inline PhysPageDesc phys_page_find(target_phys_addr_t index)
{
    PhysPageDesc *p = phys_page_find_alloc(index, 0);

    if (p) {
        return *p;
    } else {
        return (PhysPageDesc) {
            .phys_offset = io_mem_unassigned.ram_addr,
            .region_offset = index << TARGET_PAGE_BITS,
        };
    }
}

static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. This will change once a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif
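/* Allocate the buffer that will hold the translated code.  Either the
   static buffer is used, or the buffer is mmap'd with host-specific
   placement constraints so generated code can use direct branches.
   Also sizes the TranslationBlock array to match. */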
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Keep the buffer no bigger than 16MB to branch between blocks */
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now. */
    tcg_prologue_init(&tcg_ctx);
#endif
}
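/* TCG is considered enabled once the code generation buffer exists. */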
bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif
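/* Return the CPUState with the given index, or NULL if there is none. */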
CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}
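/* Register a new CPU: append 'env' to the global CPU list, assign it
   the next free cpu_index, and hook up its common state for
   save/restore where that is configured. */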
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}
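/* Free the SMC code bitmap of a page and reset its write counter. */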
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}
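/* Unlink 'tb' from a page's TB list.  The low two bits of each list
   pointer encode which of the TB's two page slots holds the next
   link, hence the tag masking below. */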
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
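/* Unchain jump 'n' of 'tb': walk the circular jump list it belongs to
   and unlink 'tb' from it. */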
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the per-CPU jump caches */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
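/* Set 'len' bits starting at bit 'start' in the bitmap 'tab',
   handling the partial leading and trailing bytes separately. */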
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
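/* Build the bitmap of code-containing bytes for a page from the TBs
   that intersect it (one bit per byte of the target page). */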
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
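/* Translate a new block at pc/cs_base/flags.  If the TB pool or the
   code buffer is exhausted, everything is flushed and the allocation
   retried, which cannot fail. */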
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size +
                             CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
1218/* add the tb in the target page and protect it if necessary */
ths5fafdf22007-09-16 21:08:06 +00001219static inline void tb_alloc_page(TranslationBlock *tb,
Paul Brook41c1b1c2010-03-12 16:54:58 +00001220 unsigned int n, tb_page_addr_t page_addr)
bellardfd6ce8f2003-05-14 19:00:11 +00001221{
1222 PageDesc *p;
Juan Quintela4429ab42011-06-02 01:53:44 +00001223#ifndef CONFIG_USER_ONLY
1224 bool page_already_protected;
1225#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001226
bellard9fa3e852004-01-04 18:06:42 +00001227 tb->page_addr[n] = page_addr;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001228 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
bellard9fa3e852004-01-04 18:06:42 +00001229 tb->page_next[n] = p->first_tb;
Juan Quintela4429ab42011-06-02 01:53:44 +00001230#ifndef CONFIG_USER_ONLY
1231 page_already_protected = p->first_tb != NULL;
1232#endif
bellard9fa3e852004-01-04 18:06:42 +00001233 p->first_tb = (TranslationBlock *)((long)tb | n);
1234 invalidate_page_bitmap(p);
1235
bellard107db442004-06-22 18:48:46 +00001236#if defined(TARGET_HAS_SMC) || 1
bellardd720b932004-04-25 17:57:43 +00001237
bellard9fa3e852004-01-04 18:06:42 +00001238#if defined(CONFIG_USER_ONLY)
bellardfd6ce8f2003-05-14 19:00:11 +00001239 if (p->flags & PAGE_WRITE) {
pbrook53a59602006-03-25 19:31:22 +00001240 target_ulong addr;
1241 PageDesc *p2;
bellard9fa3e852004-01-04 18:06:42 +00001242 int prot;
1243
bellardfd6ce8f2003-05-14 19:00:11 +00001244 /* force the host page as non writable (writes will have a
1245 page fault + mprotect overhead) */
pbrook53a59602006-03-25 19:31:22 +00001246 page_addr &= qemu_host_page_mask;
bellardfd6ce8f2003-05-14 19:00:11 +00001247 prot = 0;
pbrook53a59602006-03-25 19:31:22 +00001248 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1249 addr += TARGET_PAGE_SIZE) {
1250
1251 p2 = page_find (addr >> TARGET_PAGE_BITS);
1252 if (!p2)
1253 continue;
1254 prot |= p2->flags;
1255 p2->flags &= ~PAGE_WRITE;
pbrook53a59602006-03-25 19:31:22 +00001256 }
ths5fafdf22007-09-16 21:08:06 +00001257 mprotect(g2h(page_addr), qemu_host_page_size,
bellardfd6ce8f2003-05-14 19:00:11 +00001258 (prot & PAGE_BITS) & ~PAGE_WRITE);
1259#ifdef DEBUG_TB_INVALIDATE
blueswir1ab3d1722007-11-04 07:31:40 +00001260 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
pbrook53a59602006-03-25 19:31:22 +00001261 page_addr);
bellardfd6ce8f2003-05-14 19:00:11 +00001262#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001263 }
bellard9fa3e852004-01-04 18:06:42 +00001264#else
1265 /* if some code is already present, then the pages are already
1266 protected. So we handle the case where only the first TB is
1267 allocated in a physical page */
Juan Quintela4429ab42011-06-02 01:53:44 +00001268 if (!page_already_protected) {
bellard6a00d602005-11-21 23:25:50 +00001269 tlb_protect_code(page_addr);
bellard9fa3e852004-01-04 18:06:42 +00001270 }
1271#endif
bellardd720b932004-04-25 17:57:43 +00001272
1273#endif /* TARGET_HAS_SMC */
bellardfd6ce8f2003-05-14 19:00:11 +00001274}
1275
bellard9fa3e852004-01-04 18:06:42 +00001276/* add a new TB and link it to the physical page tables. phys_page2 is
1277 (-1) to indicate that only one page contains the TB. */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001278void tb_link_page(TranslationBlock *tb,
1279 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
bellardd4e81642003-05-25 16:46:15 +00001280{
bellard9fa3e852004-01-04 18:06:42 +00001281 unsigned int h;
1282 TranslationBlock **ptb;
1283
pbrookc8a706f2008-06-02 16:16:42 +00001284 /* Grab the mmap lock to stop another thread invalidating this TB
1285 before we are done. */
1286 mmap_lock();
bellard9fa3e852004-01-04 18:06:42 +00001287 /* add in the physical hash table */
1288 h = tb_phys_hash_func(phys_pc);
1289 ptb = &tb_phys_hash[h];
1290 tb->phys_hash_next = *ptb;
1291 *ptb = tb;
bellardfd6ce8f2003-05-14 19:00:11 +00001292
1293 /* add in the page list */
bellard9fa3e852004-01-04 18:06:42 +00001294 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1295 if (phys_page2 != -1)
1296 tb_alloc_page(tb, 1, phys_page2);
1297 else
1298 tb->page_addr[1] = -1;
bellard9fa3e852004-01-04 18:06:42 +00001299
bellardd4e81642003-05-25 16:46:15 +00001300 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1301 tb->jmp_next[0] = NULL;
1302 tb->jmp_next[1] = NULL;
1303
1304 /* init original jump addresses */
1305 if (tb->tb_next_offset[0] != 0xffff)
1306 tb_reset_jump(tb, 0);
1307 if (tb->tb_next_offset[1] != 0xffff)
1308 tb_reset_jump(tb, 1);
bellard8a40a182005-11-20 10:35:40 +00001309
1310#ifdef DEBUG_TB_CHECK
1311 tb_page_check();
1312#endif
pbrookc8a706f2008-06-02 16:16:42 +00001313 mmap_unlock();
bellardfd6ce8f2003-05-14 19:00:11 +00001314}
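/* Caller-side sketch (mirroring tb_gen_code() earlier in this file,
   shown here only for illustration): a TB whose code straddles a page
   boundary passes the second physical page, otherwise -1:

       virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
       phys_page2 = -1;
       if ((pc & TARGET_PAGE_MASK) != virt_page2) {
           phys_page2 = get_page_addr_code(env, virt_page2);
       }
       tb_link_page(tb, phys_pc, phys_page2);
*/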
1315
bellarda513fe12003-05-27 23:29:48 +00001316/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1317 tb[1].tc_ptr. Return NULL if not found */
1318TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1319{
1320 int m_min, m_max, m;
1321 unsigned long v;
1322 TranslationBlock *tb;
1323
1324 if (nb_tbs <= 0)
1325 return NULL;
1326 if (tc_ptr < (unsigned long)code_gen_buffer ||
1327 tc_ptr >= (unsigned long)code_gen_ptr)
1328 return NULL;
1329 /* binary search (cf Knuth) */
1330 m_min = 0;
1331 m_max = nb_tbs - 1;
1332 while (m_min <= m_max) {
1333 m = (m_min + m_max) >> 1;
1334 tb = &tbs[m];
1335 v = (unsigned long)tb->tc_ptr;
1336 if (v == tc_ptr)
1337 return tb;
1338 else if (tc_ptr < v) {
1339 m_max = m - 1;
1340 } else {
1341 m_min = m + 1;
1342 }
ths5fafdf22007-09-16 21:08:06 +00001343 }
bellarda513fe12003-05-27 23:29:48 +00001344 return &tbs[m_max];
1345}
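/* Usage sketch (illustrative, not built): callers typically map a host
   PC taken from a signal context back to the TB that generated it and
   then restore the guest CPU state; 'searched_pc' is a hypothetical
   stand-in for that host address:

       TranslationBlock *tb = tb_find_pc(searched_pc);
       if (tb) {
           cpu_restore_state(tb, env, searched_pc);
       }
*/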
bellard75012672003-06-21 13:11:07 +00001346
bellardea041c02003-06-25 16:16:50 +00001347static void tb_reset_jump_recursive(TranslationBlock *tb);
1348
1349static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1350{
1351 TranslationBlock *tb1, *tb_next, **ptb;
1352 unsigned int n1;
1353
1354 tb1 = tb->jmp_next[n];
1355 if (tb1 != NULL) {
1356 /* find head of list */
1357 for(;;) {
1358 n1 = (long)tb1 & 3;
1359 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1360 if (n1 == 2)
1361 break;
1362 tb1 = tb1->jmp_next[n1];
1363 }
1364 /* we are now sure that tb jumps to tb1 */
1365 tb_next = tb1;
1366
1367 /* remove tb from the jmp_first list */
1368 ptb = &tb_next->jmp_first;
1369 for(;;) {
1370 tb1 = *ptb;
1371 n1 = (long)tb1 & 3;
1372 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1373 if (n1 == n && tb1 == tb)
1374 break;
1375 ptb = &tb1->jmp_next[n1];
1376 }
1377 *ptb = tb->jmp_next[n];
1378 tb->jmp_next[n] = NULL;
ths3b46e622007-09-17 08:09:54 +00001379
bellardea041c02003-06-25 16:16:50 +00001380 /* suppress the jump to next tb in generated code */
1381 tb_reset_jump(tb, n);
1382
bellard01243112004-01-04 15:48:17 +00001383 /* suppress jumps in the tb to which we could have jumped */
bellardea041c02003-06-25 16:16:50 +00001384 tb_reset_jump_recursive(tb_next);
1385 }
1386}
1387
1388static void tb_reset_jump_recursive(TranslationBlock *tb)
1389{
1390 tb_reset_jump_recursive2(tb, 0);
1391 tb_reset_jump_recursive2(tb, 1);
1392}
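/* Note on the encoding walked above: the low two bits of the pointers
   in the jmp_first/jmp_next lists carry extra information. n1 == 0 or
   n1 == 1 means "this entry jumps to the next TB through jump slot n1";
   n1 == 2 marks the list head, i.e. the TB that owns the list. A tagged
   pointer is decoded with:

       n1  = (long)tb1 & 3;
       tb1 = (TranslationBlock *)((long)tb1 & ~3);
*/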
1393
bellard1fddef42005-04-17 19:16:13 +00001394#if defined(TARGET_HAS_ICE)
Paul Brook94df27f2010-02-28 23:47:45 +00001395#if defined(CONFIG_USER_ONLY)
1396static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1397{
1398 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1399}
1400#else
bellardd720b932004-04-25 17:57:43 +00001401static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1402{
Anthony Liguoric227f092009-10-01 16:12:16 -05001403 target_phys_addr_t addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00001404 target_ulong pd;
Anthony Liguoric227f092009-10-01 16:12:16 -05001405 ram_addr_t ram_addr;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02001406 PhysPageDesc p;
bellardd720b932004-04-25 17:57:43 +00001407
pbrookc2f07f82006-04-08 17:14:56 +00001408 addr = cpu_get_phys_page_debug(env, pc);
1409 p = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02001410 pd = p.phys_offset;
pbrookc2f07f82006-04-08 17:14:56 +00001411 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
pbrook706cd4b2006-04-08 17:36:21 +00001412 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
bellardd720b932004-04-25 17:57:43 +00001413}
bellardc27004e2005-01-03 23:35:10 +00001414#endif
Paul Brook94df27f2010-02-28 23:47:45 +00001415#endif /* TARGET_HAS_ICE */
bellardd720b932004-04-25 17:57:43 +00001416
Paul Brookc527ee82010-03-01 03:31:14 +00001417#if defined(CONFIG_USER_ONLY)
1418void cpu_watchpoint_remove_all(CPUState *env, int mask)
1419
1420{
1421}
1422
1423int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1424 int flags, CPUWatchpoint **watchpoint)
1425{
1426 return -ENOSYS;
1427}
1428#else
pbrook6658ffb2007-03-16 23:58:11 +00001429/* Add a watchpoint. */
aliguoria1d1bb32008-11-18 20:07:32 +00001430int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1431 int flags, CPUWatchpoint **watchpoint)
pbrook6658ffb2007-03-16 23:58:11 +00001432{
aliguorib4051332008-11-18 20:14:20 +00001433 target_ulong len_mask = ~(len - 1);
aliguoric0ce9982008-11-25 22:13:57 +00001434 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001435
aliguorib4051332008-11-18 20:14:20 +00001436 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1437 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1438 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1439 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1440 return -EINVAL;
1441 }
Anthony Liguori7267c092011-08-20 22:09:37 -05001442 wp = g_malloc(sizeof(*wp));
pbrook6658ffb2007-03-16 23:58:11 +00001443
aliguoria1d1bb32008-11-18 20:07:32 +00001444 wp->vaddr = addr;
aliguorib4051332008-11-18 20:14:20 +00001445 wp->len_mask = len_mask;
aliguoria1d1bb32008-11-18 20:07:32 +00001446 wp->flags = flags;
1447
aliguori2dc9f412008-11-18 20:56:59 +00001448 /* keep all GDB-injected watchpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001449 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001450 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001451 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001452 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001453
pbrook6658ffb2007-03-16 23:58:11 +00001454 tlb_flush_page(env, addr);
aliguoria1d1bb32008-11-18 20:07:32 +00001455
1456 if (watchpoint)
1457 *watchpoint = wp;
1458 return 0;
pbrook6658ffb2007-03-16 23:58:11 +00001459}
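/* Usage sketch (illustrative, not built): insert a 4-byte write
   watchpoint the way the gdbstub does, then drop it by reference;
   'env' and 'addr' are assumed valid here. The length must be a power
   of two (1/2/4/8) and addr must be aligned to it, or -EINVAL is
   returned:

       CPUWatchpoint *wp;
       if (cpu_watchpoint_insert(env, addr, 4,
                                 BP_GDB | BP_MEM_WRITE, &wp) == 0) {
           ...
           cpu_watchpoint_remove_by_ref(env, wp);
       }
*/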
1460
aliguoria1d1bb32008-11-18 20:07:32 +00001461/* Remove a specific watchpoint. */
1462int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1463 int flags)
pbrook6658ffb2007-03-16 23:58:11 +00001464{
aliguorib4051332008-11-18 20:14:20 +00001465 target_ulong len_mask = ~(len - 1);
aliguoria1d1bb32008-11-18 20:07:32 +00001466 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001467
Blue Swirl72cf2d42009-09-12 07:36:22 +00001468 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001469 if (addr == wp->vaddr && len_mask == wp->len_mask
aliguori6e140f22008-11-18 20:37:55 +00001470 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
aliguoria1d1bb32008-11-18 20:07:32 +00001471 cpu_watchpoint_remove_by_ref(env, wp);
pbrook6658ffb2007-03-16 23:58:11 +00001472 return 0;
1473 }
1474 }
aliguoria1d1bb32008-11-18 20:07:32 +00001475 return -ENOENT;
pbrook6658ffb2007-03-16 23:58:11 +00001476}
1477
aliguoria1d1bb32008-11-18 20:07:32 +00001478/* Remove a specific watchpoint by reference. */
1479void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1480{
Blue Swirl72cf2d42009-09-12 07:36:22 +00001481 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
edgar_igl7d03f822008-05-17 18:58:29 +00001482
aliguoria1d1bb32008-11-18 20:07:32 +00001483 tlb_flush_page(env, watchpoint->vaddr);
1484
Anthony Liguori7267c092011-08-20 22:09:37 -05001485 g_free(watchpoint);
edgar_igl7d03f822008-05-17 18:58:29 +00001486}
1487
aliguoria1d1bb32008-11-18 20:07:32 +00001488/* Remove all matching watchpoints. */
1489void cpu_watchpoint_remove_all(CPUState *env, int mask)
1490{
aliguoric0ce9982008-11-25 22:13:57 +00001491 CPUWatchpoint *wp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001492
Blue Swirl72cf2d42009-09-12 07:36:22 +00001493 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001494 if (wp->flags & mask)
1495 cpu_watchpoint_remove_by_ref(env, wp);
aliguoric0ce9982008-11-25 22:13:57 +00001496 }
aliguoria1d1bb32008-11-18 20:07:32 +00001497}
Paul Brookc527ee82010-03-01 03:31:14 +00001498#endif
aliguoria1d1bb32008-11-18 20:07:32 +00001499
1500/* Add a breakpoint. */
1501int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1502 CPUBreakpoint **breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001503{
bellard1fddef42005-04-17 19:16:13 +00001504#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001505 CPUBreakpoint *bp;
ths3b46e622007-09-17 08:09:54 +00001506
Anthony Liguori7267c092011-08-20 22:09:37 -05001507 bp = g_malloc(sizeof(*bp));
aliguoria1d1bb32008-11-18 20:07:32 +00001508
1509 bp->pc = pc;
1510 bp->flags = flags;
1511
aliguori2dc9f412008-11-18 20:56:59 +00001512 /* keep all GDB-injected breakpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001513 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001514 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001515 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001516 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001517
1518 breakpoint_invalidate(env, pc);
1519
1520 if (breakpoint)
1521 *breakpoint = bp;
1522 return 0;
1523#else
1524 return -ENOSYS;
1525#endif
1526}
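/* Usage sketch (illustrative): this mirrors what the gdbstub does when
   the debugger plants a software breakpoint at a guest pc:

       if (cpu_breakpoint_insert(env, pc, BP_GDB, NULL) == 0) {
           ...
           cpu_breakpoint_remove(env, pc, BP_GDB);
       }
*/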
1527
1528/* Remove a specific breakpoint. */
1529int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1530{
1531#if defined(TARGET_HAS_ICE)
1532 CPUBreakpoint *bp;
1533
Blue Swirl72cf2d42009-09-12 07:36:22 +00001534 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00001535 if (bp->pc == pc && bp->flags == flags) {
1536 cpu_breakpoint_remove_by_ref(env, bp);
bellard4c3a88a2003-07-26 12:06:08 +00001537 return 0;
aliguoria1d1bb32008-11-18 20:07:32 +00001538 }
bellard4c3a88a2003-07-26 12:06:08 +00001539 }
aliguoria1d1bb32008-11-18 20:07:32 +00001540 return -ENOENT;
bellard4c3a88a2003-07-26 12:06:08 +00001541#else
aliguoria1d1bb32008-11-18 20:07:32 +00001542 return -ENOSYS;
bellard4c3a88a2003-07-26 12:06:08 +00001543#endif
1544}
1545
aliguoria1d1bb32008-11-18 20:07:32 +00001546/* Remove a specific breakpoint by reference. */
1547void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001548{
bellard1fddef42005-04-17 19:16:13 +00001549#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001550 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
bellardd720b932004-04-25 17:57:43 +00001551
aliguoria1d1bb32008-11-18 20:07:32 +00001552 breakpoint_invalidate(env, breakpoint->pc);
1553
Anthony Liguori7267c092011-08-20 22:09:37 -05001554 g_free(breakpoint);
aliguoria1d1bb32008-11-18 20:07:32 +00001555#endif
1556}
1557
1558/* Remove all matching breakpoints. */
1559void cpu_breakpoint_remove_all(CPUState *env, int mask)
1560{
1561#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001562 CPUBreakpoint *bp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001563
Blue Swirl72cf2d42009-09-12 07:36:22 +00001564 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001565 if (bp->flags & mask)
1566 cpu_breakpoint_remove_by_ref(env, bp);
aliguoric0ce9982008-11-25 22:13:57 +00001567 }
bellard4c3a88a2003-07-26 12:06:08 +00001568#endif
1569}
1570
bellardc33a3462003-07-29 20:50:33 +00001571/* enable or disable single step mode. EXCP_DEBUG is returned by the
1572 CPU loop after each instruction */
1573void cpu_single_step(CPUState *env, int enabled)
1574{
bellard1fddef42005-04-17 19:16:13 +00001575#if defined(TARGET_HAS_ICE)
bellardc33a3462003-07-29 20:50:33 +00001576 if (env->singlestep_enabled != enabled) {
1577 env->singlestep_enabled = enabled;
aliguorie22a25c2009-03-12 20:12:48 +00001578 if (kvm_enabled())
1579 kvm_update_guest_debug(env, 0);
1580 else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01001581 /* must flush all the translated code to avoid inconsistencies */
aliguorie22a25c2009-03-12 20:12:48 +00001582 /* XXX: only flush what is necessary */
1583 tb_flush(env);
1584 }
bellardc33a3462003-07-29 20:50:33 +00001585 }
1586#endif
1587}
1588
bellard34865132003-10-05 14:28:56 +00001589/* enable or disable low levels log */
1590void cpu_set_log(int log_flags)
1591{
1592 loglevel = log_flags;
1593 if (loglevel && !logfile) {
pbrook11fcfab2007-07-01 18:21:11 +00001594 logfile = fopen(logfilename, log_append ? "a" : "w");
bellard34865132003-10-05 14:28:56 +00001595 if (!logfile) {
1596 perror(logfilename);
1597 _exit(1);
1598 }
bellard9fa3e852004-01-04 18:06:42 +00001599#if !defined(CONFIG_SOFTMMU)
1600 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1601 {
blueswir1b55266b2008-09-20 08:07:15 +00001602 static char logfile_buf[4096];
bellard9fa3e852004-01-04 18:06:42 +00001603 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1604 }
Stefan Weildaf767b2011-12-03 22:32:37 +01001605#elif defined(_WIN32)
1606 /* Win32 doesn't support line-buffering, so use unbuffered output. */
1607 setvbuf(logfile, NULL, _IONBF, 0);
1608#else
bellard34865132003-10-05 14:28:56 +00001609 setvbuf(logfile, NULL, _IOLBF, 0);
bellard9fa3e852004-01-04 18:06:42 +00001610#endif
pbrooke735b912007-06-30 13:53:24 +00001611 log_append = 1;
1612 }
1613 if (!loglevel && logfile) {
1614 fclose(logfile);
1615 logfile = NULL;
bellard34865132003-10-05 14:28:56 +00001616 }
1617}
1618
1619void cpu_set_log_filename(const char *filename)
1620{
1621 logfilename = strdup(filename);
pbrooke735b912007-06-30 13:53:24 +00001622 if (logfile) {
1623 fclose(logfile);
1624 logfile = NULL;
1625 }
1626 cpu_set_log(loglevel);
bellard34865132003-10-05 14:28:56 +00001627}
bellardc33a3462003-07-29 20:50:33 +00001628
aurel323098dba2009-03-07 21:28:24 +00001629static void cpu_unlink_tb(CPUState *env)
bellardea041c02003-06-25 16:16:50 +00001630{
pbrookd5975362008-06-07 20:50:51 +00001631 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1632 problem and hope the cpu will stop of its own accord. For userspace
1633 emulation this is usually not as bad as it sounds, since signals
1634 are mostly used just to interrupt blocking syscalls. */
aurel323098dba2009-03-07 21:28:24 +00001635 TranslationBlock *tb;
Anthony Liguoric227f092009-10-01 16:12:16 -05001636 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
aurel323098dba2009-03-07 21:28:24 +00001637
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001638 spin_lock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001639 tb = env->current_tb;
1640 /* if the cpu is currently executing code, we must unlink it and
1641 all the potentially executing TB */
Riku Voipiof76cfe52009-12-04 15:16:30 +02001642 if (tb) {
aurel323098dba2009-03-07 21:28:24 +00001643 env->current_tb = NULL;
1644 tb_reset_jump_recursive(tb);
aurel323098dba2009-03-07 21:28:24 +00001645 }
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001646 spin_unlock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001647}
1648
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001649#ifndef CONFIG_USER_ONLY
aurel323098dba2009-03-07 21:28:24 +00001650/* mask must never be zero, except for A20 change call */
Jan Kiszkaec6959d2011-04-13 01:32:56 +02001651static void tcg_handle_interrupt(CPUState *env, int mask)
aurel323098dba2009-03-07 21:28:24 +00001652{
1653 int old_mask;
1654
1655 old_mask = env->interrupt_request;
1656 env->interrupt_request |= mask;
1657
aliguori8edac962009-04-24 18:03:45 +00001658 /*
1659 * If called from iothread context, wake the target cpu in
1660 * case it's halted.
1661 */
Jan Kiszkab7680cb2011-03-12 17:43:51 +01001662 if (!qemu_cpu_is_self(env)) {
aliguori8edac962009-04-24 18:03:45 +00001663 qemu_cpu_kick(env);
1664 return;
1665 }
aliguori8edac962009-04-24 18:03:45 +00001666
pbrook2e70f6e2008-06-29 01:03:05 +00001667 if (use_icount) {
pbrook266910c2008-07-09 15:31:50 +00001668 env->icount_decr.u16.high = 0xffff;
pbrook2e70f6e2008-06-29 01:03:05 +00001669 if (!can_do_io(env)
aurel32be214e62009-03-06 21:48:00 +00001670 && (mask & ~old_mask) != 0) {
pbrook2e70f6e2008-06-29 01:03:05 +00001671 cpu_abort(env, "Raised interrupt while not in I/O function");
1672 }
pbrook2e70f6e2008-06-29 01:03:05 +00001673 } else {
aurel323098dba2009-03-07 21:28:24 +00001674 cpu_unlink_tb(env);
bellardea041c02003-06-25 16:16:50 +00001675 }
1676}
1677
Jan Kiszkaec6959d2011-04-13 01:32:56 +02001678CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1679
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001680#else /* CONFIG_USER_ONLY */
1681
1682void cpu_interrupt(CPUState *env, int mask)
1683{
1684 env->interrupt_request |= mask;
1685 cpu_unlink_tb(env);
1686}
1687#endif /* CONFIG_USER_ONLY */
1688
bellardb54ad042004-05-20 13:42:52 +00001689void cpu_reset_interrupt(CPUState *env, int mask)
1690{
1691 env->interrupt_request &= ~mask;
1692}
1693
aurel323098dba2009-03-07 21:28:24 +00001694void cpu_exit(CPUState *env)
1695{
1696 env->exit_request = 1;
1697 cpu_unlink_tb(env);
1698}
1699
blueswir1c7cd6a32008-10-02 18:27:46 +00001700const CPULogItem cpu_log_items[] = {
ths5fafdf22007-09-16 21:08:06 +00001701 { CPU_LOG_TB_OUT_ASM, "out_asm",
bellardf193c792004-03-21 17:06:25 +00001702 "show generated host assembly code for each compiled TB" },
1703 { CPU_LOG_TB_IN_ASM, "in_asm",
1704 "show target assembly code for each compiled TB" },
ths5fafdf22007-09-16 21:08:06 +00001705 { CPU_LOG_TB_OP, "op",
bellard57fec1f2008-02-01 10:50:11 +00001706 "show micro ops for each compiled TB" },
bellardf193c792004-03-21 17:06:25 +00001707 { CPU_LOG_TB_OP_OPT, "op_opt",
blueswir1e01a1152008-03-14 17:37:11 +00001708 "show micro ops "
1709#ifdef TARGET_I386
1710 "before eflags optimization and "
bellardf193c792004-03-21 17:06:25 +00001711#endif
blueswir1e01a1152008-03-14 17:37:11 +00001712 "after liveness analysis" },
bellardf193c792004-03-21 17:06:25 +00001713 { CPU_LOG_INT, "int",
1714 "show interrupts/exceptions in short format" },
1715 { CPU_LOG_EXEC, "exec",
1716 "show trace before each executed TB (lots of logs)" },
bellard9fddaa02004-05-21 12:59:32 +00001717 { CPU_LOG_TB_CPU, "cpu",
thse91c8a72007-06-03 13:35:16 +00001718 "show CPU state before block translation" },
bellardf193c792004-03-21 17:06:25 +00001719#ifdef TARGET_I386
1720 { CPU_LOG_PCALL, "pcall",
1721 "show protected mode far calls/returns/exceptions" },
aliguorieca1bdf2009-01-26 19:54:31 +00001722 { CPU_LOG_RESET, "cpu_reset",
1723 "show CPU state before CPU resets" },
bellardf193c792004-03-21 17:06:25 +00001724#endif
bellard8e3a9fd2004-10-09 17:32:58 +00001725#ifdef DEBUG_IOPORT
bellardfd872592004-05-12 19:11:15 +00001726 { CPU_LOG_IOPORT, "ioport",
1727 "show all i/o ports accesses" },
bellard8e3a9fd2004-10-09 17:32:58 +00001728#endif
bellardf193c792004-03-21 17:06:25 +00001729 { 0, NULL, NULL },
1730};
1731
1732static int cmp1(const char *s1, int n, const char *s2)
1733{
1734 if (strlen(s2) != n)
1735 return 0;
1736 return memcmp(s1, s2, n) == 0;
1737}
ths3b46e622007-09-17 08:09:54 +00001738
bellardf193c792004-03-21 17:06:25 +00001739/* takes a comma-separated list of log masks. Returns 0 on error. */
1740int cpu_str_to_log_mask(const char *str)
1741{
blueswir1c7cd6a32008-10-02 18:27:46 +00001742 const CPULogItem *item;
bellardf193c792004-03-21 17:06:25 +00001743 int mask;
1744 const char *p, *p1;
1745
1746 p = str;
1747 mask = 0;
1748 for(;;) {
1749 p1 = strchr(p, ',');
1750 if (!p1)
1751 p1 = p + strlen(p);
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001752 if (cmp1(p, p1 - p, "all")) {
1753 for(item = cpu_log_items; item->mask != 0; item++) {
1754 mask |= item->mask;
1755 }
1756 } else {
1757 for(item = cpu_log_items; item->mask != 0; item++) {
1758 if (cmp1(p, p1 - p, item->name))
1759 goto found;
1760 }
1761 return 0;
bellardf193c792004-03-21 17:06:25 +00001762 }
bellardf193c792004-03-21 17:06:25 +00001763 found:
1764 mask |= item->mask;
1765 if (*p1 != ',')
1766 break;
1767 p = p1 + 1;
1768 }
1769 return mask;
1770}
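/* Usage sketch (illustrative): this is essentially how the -d command
   line option is wired up; an unrecognized item makes the whole string
   parse to 0:

       int mask = cpu_str_to_log_mask("in_asm,exec");
       if (!mask) {
           fprintf(stderr, "unknown -d item\n");
       } else {
           cpu_set_log(mask);
       }
*/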
bellardea041c02003-06-25 16:16:50 +00001771
bellard75012672003-06-21 13:11:07 +00001772void cpu_abort(CPUState *env, const char *fmt, ...)
1773{
1774 va_list ap;
pbrook493ae1f2007-11-23 16:53:59 +00001775 va_list ap2;
bellard75012672003-06-21 13:11:07 +00001776
1777 va_start(ap, fmt);
pbrook493ae1f2007-11-23 16:53:59 +00001778 va_copy(ap2, ap);
bellard75012672003-06-21 13:11:07 +00001779 fprintf(stderr, "qemu: fatal: ");
1780 vfprintf(stderr, fmt, ap);
1781 fprintf(stderr, "\n");
1782#ifdef TARGET_I386
bellard7fe48482004-10-09 18:08:01 +00001783 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1784#else
1785 cpu_dump_state(env, stderr, fprintf, 0);
bellard75012672003-06-21 13:11:07 +00001786#endif
aliguori93fcfe32009-01-15 22:34:14 +00001787 if (qemu_log_enabled()) {
1788 qemu_log("qemu: fatal: ");
1789 qemu_log_vprintf(fmt, ap2);
1790 qemu_log("\n");
j_mayerf9373292007-09-29 12:18:20 +00001791#ifdef TARGET_I386
aliguori93fcfe32009-01-15 22:34:14 +00001792 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
j_mayerf9373292007-09-29 12:18:20 +00001793#else
aliguori93fcfe32009-01-15 22:34:14 +00001794 log_cpu_state(env, 0);
j_mayerf9373292007-09-29 12:18:20 +00001795#endif
aliguori31b1a7b2009-01-15 22:35:09 +00001796 qemu_log_flush();
aliguori93fcfe32009-01-15 22:34:14 +00001797 qemu_log_close();
balrog924edca2007-06-10 14:07:13 +00001798 }
pbrook493ae1f2007-11-23 16:53:59 +00001799 va_end(ap2);
j_mayerf9373292007-09-29 12:18:20 +00001800 va_end(ap);
Riku Voipiofd052bf2010-01-25 14:30:49 +02001801#if defined(CONFIG_USER_ONLY)
1802 {
1803 struct sigaction act;
1804 sigfillset(&act.sa_mask);
1805 act.sa_handler = SIG_DFL;
1806 sigaction(SIGABRT, &act, NULL);
1807 }
1808#endif
bellard75012672003-06-21 13:11:07 +00001809 abort();
1810}
1811
thsc5be9f02007-02-28 20:20:53 +00001812CPUState *cpu_copy(CPUState *env)
1813{
ths01ba9812007-12-09 02:22:57 +00001814 CPUState *new_env = cpu_init(env->cpu_model_str);
thsc5be9f02007-02-28 20:20:53 +00001815 CPUState *next_cpu = new_env->next_cpu;
1816 int cpu_index = new_env->cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001817#if defined(TARGET_HAS_ICE)
1818 CPUBreakpoint *bp;
1819 CPUWatchpoint *wp;
1820#endif
1821
thsc5be9f02007-02-28 20:20:53 +00001822 memcpy(new_env, env, sizeof(CPUState));
aliguori5a38f082009-01-15 20:16:51 +00001823
1824 /* Preserve chaining and index. */
thsc5be9f02007-02-28 20:20:53 +00001825 new_env->next_cpu = next_cpu;
1826 new_env->cpu_index = cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001827
1828 /* Clone all break/watchpoints.
1829 Note: Once we support ptrace with hw-debug register access, make sure
1830 BP_CPU break/watchpoints are handled correctly on clone. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00001831 QTAILQ_INIT(&env->breakpoints);
1832 QTAILQ_INIT(&env->watchpoints);
aliguori5a38f082009-01-15 20:16:51 +00001833#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001834 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001835 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1836 }
Blue Swirl72cf2d42009-09-12 07:36:22 +00001837 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001838 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1839 wp->flags, NULL);
1840 }
1841#endif
1842
thsc5be9f02007-02-28 20:20:53 +00001843 return new_env;
1844}
1845
bellard01243112004-01-04 15:48:17 +00001846#if !defined(CONFIG_USER_ONLY)
1847
edgar_igl5c751e92008-05-06 08:44:21 +00001848static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1849{
1850 unsigned int i;
1851
1852 /* Discard jump cache entries for any tb that might
1853 overlap the flushed page. */
1854 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1855 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001856 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001857
1858 i = tb_jmp_cache_hash_page(addr);
1859 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001860 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001861}
1862
Igor Kovalenko08738982009-07-12 02:15:40 +04001863static CPUTLBEntry s_cputlb_empty_entry = {
1864 .addr_read = -1,
1865 .addr_write = -1,
1866 .addr_code = -1,
1867 .addend = -1,
1868};
1869
Peter Maydell771124e2012-01-17 13:23:13 +00001870/* NOTE:
1871 * If flush_global is true (the usual case), flush all tlb entries.
1872 * If flush_global is false, flush (at least) all tlb entries not
1873 * marked global.
1874 *
1875 * Since QEMU doesn't currently implement a global/not-global flag
1876 * for tlb entries, at the moment tlb_flush() will also flush all
1877 * tlb entries in the flush_global == false case. This is OK because
1878 * CPU architectures generally permit an implementation to drop
1879 * entries from the TLB at any time, so flushing more entries than
1880 * required is only an efficiency issue, not a correctness issue.
1881 */
bellardee8b7022004-02-03 23:35:10 +00001882void tlb_flush(CPUState *env, int flush_global)
bellard33417e72003-08-10 21:47:01 +00001883{
bellard33417e72003-08-10 21:47:01 +00001884 int i;
bellard01243112004-01-04 15:48:17 +00001885
bellard9fa3e852004-01-04 18:06:42 +00001886#if defined(DEBUG_TLB)
1887 printf("tlb_flush:\n");
1888#endif
bellard01243112004-01-04 15:48:17 +00001889 /* must reset current TB so that interrupts cannot modify the
1890 links while we are modifying them */
1891 env->current_tb = NULL;
1892
bellard33417e72003-08-10 21:47:01 +00001893 for(i = 0; i < CPU_TLB_SIZE; i++) {
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001894 int mmu_idx;
1895 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
Igor Kovalenko08738982009-07-12 02:15:40 +04001896 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001897 }
bellard33417e72003-08-10 21:47:01 +00001898 }
bellard9fa3e852004-01-04 18:06:42 +00001899
bellard8a40a182005-11-20 10:35:40 +00001900 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
bellard9fa3e852004-01-04 18:06:42 +00001901
Paul Brookd4c430a2010-03-17 02:14:28 +00001902 env->tlb_flush_addr = -1;
1903 env->tlb_flush_mask = 0;
bellarde3db7222005-01-26 22:00:47 +00001904 tlb_flush_count++;
bellard33417e72003-08-10 21:47:01 +00001905}
1906
bellard274da6b2004-05-20 21:56:27 +00001907static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
bellard61382a52003-10-27 21:22:23 +00001908{
ths5fafdf22007-09-16 21:08:06 +00001909 if (addr == (tlb_entry->addr_read &
bellard84b7b8e2005-11-28 21:19:04 +00001910 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00001911 addr == (tlb_entry->addr_write &
bellard84b7b8e2005-11-28 21:19:04 +00001912 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00001913 addr == (tlb_entry->addr_code &
bellard84b7b8e2005-11-28 21:19:04 +00001914 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
Igor Kovalenko08738982009-07-12 02:15:40 +04001915 *tlb_entry = s_cputlb_empty_entry;
bellard84b7b8e2005-11-28 21:19:04 +00001916 }
bellard61382a52003-10-27 21:22:23 +00001917}
1918
bellard2e126692004-04-25 21:28:44 +00001919void tlb_flush_page(CPUState *env, target_ulong addr)
bellard33417e72003-08-10 21:47:01 +00001920{
bellard8a40a182005-11-20 10:35:40 +00001921 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001922 int mmu_idx;
bellard01243112004-01-04 15:48:17 +00001923
bellard9fa3e852004-01-04 18:06:42 +00001924#if defined(DEBUG_TLB)
bellard108c49b2005-07-24 12:55:09 +00001925 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
bellard9fa3e852004-01-04 18:06:42 +00001926#endif
Paul Brookd4c430a2010-03-17 02:14:28 +00001927 /* Check if we need to flush due to large pages. */
1928 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
1929#if defined(DEBUG_TLB)
1930 printf("tlb_flush_page: forced full flush ("
1931 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
1932 env->tlb_flush_addr, env->tlb_flush_mask);
1933#endif
1934 tlb_flush(env, 1);
1935 return;
1936 }
bellard01243112004-01-04 15:48:17 +00001937 /* must reset current TB so that interrupts cannot modify the
1938 links while we are modifying them */
1939 env->current_tb = NULL;
bellard33417e72003-08-10 21:47:01 +00001940
bellard61382a52003-10-27 21:22:23 +00001941 addr &= TARGET_PAGE_MASK;
bellard33417e72003-08-10 21:47:01 +00001942 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001943 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1944 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
bellard01243112004-01-04 15:48:17 +00001945
edgar_igl5c751e92008-05-06 08:44:21 +00001946 tlb_flush_jmp_cache(env, addr);
bellard9fa3e852004-01-04 18:06:42 +00001947}
1948
bellard9fa3e852004-01-04 18:06:42 +00001949/* update the TLBs so that writes to code in the virtual page 'addr'
1950 can be detected */
Anthony Liguoric227f092009-10-01 16:12:16 -05001951static void tlb_protect_code(ram_addr_t ram_addr)
bellard61382a52003-10-27 21:22:23 +00001952{
ths5fafdf22007-09-16 21:08:06 +00001953 cpu_physical_memory_reset_dirty(ram_addr,
bellard6a00d602005-11-21 23:25:50 +00001954 ram_addr + TARGET_PAGE_SIZE,
1955 CODE_DIRTY_FLAG);
bellard9fa3e852004-01-04 18:06:42 +00001956}
1957
bellard9fa3e852004-01-04 18:06:42 +00001958/* update the TLB so that writes in physical page 'phys_addr' are no longer
bellard3a7d9292005-08-21 09:26:42 +00001959 tested for self-modifying code */
Anthony Liguoric227f092009-10-01 16:12:16 -05001960static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
bellard3a7d9292005-08-21 09:26:42 +00001961 target_ulong vaddr)
bellard9fa3e852004-01-04 18:06:42 +00001962{
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001963 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
bellard1ccde1c2004-02-06 19:46:14 +00001964}
1965
ths5fafdf22007-09-16 21:08:06 +00001966static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
bellard1ccde1c2004-02-06 19:46:14 +00001967 unsigned long start, unsigned long length)
1968{
1969 unsigned long addr;
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001970 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
bellard84b7b8e2005-11-28 21:19:04 +00001971 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
bellard1ccde1c2004-02-06 19:46:14 +00001972 if ((addr - start) < length) {
pbrook0f459d12008-06-09 00:20:13 +00001973 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
bellard1ccde1c2004-02-06 19:46:14 +00001974 }
1975 }
1976}
1977
pbrook5579c7f2009-04-11 14:47:08 +00001978/* Note: start and end must be within the same ram block. */
Anthony Liguoric227f092009-10-01 16:12:16 -05001979void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
bellard0a962c02005-02-10 22:00:27 +00001980 int dirty_flags)
bellard1ccde1c2004-02-06 19:46:14 +00001981{
1982 CPUState *env;
bellard4f2ac232004-04-26 19:44:02 +00001983 unsigned long length, start1;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001984 int i;
bellard1ccde1c2004-02-06 19:46:14 +00001985
1986 start &= TARGET_PAGE_MASK;
1987 end = TARGET_PAGE_ALIGN(end);
1988
1989 length = end - start;
1990 if (length == 0)
1991 return;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001992 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00001993
bellard1ccde1c2004-02-06 19:46:14 +00001994 /* we modify the TLB cache so that the dirty bit will be set again
1995 when accessing the range */
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001996 start1 = (unsigned long)qemu_safe_ram_ptr(start);
Stefan Weila57d23e2011-04-30 22:49:26 +02001997 /* Check that we don't span multiple blocks - this breaks the
pbrook5579c7f2009-04-11 14:47:08 +00001998 address comparisons below. */
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001999 if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
pbrook5579c7f2009-04-11 14:47:08 +00002000 != (end - 1) - start) {
2001 abort();
2002 }
2003
bellard6a00d602005-11-21 23:25:50 +00002004 for(env = first_cpu; env != NULL; env = env->next_cpu) {
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002005 int mmu_idx;
2006 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2007 for(i = 0; i < CPU_TLB_SIZE; i++)
2008 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2009 start1, length);
2010 }
bellard6a00d602005-11-21 23:25:50 +00002011 }
bellard1ccde1c2004-02-06 19:46:14 +00002012}
2013
aliguori74576192008-10-06 14:02:03 +00002014int cpu_physical_memory_set_dirty_tracking(int enable)
2015{
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002016 int ret = 0;
aliguori74576192008-10-06 14:02:03 +00002017 in_migration = enable;
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002018 return ret;
aliguori74576192008-10-06 14:02:03 +00002019}
2020
bellard3a7d9292005-08-21 09:26:42 +00002021static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2022{
Anthony Liguoric227f092009-10-01 16:12:16 -05002023 ram_addr_t ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00002024 void *p;
bellard3a7d9292005-08-21 09:26:42 +00002025
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002026 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
pbrook5579c7f2009-04-11 14:47:08 +00002027 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2028 + tlb_entry->addend);
Marcelo Tosattie8902612010-10-11 15:31:19 -03002029 ram_addr = qemu_ram_addr_from_host_nofail(p);
bellard3a7d9292005-08-21 09:26:42 +00002030 if (!cpu_physical_memory_is_dirty(ram_addr)) {
pbrook0f459d12008-06-09 00:20:13 +00002031 tlb_entry->addr_write |= TLB_NOTDIRTY;
bellard3a7d9292005-08-21 09:26:42 +00002032 }
2033 }
2034}
2035
2036/* update the TLB according to the current state of the dirty bits */
2037void cpu_tlb_update_dirty(CPUState *env)
2038{
2039 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002040 int mmu_idx;
2041 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2042 for(i = 0; i < CPU_TLB_SIZE; i++)
2043 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2044 }
bellard3a7d9292005-08-21 09:26:42 +00002045}
2046
pbrook0f459d12008-06-09 00:20:13 +00002047static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002048{
pbrook0f459d12008-06-09 00:20:13 +00002049 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2050 tlb_entry->addr_write = vaddr;
bellard1ccde1c2004-02-06 19:46:14 +00002051}
2052
pbrook0f459d12008-06-09 00:20:13 +00002053/* update the TLB corresponding to virtual page vaddr
2054 so that it is no longer dirty */
2055static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002056{
bellard1ccde1c2004-02-06 19:46:14 +00002057 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002058 int mmu_idx;
bellard1ccde1c2004-02-06 19:46:14 +00002059
pbrook0f459d12008-06-09 00:20:13 +00002060 vaddr &= TARGET_PAGE_MASK;
bellard1ccde1c2004-02-06 19:46:14 +00002061 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002062 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2063 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
bellard9fa3e852004-01-04 18:06:42 +00002064}
2065
Paul Brookd4c430a2010-03-17 02:14:28 +00002066/* Our TLB does not support large pages, so remember the area covered by
2067 large pages and trigger a full TLB flush if these are invalidated. */
2068static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2069 target_ulong size)
2070{
2071 target_ulong mask = ~(size - 1);
2072
2073 if (env->tlb_flush_addr == (target_ulong)-1) {
2074 env->tlb_flush_addr = vaddr & mask;
2075 env->tlb_flush_mask = mask;
2076 return;
2077 }
2078 /* Extend the existing region to include the new page.
2079 This is a compromise between unnecessary flushes and the cost
2080 of maintaining a full variable-size TLB. */
2081 mask &= env->tlb_flush_mask;
2082 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2083 mask <<= 1;
2084 }
2085 env->tlb_flush_addr &= mask;
2086 env->tlb_flush_mask = mask;
2087}
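/* Worked example of the mask widening above, with 4K target pages:
   suppose the recorded region is tlb_flush_addr = 0x100000 with
   tlb_flush_mask = 0xffffe000 (an 8K region) and a page at
   vaddr = 0x104000 arrives. The loop shifts the mask left until
   (0x100000 ^ 0x104000) & mask == 0, ending at mask = 0xffff8000:
   a single 32K region covering both pages, at the price of coarser
   flushing later. */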
2088
Avi Kivity1d393fa2012-01-01 21:15:42 +02002089static bool is_ram_rom(ram_addr_t pd)
2090{
2091 pd &= ~TARGET_PAGE_MASK;
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002092 return pd == io_mem_ram.ram_addr || pd == io_mem_rom.ram_addr;
Avi Kivity1d393fa2012-01-01 21:15:42 +02002093}
2094
Avi Kivity75c578d2012-01-02 15:40:52 +02002095static bool is_romd(ram_addr_t pd)
2096{
2097 MemoryRegion *mr;
2098
2099 pd &= ~TARGET_PAGE_MASK;
Avi Kivity11c7ef02012-01-02 17:21:07 +02002100 mr = io_mem_region[pd];
Avi Kivity75c578d2012-01-02 15:40:52 +02002101 return mr->rom_device && mr->readable;
2102}
2103
Avi Kivity1d393fa2012-01-01 21:15:42 +02002104static bool is_ram_rom_romd(ram_addr_t pd)
2105{
Avi Kivity75c578d2012-01-02 15:40:52 +02002106 return is_ram_rom(pd) || is_romd(pd);
Avi Kivity1d393fa2012-01-01 21:15:42 +02002107}
2108
Paul Brookd4c430a2010-03-17 02:14:28 +00002109/* Add a new TLB entry. At most one entry for a given virtual address
2110 is permitted. Only a single TARGET_PAGE_SIZE region is mapped; the
2111 supplied size is only used by tlb_flush_page. */
2112void tlb_set_page(CPUState *env, target_ulong vaddr,
2113 target_phys_addr_t paddr, int prot,
2114 int mmu_idx, target_ulong size)
bellard9fa3e852004-01-04 18:06:42 +00002115{
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02002116 PhysPageDesc p;
bellard4f2ac232004-04-26 19:44:02 +00002117 unsigned long pd;
bellard9fa3e852004-01-04 18:06:42 +00002118 unsigned int index;
bellard4f2ac232004-04-26 19:44:02 +00002119 target_ulong address;
pbrook0f459d12008-06-09 00:20:13 +00002120 target_ulong code_address;
Paul Brook355b1942010-04-05 00:28:53 +01002121 unsigned long addend;
bellard84b7b8e2005-11-28 21:19:04 +00002122 CPUTLBEntry *te;
aliguoria1d1bb32008-11-18 20:07:32 +00002123 CPUWatchpoint *wp;
Anthony Liguoric227f092009-10-01 16:12:16 -05002124 target_phys_addr_t iotlb;
bellard9fa3e852004-01-04 18:06:42 +00002125
Paul Brookd4c430a2010-03-17 02:14:28 +00002126 assert(size >= TARGET_PAGE_SIZE);
2127 if (size != TARGET_PAGE_SIZE) {
2128 tlb_add_large_page(env, vaddr, size);
2129 }
bellard92e873b2004-05-21 14:52:29 +00002130 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02002131 pd = p.phys_offset;
bellard9fa3e852004-01-04 18:06:42 +00002132#if defined(DEBUG_TLB)
Stefan Weil7fd3f492010-09-30 22:39:51 +02002133 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
2134 " prot=%x idx=%d pd=0x%08lx\n",
2135 vaddr, paddr, prot, mmu_idx, pd);
bellard9fa3e852004-01-04 18:06:42 +00002136#endif
2137
pbrook0f459d12008-06-09 00:20:13 +00002138 address = vaddr;
Avi Kivity1d393fa2012-01-01 21:15:42 +02002139 if (!is_ram_rom_romd(pd)) {
pbrook0f459d12008-06-09 00:20:13 +00002140 /* IO memory case (romd handled later) */
2141 address |= TLB_MMIO;
2142 }
pbrook5579c7f2009-04-11 14:47:08 +00002143 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
Avi Kivity1d393fa2012-01-01 21:15:42 +02002144 if (is_ram_rom(pd)) {
pbrook0f459d12008-06-09 00:20:13 +00002145 /* Normal RAM. */
2146 iotlb = pd & TARGET_PAGE_MASK;
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002147 if ((pd & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr)
2148 iotlb |= io_mem_notdirty.ram_addr;
pbrook0f459d12008-06-09 00:20:13 +00002149 else
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002150 iotlb |= io_mem_rom.ram_addr;
pbrook0f459d12008-06-09 00:20:13 +00002151 } else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002152 /* IO handlers are currently passed a physical address.
pbrook0f459d12008-06-09 00:20:13 +00002153 It would be nice to pass an offset from the base address
2154 of that region. This would avoid having to special case RAM,
2155 and avoid full address decoding in every device.
2156 We can't use the high bits of pd for this because
2157 IO_MEM_ROMD uses these as a ram address. */
pbrook8da3ff12008-12-01 18:59:50 +00002158 iotlb = (pd & ~TARGET_PAGE_MASK);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02002159 iotlb += p.region_offset;
pbrook0f459d12008-06-09 00:20:13 +00002160 }
pbrook6658ffb2007-03-16 23:58:11 +00002161
pbrook0f459d12008-06-09 00:20:13 +00002162 code_address = address;
2163 /* Make accesses to pages with watchpoints go via the
2164 watchpoint trap routines. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00002165 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00002166 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
Jun Koibf298f82010-05-06 14:36:59 +09002167 /* Avoid trapping reads of pages with a write breakpoint. */
2168 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
Avi Kivity1ec9b902012-01-02 12:47:48 +02002169 iotlb = io_mem_watch.ram_addr + paddr;
Jun Koibf298f82010-05-06 14:36:59 +09002170 address |= TLB_MMIO;
2171 break;
2172 }
pbrook6658ffb2007-03-16 23:58:11 +00002173 }
pbrook0f459d12008-06-09 00:20:13 +00002174 }
balrogd79acba2007-06-26 20:01:13 +00002175
pbrook0f459d12008-06-09 00:20:13 +00002176 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2177 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2178 te = &env->tlb_table[mmu_idx][index];
2179 te->addend = addend - vaddr;
2180 if (prot & PAGE_READ) {
2181 te->addr_read = address;
2182 } else {
2183 te->addr_read = -1;
2184 }
edgar_igl5c751e92008-05-06 08:44:21 +00002185
pbrook0f459d12008-06-09 00:20:13 +00002186 if (prot & PAGE_EXEC) {
2187 te->addr_code = code_address;
2188 } else {
2189 te->addr_code = -1;
2190 }
2191 if (prot & PAGE_WRITE) {
Avi Kivity75c578d2012-01-02 15:40:52 +02002192 if ((pd & ~TARGET_PAGE_MASK) == io_mem_rom.ram_addr || is_romd(pd)) {
pbrook0f459d12008-06-09 00:20:13 +00002193 /* Write access calls the I/O callback. */
2194 te->addr_write = address | TLB_MMIO;
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002195 } else if ((pd & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr &&
pbrook0f459d12008-06-09 00:20:13 +00002196 !cpu_physical_memory_is_dirty(pd)) {
2197 te->addr_write = address | TLB_NOTDIRTY;
bellard84b7b8e2005-11-28 21:19:04 +00002198 } else {
pbrook0f459d12008-06-09 00:20:13 +00002199 te->addr_write = address;
bellard9fa3e852004-01-04 18:06:42 +00002200 }
pbrook0f459d12008-06-09 00:20:13 +00002201 } else {
2202 te->addr_write = -1;
bellard9fa3e852004-01-04 18:06:42 +00002203 }
bellard9fa3e852004-01-04 18:06:42 +00002204}
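/* Summary of the addr_write encoding chosen above: page-aligned
   addresses leave the low bits free, so they double as flags.
   TLB_MMIO forces stores through the I/O callbacks, TLB_NOTDIRTY
   routes the first store to a clean RAM page through the notdirty
   handler so the dirty bitmap gets updated, and a plain address takes
   the fast path:

       te->addr_write = address | TLB_MMIO;       ROM/ROMD or I/O page
       te->addr_write = address | TLB_NOTDIRTY;   clean RAM page
       te->addr_write = address;                  dirty RAM, fast path
*/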
2205
bellard01243112004-01-04 15:48:17 +00002206#else
2207
bellardee8b7022004-02-03 23:35:10 +00002208void tlb_flush(CPUState *env, int flush_global)
bellard01243112004-01-04 15:48:17 +00002209{
2210}
2211
bellard2e126692004-04-25 21:28:44 +00002212void tlb_flush_page(CPUState *env, target_ulong addr)
bellard01243112004-01-04 15:48:17 +00002213{
2214}
2215
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002216/*
2217 * Walks guest process memory "regions" one by one
2218 * and calls the callback function 'fn' for each region.
2219 */
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002220
2221struct walk_memory_regions_data
bellard9fa3e852004-01-04 18:06:42 +00002222{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002223 walk_memory_regions_fn fn;
2224 void *priv;
2225 unsigned long start;
2226 int prot;
2227};
bellard9fa3e852004-01-04 18:06:42 +00002228
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002229static int walk_memory_regions_end(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00002230 abi_ulong end, int new_prot)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002231{
2232 if (data->start != -1ul) {
2233 int rc = data->fn(data->priv, data->start, end, data->prot);
2234 if (rc != 0) {
2235 return rc;
bellard9fa3e852004-01-04 18:06:42 +00002236 }
bellard33417e72003-08-10 21:47:01 +00002237 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002238
2239 data->start = (new_prot ? end : -1ul);
2240 data->prot = new_prot;
2241
2242 return 0;
2243}
2244
2245static int walk_memory_regions_1(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00002246 abi_ulong base, int level, void **lp)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002247{
Paul Brookb480d9b2010-03-12 23:23:29 +00002248 abi_ulong pa;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002249 int i, rc;
2250
2251 if (*lp == NULL) {
2252 return walk_memory_regions_end(data, base, 0);
2253 }
2254
2255 if (level == 0) {
2256 PageDesc *pd = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00002257 for (i = 0; i < L2_SIZE; ++i) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002258 int prot = pd[i].flags;
2259
2260 pa = base | (i << TARGET_PAGE_BITS);
2261 if (prot != data->prot) {
2262 rc = walk_memory_regions_end(data, pa, prot);
2263 if (rc != 0) {
2264 return rc;
2265 }
2266 }
2267 }
2268 } else {
2269 void **pp = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00002270 for (i = 0; i < L2_SIZE; ++i) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002271 pa = base | ((abi_ulong)i <<
2272 (TARGET_PAGE_BITS + L2_BITS * level));
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002273 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2274 if (rc != 0) {
2275 return rc;
2276 }
2277 }
2278 }
2279
2280 return 0;
2281}
2282
2283int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2284{
2285 struct walk_memory_regions_data data;
2286 unsigned long i;
2287
2288 data.fn = fn;
2289 data.priv = priv;
2290 data.start = -1ul;
2291 data.prot = 0;
2292
2293 for (i = 0; i < V_L1_SIZE; i++) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002294 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002295 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2296 if (rc != 0) {
2297 return rc;
2298 }
2299 }
2300
2301 return walk_memory_regions_end(&data, 0, 0);
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002302}
2303
Paul Brookb480d9b2010-03-12 23:23:29 +00002304static int dump_region(void *priv, abi_ulong start,
2305 abi_ulong end, unsigned long prot)
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002306{
2307 FILE *f = (FILE *)priv;
2308
Paul Brookb480d9b2010-03-12 23:23:29 +00002309 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2310 " "TARGET_ABI_FMT_lx" %c%c%c\n",
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002311 start, end, end - start,
2312 ((prot & PAGE_READ) ? 'r' : '-'),
2313 ((prot & PAGE_WRITE) ? 'w' : '-'),
2314 ((prot & PAGE_EXEC) ? 'x' : '-'));
2315
2316 return (0);
2317}
2318
2319/* dump memory mappings */
2320void page_dump(FILE *f)
2321{
2322 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2323 "start", "end", "size", "prot");
2324 walk_memory_regions(f, dump_region);
bellard33417e72003-08-10 21:47:01 +00002325}
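/* Example of the resulting output (format only, addresses made up):

       start    end      size     prot
       00010000-00012000 00002000 r-x
       00020000-00021000 00001000 rw-
*/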
2326
pbrook53a59602006-03-25 19:31:22 +00002327int page_get_flags(target_ulong address)
bellard33417e72003-08-10 21:47:01 +00002328{
bellard9fa3e852004-01-04 18:06:42 +00002329 PageDesc *p;
2330
2331 p = page_find(address >> TARGET_PAGE_BITS);
bellard33417e72003-08-10 21:47:01 +00002332 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00002333 return 0;
2334 return p->flags;
bellard33417e72003-08-10 21:47:01 +00002335}
2336
Richard Henderson376a7902010-03-10 15:57:04 -08002337/* Modify the flags of a page and invalidate the code if necessary.
2338 The flag PAGE_WRITE_ORG is set automatically depending
2339 on PAGE_WRITE. The mmap_lock should already be held. */
pbrook53a59602006-03-25 19:31:22 +00002340void page_set_flags(target_ulong start, target_ulong end, int flags)
bellard9fa3e852004-01-04 18:06:42 +00002341{
Richard Henderson376a7902010-03-10 15:57:04 -08002342 target_ulong addr, len;
bellard9fa3e852004-01-04 18:06:42 +00002343
Richard Henderson376a7902010-03-10 15:57:04 -08002344 /* This function should never be called with addresses outside the
2345 guest address space. If this assert fires, it probably indicates
2346 a missing call to h2g_valid. */
Paul Brookb480d9b2010-03-12 23:23:29 +00002347#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2348 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002349#endif
2350 assert(start < end);
2351
bellard9fa3e852004-01-04 18:06:42 +00002352 start = start & TARGET_PAGE_MASK;
2353 end = TARGET_PAGE_ALIGN(end);
Richard Henderson376a7902010-03-10 15:57:04 -08002354
2355 if (flags & PAGE_WRITE) {
bellard9fa3e852004-01-04 18:06:42 +00002356 flags |= PAGE_WRITE_ORG;
Richard Henderson376a7902010-03-10 15:57:04 -08002357 }
2358
2359 for (addr = start, len = end - start;
2360 len != 0;
2361 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2362 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2363
2364 /* If the write protection bit is set, then we invalidate
2365 the code inside. */
ths5fafdf22007-09-16 21:08:06 +00002366 if (!(p->flags & PAGE_WRITE) &&
bellard9fa3e852004-01-04 18:06:42 +00002367 (flags & PAGE_WRITE) &&
2368 p->first_tb) {
bellardd720b932004-04-25 17:57:43 +00002369 tb_invalidate_phys_page(addr, 0, NULL);
bellard9fa3e852004-01-04 18:06:42 +00002370 }
2371 p->flags = flags;
2372 }
bellard9fa3e852004-01-04 18:06:42 +00002373}
2374
ths3d97b402007-11-02 19:02:07 +00002375int page_check_range(target_ulong start, target_ulong len, int flags)
2376{
2377 PageDesc *p;
2378 target_ulong end;
2379 target_ulong addr;
2380
Richard Henderson376a7902010-03-10 15:57:04 -08002381 /* This function should never be called with addresses outside the
2382 guest address space. If this assert fires, it probably indicates
2383 a missing call to h2g_valid. */
Blue Swirl338e9e62010-03-13 09:48:08 +00002384#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2385 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002386#endif
2387
Richard Henderson3e0650a2010-03-29 10:54:42 -07002388 if (len == 0) {
2389 return 0;
2390 }
Richard Henderson376a7902010-03-10 15:57:04 -08002391 if (start + len - 1 < start) {
2392 /* We've wrapped around. */
balrog55f280c2008-10-28 10:24:11 +00002393 return -1;
Richard Henderson376a7902010-03-10 15:57:04 -08002394 }
balrog55f280c2008-10-28 10:24:11 +00002395
ths3d97b402007-11-02 19:02:07 +00002396 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2397 start = start & TARGET_PAGE_MASK;
2398
Richard Henderson376a7902010-03-10 15:57:04 -08002399 for (addr = start, len = end - start;
2400 len != 0;
2401 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
ths3d97b402007-11-02 19:02:07 +00002402 p = page_find(addr >> TARGET_PAGE_BITS);
2403 if (!p)
2404 return -1;
2405 if (!(p->flags & PAGE_VALID))
2406 return -1;
2407
bellarddae32702007-11-14 10:51:00 +00002408 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
ths3d97b402007-11-02 19:02:07 +00002409 return -1;
bellarddae32702007-11-14 10:51:00 +00002410 if (flags & PAGE_WRITE) {
2411 if (!(p->flags & PAGE_WRITE_ORG))
2412 return -1;
2413 /* unprotect the page if it was put read-only because it
2414 contains translated code */
2415 if (!(p->flags & PAGE_WRITE)) {
2416 if (!page_unprotect(addr, 0, NULL))
2417 return -1;
2418 }
2419 return 0;
2420 }
ths3d97b402007-11-02 19:02:07 +00002421 }
2422 return 0;
2423}
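/* Usage sketch (illustrative): user-mode emulation validates guest
   buffers with this pattern (cf. access_ok() in linux-user) before
   touching them; the error constant here is a hypothetical stand-in:

       if (page_check_range(guest_addr, len, PAGE_READ | PAGE_WRITE) < 0) {
           return -EFAULT;
       }
*/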
2424
bellard9fa3e852004-01-04 18:06:42 +00002425/* called from signal handler: invalidate the code and unprotect the
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002426 page. Return TRUE if the fault was successfully handled. */
pbrook53a59602006-03-25 19:31:22 +00002427int page_unprotect(target_ulong address, unsigned long pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00002428{
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002429 unsigned int prot;
2430 PageDesc *p;
pbrook53a59602006-03-25 19:31:22 +00002431 target_ulong host_start, host_end, addr;
bellard9fa3e852004-01-04 18:06:42 +00002432
pbrookc8a706f2008-06-02 16:16:42 +00002433 /* Technically this isn't safe inside a signal handler. However we
2434 know this only ever happens in a synchronous SEGV handler, so in
2435 practice it seems to be ok. */
2436 mmap_lock();
2437
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002438 p = page_find(address >> TARGET_PAGE_BITS);
2439 if (!p) {
pbrookc8a706f2008-06-02 16:16:42 +00002440 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002441 return 0;
pbrookc8a706f2008-06-02 16:16:42 +00002442 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002443
bellard9fa3e852004-01-04 18:06:42 +00002444 /* if the page was really writable, then we change its
2445 protection back to writable */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002446 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2447 host_start = address & qemu_host_page_mask;
2448 host_end = host_start + qemu_host_page_size;
2449
2450 prot = 0;
2451 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2452 p = page_find(addr >> TARGET_PAGE_BITS);
2453 p->flags |= PAGE_WRITE;
2454 prot |= p->flags;
2455
bellard9fa3e852004-01-04 18:06:42 +00002456 /* and since the content will be modified, we must invalidate
2457 the corresponding translated code. */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002458 tb_invalidate_phys_page(addr, pc, puc);
bellard9fa3e852004-01-04 18:06:42 +00002459#ifdef DEBUG_TB_CHECK
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002460 tb_invalidate_check(addr);
bellard9fa3e852004-01-04 18:06:42 +00002461#endif
bellard9fa3e852004-01-04 18:06:42 +00002462 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002463 mprotect((void *)g2h(host_start), qemu_host_page_size,
2464 prot & PAGE_BITS);
2465
2466 mmap_unlock();
2467 return 1;
bellard9fa3e852004-01-04 18:06:42 +00002468 }
pbrookc8a706f2008-06-02 16:16:42 +00002469 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002470 return 0;
2471}
2472
bellard6a00d602005-11-21 23:25:50 +00002473static inline void tlb_set_dirty(CPUState *env,
2474 unsigned long addr, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002475{
2476}
bellard9fa3e852004-01-04 18:06:42 +00002477#endif /* defined(CONFIG_USER_ONLY) */
2478
pbrooke2eef172008-06-08 01:09:01 +00002479#if !defined(CONFIG_USER_ONLY)
pbrook8da3ff12008-12-01 18:59:50 +00002480
Paul Brookc04b2b72010-03-01 03:31:14 +00002481#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2482typedef struct subpage_t {
Avi Kivity70c68e42012-01-02 12:32:48 +02002483 MemoryRegion iomem;
Paul Brookc04b2b72010-03-01 03:31:14 +00002484 target_phys_addr_t base;
Richard Hendersonf6405242010-04-22 16:47:31 -07002485 ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2486 ram_addr_t region_offset[TARGET_PAGE_SIZE];
Paul Brookc04b2b72010-03-01 03:31:14 +00002487} subpage_t;
2488
Anthony Liguoric227f092009-10-01 16:12:16 -05002489static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2490 ram_addr_t memory, ram_addr_t region_offset);
Richard Hendersonf6405242010-04-22 16:47:31 -07002491static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2492 ram_addr_t orig_memory,
2493 ram_addr_t region_offset);
blueswir1db7b5422007-05-26 17:36:03 +00002494#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2495 need_subpage) \
2496 do { \
2497 if (addr > start_addr) \
2498 start_addr2 = 0; \
2499 else { \
2500 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2501 if (start_addr2 > 0) \
2502 need_subpage = 1; \
2503 } \
2504 \
blueswir149e9fba2007-05-30 17:25:06 +00002505 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
blueswir1db7b5422007-05-26 17:36:03 +00002506 end_addr2 = TARGET_PAGE_SIZE - 1; \
2507 else { \
2508 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2509 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2510 need_subpage = 1; \
2511 } \
2512 } while (0)
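/* Worked example for CHECK_SUBPAGE, with 4K target pages: registering
   orig_size = 0x80 at start_addr = 0x1040 and considering the page at
   addr = 0x1000 yields start_addr2 = 0x040 and end_addr2 = 0x0bf, so
   need_subpage is set and only bytes 0x40..0xbf of that page are routed
   through the subpage handler. */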
2513
Avi Kivity54688b12012-02-09 17:34:32 +02002514static void destroy_page_desc(PhysPageDesc pd)
2515{
2516 unsigned io_index = pd.phys_offset & ~TARGET_PAGE_MASK;
2517 MemoryRegion *mr = io_mem_region[io_index];
2518
2519 if (mr->subpage) {
2520 subpage_t *subpage = container_of(mr, subpage_t, iomem);
2521 memory_region_destroy(&subpage->iomem);
2522 g_free(subpage);
2523 }
2524}
2525
2526static void destroy_l2_mapping(void **lp, unsigned level)
2527{
2528 unsigned i;
2529 void **p;
2530 PhysPageDesc *pd;
2531
2532 if (!*lp) {
2533 return;
2534 }
2535
2536 if (level > 0) {
2537 p = *lp;
2538 for (i = 0; i < L2_SIZE; ++i) {
2539 destroy_l2_mapping(&p[i], level - 1);
2540 }
2541 g_free(p);
2542 } else {
2543 pd = *lp;
2544 for (i = 0; i < L2_SIZE; ++i) {
2545 destroy_page_desc(pd[i]);
2546 }
2547 g_free(pd);
2548 }
2549 *lp = NULL;
2550}
2551
2552static void destroy_all_mappings(void)
2553{
Avi Kivity3eef53d2012-02-10 14:57:31 +02002554 destroy_l2_mapping(&phys_map, P_L2_LEVELS - 1);
Avi Kivity54688b12012-02-09 17:34:32 +02002555}
2556
Michael S. Tsirkin8f2498f2009-09-29 18:53:16 +02002557/* register physical memory.
2558 For RAM, 'size' must be a multiple of the target page size.
2559 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
pbrook8da3ff12008-12-01 18:59:50 +00002560 io memory page. The address used when calling the IO function is
2561 the offset from the start of the region, plus region_offset. Both
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002562 start_addr and region_offset are rounded down to a page boundary
pbrook8da3ff12008-12-01 18:59:50 +00002563 before calculating this offset. This should not be a problem unless
2564 the low bits of start_addr and region_offset differ. */
Avi Kivitydd811242012-01-02 12:17:03 +02002565void cpu_register_physical_memory_log(MemoryRegionSection *section,
Avi Kivityd7ec83e2012-02-08 17:07:26 +02002566 bool readonly)
bellard33417e72003-08-10 21:47:01 +00002567{
Avi Kivitydd811242012-01-02 12:17:03 +02002568 target_phys_addr_t start_addr = section->offset_within_address_space;
2569 ram_addr_t size = section->size;
2570 ram_addr_t phys_offset = section->mr->ram_addr;
2571 ram_addr_t region_offset = section->offset_within_region;
Anthony Liguoric227f092009-10-01 16:12:16 -05002572 target_phys_addr_t addr, end_addr;
bellard92e873b2004-05-21 14:52:29 +00002573 PhysPageDesc *p;
bellard9d420372006-06-25 22:25:22 +00002574 CPUState *env;
Anthony Liguoric227f092009-10-01 16:12:16 -05002575 ram_addr_t orig_size = size;
Richard Hendersonf6405242010-04-22 16:47:31 -07002576 subpage_t *subpage;
bellard33417e72003-08-10 21:47:01 +00002577
Avi Kivitydd811242012-01-02 12:17:03 +02002578 if (memory_region_is_ram(section->mr)) {
2579 phys_offset += region_offset;
2580 region_offset = 0;
2581 }
2582
Avi Kivitydd811242012-01-02 12:17:03 +02002583 if (readonly) {
2584 phys_offset |= io_mem_rom.ram_addr;
2585 }
2586
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002587 assert(size);
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002588
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002589 if (phys_offset == io_mem_unassigned.ram_addr) {
pbrook67c4d232009-02-23 13:16:07 +00002590 region_offset = start_addr;
2591 }
pbrook8da3ff12008-12-01 18:59:50 +00002592 region_offset &= TARGET_PAGE_MASK;
bellard5fd386f2004-05-23 21:11:22 +00002593 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
Anthony Liguoric227f092009-10-01 16:12:16 -05002594 end_addr = start_addr + (target_phys_addr_t)size;
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002595
2596 addr = start_addr;
2597 do {
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02002598 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 0);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002599 if (p && p->phys_offset != io_mem_unassigned.ram_addr) {
Anthony Liguoric227f092009-10-01 16:12:16 -05002600 ram_addr_t orig_memory = p->phys_offset;
2601 target_phys_addr_t start_addr2, end_addr2;
blueswir1db7b5422007-05-26 17:36:03 +00002602 int need_subpage = 0;
Avi Kivity11c7ef02012-01-02 17:21:07 +02002603 MemoryRegion *mr = io_mem_region[orig_memory & ~TARGET_PAGE_MASK];
blueswir1db7b5422007-05-26 17:36:03 +00002604
2605 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2606 need_subpage);
Richard Hendersonf6405242010-04-22 16:47:31 -07002607 if (need_subpage) {
Avi Kivityb3b00c72012-01-02 13:20:11 +02002608 if (!(mr->subpage)) {
blueswir1db7b5422007-05-26 17:36:03 +00002609 subpage = subpage_init((addr & TARGET_PAGE_MASK),
pbrook8da3ff12008-12-01 18:59:50 +00002610 &p->phys_offset, orig_memory,
2611 p->region_offset);
blueswir1db7b5422007-05-26 17:36:03 +00002612 } else {
Avi Kivitya621f382012-01-02 13:12:08 +02002613 subpage = container_of(mr, subpage_t, iomem);
blueswir1db7b5422007-05-26 17:36:03 +00002614 }
pbrook8da3ff12008-12-01 18:59:50 +00002615 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2616 region_offset);
2617 p->region_offset = 0;
blueswir1db7b5422007-05-26 17:36:03 +00002618 } else {
2619 p->phys_offset = phys_offset;
Avi Kivity2774c6d2012-01-01 18:24:24 +02002620 p->region_offset = region_offset;
Avi Kivity1d393fa2012-01-01 21:15:42 +02002621 if (is_ram_rom_romd(phys_offset))
blueswir1db7b5422007-05-26 17:36:03 +00002622 phys_offset += TARGET_PAGE_SIZE;
2623 }
2624 } else {
2625 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2626 p->phys_offset = phys_offset;
pbrook8da3ff12008-12-01 18:59:50 +00002627 p->region_offset = region_offset;
Avi Kivity1d393fa2012-01-01 21:15:42 +02002628 if (is_ram_rom_romd(phys_offset)) {
blueswir1db7b5422007-05-26 17:36:03 +00002629 phys_offset += TARGET_PAGE_SIZE;
pbrook0e8f0962008-12-02 09:02:15 +00002630 } else {
Anthony Liguoric227f092009-10-01 16:12:16 -05002631 target_phys_addr_t start_addr2, end_addr2;
blueswir1db7b5422007-05-26 17:36:03 +00002632 int need_subpage = 0;
2633
2634 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2635 end_addr2, need_subpage);
2636
Richard Hendersonf6405242010-04-22 16:47:31 -07002637 if (need_subpage) {
blueswir1db7b5422007-05-26 17:36:03 +00002638 subpage = subpage_init((addr & TARGET_PAGE_MASK),
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002639 &p->phys_offset,
2640 io_mem_unassigned.ram_addr,
pbrook67c4d232009-02-23 13:16:07 +00002641 addr & TARGET_PAGE_MASK);
blueswir1db7b5422007-05-26 17:36:03 +00002642 subpage_register(subpage, start_addr2, end_addr2,
pbrook8da3ff12008-12-01 18:59:50 +00002643 phys_offset, region_offset);
2644 p->region_offset = 0;
blueswir1db7b5422007-05-26 17:36:03 +00002645 }
2646 }
2647 }
pbrook8da3ff12008-12-01 18:59:50 +00002648 region_offset += TARGET_PAGE_SIZE;
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002649 addr += TARGET_PAGE_SIZE;
2650 } while (addr != end_addr);
ths3b46e622007-09-17 08:09:54 +00002651
bellard9d420372006-06-25 22:25:22 +00002652 /* since each CPU stores ram addresses in its TLB cache, we must
2653 reset the modified entries */
2654 /* XXX: slow ! */
2655 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2656 tlb_flush(env, 1);
2657 }
bellard33417e72003-08-10 21:47:01 +00002658}
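/* Editorial note (not in the original source): assuming 4 KiB target pages,
 * registering start_addr = 0x1200 with size = 0x2200 makes the loop above
 * visit the pages at 0x1000, 0x2000 and 0x3000.  The first gets a subpage
 * covering offsets 0x200..0xfff, the middle page gets a plain PhysPageDesc
 * for the whole page, and the last gets a subpage covering offsets
 * 0x000..0x3ff. */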
2659
Anthony Liguoric227f092009-10-01 16:12:16 -05002660void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002661{
2662 if (kvm_enabled())
2663 kvm_coalesce_mmio_region(addr, size);
2664}
2665
Anthony Liguoric227f092009-10-01 16:12:16 -05002666void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002667{
2668 if (kvm_enabled())
2669 kvm_uncoalesce_mmio_region(addr, size);
2670}
2671
Sheng Yang62a27442010-01-26 19:21:16 +08002672void qemu_flush_coalesced_mmio_buffer(void)
2673{
2674 if (kvm_enabled())
2675 kvm_flush_coalesced_mmio_buffer();
2676}
2677
Marcelo Tosattic9027602010-03-01 20:25:08 -03002678#if defined(__linux__) && !defined(TARGET_S390X)
2679
2680#include <sys/vfs.h>
2681
2682#define HUGETLBFS_MAGIC 0x958458f6
2683
2684static long gethugepagesize(const char *path)
2685{
2686 struct statfs fs;
2687 int ret;
2688
2689 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002690 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002691 } while (ret != 0 && errno == EINTR);
2692
2693 if (ret != 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002694 perror(path);
2695 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002696 }
2697
2698 if (fs.f_type != HUGETLBFS_MAGIC)
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002699 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002700
2701 return fs.f_bsize;
2702}
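/* Editorial sketch (not part of the original source, kept out of the
 * build): probing a hugetlbfs mount point with the helper above.
 * "/dev/hugepages" is an assumption; any hugetlbfs directory works, and a
 * return of 0 signals failure. */
#if 0
static void example_probe_hugetlbfs(void)
{
    long pagesize = gethugepagesize("/dev/hugepages");
    if (pagesize) {
        printf("huge page size: %ld bytes\n", pagesize); /* e.g. 2097152 */
    }
}
#endif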
2703
Alex Williamson04b16652010-07-02 11:13:17 -06002704static void *file_ram_alloc(RAMBlock *block,
2705 ram_addr_t memory,
2706 const char *path)
Marcelo Tosattic9027602010-03-01 20:25:08 -03002707{
2708 char *filename;
2709 void *area;
2710 int fd;
2711#ifdef MAP_POPULATE
2712 int flags;
2713#endif
2714 unsigned long hpagesize;
2715
2716 hpagesize = gethugepagesize(path);
2717 if (!hpagesize) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002718 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002719 }
2720
2721 if (memory < hpagesize) {
2722 return NULL;
2723 }
2724
2725 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2726 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2727 return NULL;
2728 }
2729
2730 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002731 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002732 }
2733
2734 fd = mkstemp(filename);
2735 if (fd < 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002736 perror("unable to create backing store for hugepages");
2737 free(filename);
2738 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002739 }
2740 unlink(filename);
2741 free(filename);
2742
2743 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2744
2745 /*
2746 * ftruncate is not supported by hugetlbfs in older
2747 * hosts, so don't bother bailing out on errors.
2748 * If anything goes wrong with it under other filesystems,
2749 * mmap will fail.
2750 */
2751 if (ftruncate(fd, memory))
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002752 perror("ftruncate");
Marcelo Tosattic9027602010-03-01 20:25:08 -03002753
2754#ifdef MAP_POPULATE
2755 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2756 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2757 * to sidestep this quirk.
2758 */
2759 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2760 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2761#else
2762 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2763#endif
2764 if (area == MAP_FAILED) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002765 perror("file_ram_alloc: can't mmap RAM pages");
2766 close(fd);
 2767 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002768 }
Alex Williamson04b16652010-07-02 11:13:17 -06002769 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002770 return area;
2771}
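/* Editorial sketch (not part of the original source, kept out of the
 * build): the -mem-path flow.  The RAMBlock is normally set up by
 * qemu_ram_alloc_from_ptr() below; on a NULL return the caller falls back
 * to qemu_vmalloc(). */
#if 0
static void *example_hugepage_backing(RAMBlock *block, ram_addr_t size)
{
    void *area = file_ram_alloc(block, size, "/dev/hugepages");
    if (area == NULL) {
        /* caller falls back to anonymous memory */
    }
    return area;
}
#endif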
2772#endif
2773
Alex Williamsond17b5282010-06-25 11:08:38 -06002774static ram_addr_t find_ram_offset(ram_addr_t size)
2775{
Alex Williamson04b16652010-07-02 11:13:17 -06002776 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06002777 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002778
2779 if (QLIST_EMPTY(&ram_list.blocks))
2780 return 0;
2781
2782 QLIST_FOREACH(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002783 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002784
2785 end = block->offset + block->length;
2786
2787 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2788 if (next_block->offset >= end) {
2789 next = MIN(next, next_block->offset);
2790 }
2791 }
2792 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06002793 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06002794 mingap = next - end;
2795 }
2796 }
Alex Williamson3e837b22011-10-31 08:54:09 -06002797
2798 if (offset == RAM_ADDR_MAX) {
2799 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2800 (uint64_t)size);
2801 abort();
2802 }
2803
Alex Williamson04b16652010-07-02 11:13:17 -06002804 return offset;
2805}
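/* Editorial note (not in the original source): with existing blocks at
 * [0x0, 0x4000) and [0x8000, 0xa000), find_ram_offset(0x2000) sees two
 * candidate gaps: [0x4000, 0x8000) of size 0x4000, and the unbounded space
 * past 0xa000 (next == RAM_ADDR_MAX).  The smallest gap that fits wins, so
 * the new block is placed at 0x4000. */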
2806
2807static ram_addr_t last_ram_offset(void)
2808{
Alex Williamsond17b5282010-06-25 11:08:38 -06002809 RAMBlock *block;
2810 ram_addr_t last = 0;
2811
2812 QLIST_FOREACH(block, &ram_list.blocks, next)
2813 last = MAX(last, block->offset + block->length);
2814
2815 return last;
2816}
2817
Avi Kivityc5705a72011-12-20 15:59:12 +02002818void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
Cam Macdonell84b89d72010-07-26 18:10:57 -06002819{
2820 RAMBlock *new_block, *block;
2821
Avi Kivityc5705a72011-12-20 15:59:12 +02002822 new_block = NULL;
2823 QLIST_FOREACH(block, &ram_list.blocks, next) {
2824 if (block->offset == addr) {
2825 new_block = block;
2826 break;
2827 }
2828 }
2829 assert(new_block);
2830 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002831
2832 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2833 char *id = dev->parent_bus->info->get_dev_path(dev);
2834 if (id) {
2835 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05002836 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002837 }
2838 }
2839 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2840
2841 QLIST_FOREACH(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02002842 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06002843 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2844 new_block->idstr);
2845 abort();
2846 }
2847 }
Avi Kivityc5705a72011-12-20 15:59:12 +02002848}
2849
2850ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2851 MemoryRegion *mr)
2852{
2853 RAMBlock *new_block;
2854
2855 size = TARGET_PAGE_ALIGN(size);
2856 new_block = g_malloc0(sizeof(*new_block));
Cam Macdonell84b89d72010-07-26 18:10:57 -06002857
Avi Kivity7c637362011-12-21 13:09:49 +02002858 new_block->mr = mr;
Jun Nakajima432d2682010-08-31 16:41:25 +01002859 new_block->offset = find_ram_offset(size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002860 if (host) {
2861 new_block->host = host;
Huang Yingcd19cfa2011-03-02 08:56:19 +01002862 new_block->flags |= RAM_PREALLOC_MASK;
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002863 } else {
2864 if (mem_path) {
2865#if defined (__linux__) && !defined(TARGET_S390X)
2866 new_block->host = file_ram_alloc(new_block, size, mem_path);
2867 if (!new_block->host) {
2868 new_block->host = qemu_vmalloc(size);
Andreas Färbere78815a2010-09-25 11:26:05 +00002869 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002870 }
2871#else
2872 fprintf(stderr, "-mem-path option unsupported\n");
2873 exit(1);
2874#endif
2875 } else {
2876#if defined(TARGET_S390X) && defined(CONFIG_KVM)
Christian Borntraegerff836782011-05-10 14:49:10 +02002877 /* S390 KVM requires the topmost vma of the RAM to be smaller than
 2878 a system-defined value, which is at least 256GB. Larger systems
 2879 have larger values. We put the guest between the end of the data
 2880 segment (system break) and this value. We use 32GB as a base to
2881 have enough room for the system break to grow. */
2882 new_block->host = mmap((void*)0x800000000, size,
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002883 PROT_EXEC|PROT_READ|PROT_WRITE,
Christian Borntraegerff836782011-05-10 14:49:10 +02002884 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
Alexander Graffb8b2732011-05-20 17:33:28 +02002885 if (new_block->host == MAP_FAILED) {
2886 fprintf(stderr, "Allocating RAM failed\n");
2887 abort();
2888 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002889#else
Jan Kiszka868bb332011-06-21 22:59:09 +02002890 if (xen_enabled()) {
Avi Kivityfce537d2011-12-18 15:48:55 +02002891 xen_ram_alloc(new_block->offset, size, mr);
Jun Nakajima432d2682010-08-31 16:41:25 +01002892 } else {
2893 new_block->host = qemu_vmalloc(size);
2894 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002895#endif
Andreas Färbere78815a2010-09-25 11:26:05 +00002896 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002897 }
2898 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06002899 new_block->length = size;
2900
2901 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2902
Anthony Liguori7267c092011-08-20 22:09:37 -05002903 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
Cam Macdonell84b89d72010-07-26 18:10:57 -06002904 last_ram_offset() >> TARGET_PAGE_BITS);
2905 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2906 0xff, size >> TARGET_PAGE_BITS);
2907
2908 if (kvm_enabled())
2909 kvm_setup_guest_memory(new_block->host, size);
2910
2911 return new_block->offset;
2912}
2913
Avi Kivityc5705a72011-12-20 15:59:12 +02002914ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
pbrook94a6b542009-04-11 17:15:54 +00002915{
Avi Kivityc5705a72011-12-20 15:59:12 +02002916 return qemu_ram_alloc_from_ptr(size, NULL, mr);
pbrook94a6b542009-04-11 17:15:54 +00002917}
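/* Editorial sketch (not part of the original source, kept out of the
 * build): how a device might allocate backing RAM with the API above.
 * "example.vram" and the 16 MiB size are made up for illustration. */
#if 0
static ram_addr_t example_alloc_vram(MemoryRegion *mr)
{
    ram_addr_t offset = qemu_ram_alloc(16 * 1024 * 1024, mr);
    /* A stable id string lets migration match the block up again. */
    qemu_ram_set_idstr(offset, "example.vram", NULL);
    return offset;
}
#endif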
bellarde9a1ab12007-02-08 23:08:38 +00002918
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002919void qemu_ram_free_from_ptr(ram_addr_t addr)
2920{
2921 RAMBlock *block;
2922
2923 QLIST_FOREACH(block, &ram_list.blocks, next) {
2924 if (addr == block->offset) {
2925 QLIST_REMOVE(block, next);
Anthony Liguori7267c092011-08-20 22:09:37 -05002926 g_free(block);
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002927 return;
2928 }
2929 }
2930}
2931
Anthony Liguoric227f092009-10-01 16:12:16 -05002932void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00002933{
Alex Williamson04b16652010-07-02 11:13:17 -06002934 RAMBlock *block;
2935
2936 QLIST_FOREACH(block, &ram_list.blocks, next) {
2937 if (addr == block->offset) {
2938 QLIST_REMOVE(block, next);
Huang Yingcd19cfa2011-03-02 08:56:19 +01002939 if (block->flags & RAM_PREALLOC_MASK) {
2940 ;
2941 } else if (mem_path) {
Alex Williamson04b16652010-07-02 11:13:17 -06002942#if defined (__linux__) && !defined(TARGET_S390X)
2943 if (block->fd) {
2944 munmap(block->host, block->length);
2945 close(block->fd);
2946 } else {
2947 qemu_vfree(block->host);
2948 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01002949#else
2950 abort();
Alex Williamson04b16652010-07-02 11:13:17 -06002951#endif
2952 } else {
2953#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2954 munmap(block->host, block->length);
2955#else
Jan Kiszka868bb332011-06-21 22:59:09 +02002956 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002957 xen_invalidate_map_cache_entry(block->host);
Jun Nakajima432d2682010-08-31 16:41:25 +01002958 } else {
2959 qemu_vfree(block->host);
2960 }
Alex Williamson04b16652010-07-02 11:13:17 -06002961#endif
2962 }
Anthony Liguori7267c092011-08-20 22:09:37 -05002963 g_free(block);
Alex Williamson04b16652010-07-02 11:13:17 -06002964 return;
2965 }
2966 }
2967
bellarde9a1ab12007-02-08 23:08:38 +00002968}
2969
Huang Yingcd19cfa2011-03-02 08:56:19 +01002970#ifndef _WIN32
2971void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2972{
2973 RAMBlock *block;
2974 ram_addr_t offset;
2975 int flags;
2976 void *area, *vaddr;
2977
2978 QLIST_FOREACH(block, &ram_list.blocks, next) {
2979 offset = addr - block->offset;
2980 if (offset < block->length) {
2981 vaddr = block->host + offset;
2982 if (block->flags & RAM_PREALLOC_MASK) {
2983 ;
2984 } else {
2985 flags = MAP_FIXED;
2986 munmap(vaddr, length);
2987 if (mem_path) {
2988#if defined(__linux__) && !defined(TARGET_S390X)
2989 if (block->fd) {
2990#ifdef MAP_POPULATE
2991 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
2992 MAP_PRIVATE;
2993#else
2994 flags |= MAP_PRIVATE;
2995#endif
2996 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2997 flags, block->fd, offset);
2998 } else {
2999 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3000 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3001 flags, -1, 0);
3002 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01003003#else
3004 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01003005#endif
3006 } else {
3007#if defined(TARGET_S390X) && defined(CONFIG_KVM)
3008 flags |= MAP_SHARED | MAP_ANONYMOUS;
3009 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
3010 flags, -1, 0);
3011#else
3012 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3013 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3014 flags, -1, 0);
3015#endif
3016 }
3017 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00003018 fprintf(stderr, "Could not remap addr: "
3019 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01003020 length, addr);
3021 exit(1);
3022 }
3023 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
3024 }
3025 return;
3026 }
3027 }
3028}
3029#endif /* !_WIN32 */
3030
pbrookdc828ca2009-04-09 22:21:07 +00003031/* Return a host pointer to ram allocated with qemu_ram_alloc.
pbrook5579c7f2009-04-11 14:47:08 +00003032 With the exception of the softmmu code in this file, this should
3033 only be used for local memory (e.g. video ram) that the device owns,
3034 and knows it isn't going to access beyond the end of the block.
3035
3036 It should not be used for general purpose DMA.
3037 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
3038 */
Anthony Liguoric227f092009-10-01 16:12:16 -05003039void *qemu_get_ram_ptr(ram_addr_t addr)
pbrookdc828ca2009-04-09 22:21:07 +00003040{
pbrook94a6b542009-04-11 17:15:54 +00003041 RAMBlock *block;
3042
Alex Williamsonf471a172010-06-11 11:11:42 -06003043 QLIST_FOREACH(block, &ram_list.blocks, next) {
3044 if (addr - block->offset < block->length) {
Vincent Palatin7d82af32011-03-10 15:47:46 -05003045 /* Move this entry to the start of the list. */
3046 if (block != QLIST_FIRST(&ram_list.blocks)) {
3047 QLIST_REMOVE(block, next);
3048 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
3049 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003050 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003051 /* We need to check whether the requested address is in RAM
 3052 * because we don't want to map all of guest memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003053 * For the main RAM block, just map up to the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01003054 */
3055 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003056 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01003057 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003058 block->host =
3059 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01003060 }
3061 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003062 return block->host + (addr - block->offset);
3063 }
pbrook94a6b542009-04-11 17:15:54 +00003064 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003065
3066 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3067 abort();
3068
3069 return NULL;
pbrookdc828ca2009-04-09 22:21:07 +00003070}
3071
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02003072/* Return a host pointer to ram allocated with qemu_ram_alloc.
 3073 * Same as qemu_get_ram_ptr, but avoids reordering the RAM blocks.
3074 */
3075void *qemu_safe_ram_ptr(ram_addr_t addr)
3076{
3077 RAMBlock *block;
3078
3079 QLIST_FOREACH(block, &ram_list.blocks, next) {
3080 if (addr - block->offset < block->length) {
Jan Kiszka868bb332011-06-21 22:59:09 +02003081 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003082 /* We need to check whether the requested address is in RAM
 3083 * because we don't want to map all of guest memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003084 * For the main RAM block, just map up to the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01003085 */
3086 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003087 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01003088 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003089 block->host =
3090 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01003091 }
3092 }
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02003093 return block->host + (addr - block->offset);
3094 }
3095 }
3096
3097 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3098 abort();
3099
3100 return NULL;
3101}
3102
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003103/* Return a host pointer to the guest's RAM. Similar to qemu_get_ram_ptr,
 3104 * but takes a size argument. */
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003105void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003106{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003107 if (*size == 0) {
3108 return NULL;
3109 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003110 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003111 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02003112 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003113 RAMBlock *block;
3114
3115 QLIST_FOREACH(block, &ram_list.blocks, next) {
3116 if (addr - block->offset < block->length) {
3117 if (addr - block->offset + *size > block->length)
3118 *size = block->length - addr + block->offset;
3119 return block->host + (addr - block->offset);
3120 }
3121 }
3122
3123 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3124 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003125 }
3126}
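/* Editorial note (not in the original source): the clamp above means that
 * asking for 0x400 bytes at offset 0x1f00 of a 0x2000-byte block returns a
 * pointer to its last 0x100 bytes and rewrites *size to 0x100; callers must
 * re-check *size instead of assuming the full length was mapped. */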
3127
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003128void qemu_put_ram_ptr(void *addr)
3129{
3130 trace_qemu_put_ram_ptr(addr);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003131}
3132
Marcelo Tosattie8902612010-10-11 15:31:19 -03003133int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00003134{
pbrook94a6b542009-04-11 17:15:54 +00003135 RAMBlock *block;
3136 uint8_t *host = ptr;
3137
Jan Kiszka868bb332011-06-21 22:59:09 +02003138 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003139 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003140 return 0;
3141 }
3142
Alex Williamsonf471a172010-06-11 11:11:42 -06003143 QLIST_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003144 /* This case happens when the block is not mapped. */
3145 if (block->host == NULL) {
3146 continue;
3147 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003148 if (host - block->host < block->length) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03003149 *ram_addr = block->offset + (host - block->host);
3150 return 0;
Alex Williamsonf471a172010-06-11 11:11:42 -06003151 }
pbrook94a6b542009-04-11 17:15:54 +00003152 }
Jun Nakajima432d2682010-08-31 16:41:25 +01003153
Marcelo Tosattie8902612010-10-11 15:31:19 -03003154 return -1;
3155}
Alex Williamsonf471a172010-06-11 11:11:42 -06003156
Marcelo Tosattie8902612010-10-11 15:31:19 -03003157/* Some of the softmmu routines need to translate from a host pointer
3158 (typically a TLB entry) back to a ram offset. */
3159ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3160{
3161 ram_addr_t ram_addr;
Alex Williamsonf471a172010-06-11 11:11:42 -06003162
Marcelo Tosattie8902612010-10-11 15:31:19 -03003163 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3164 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3165 abort();
3166 }
3167 return ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00003168}
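/* Editorial sketch (not part of the original source, kept out of the
 * build): the two lookups are inverses for any offset inside a mapped
 * block (the Xen mapcache path aside). */
#if 0
static void example_round_trip(ram_addr_t addr)
{
    void *host = qemu_get_ram_ptr(addr);
    assert(qemu_ram_addr_from_host_nofail(host) == addr);
}
#endif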
3169
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003170static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
3171 unsigned size)
bellard33417e72003-08-10 21:47:01 +00003172{
pbrook67d3b952006-12-18 05:03:52 +00003173#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00003174 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
pbrook67d3b952006-12-18 05:03:52 +00003175#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003176#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003177 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00003178#endif
3179 return 0;
3180}
3181
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003182static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
3183 uint64_t val, unsigned size)
blueswir1e18231a2008-10-06 18:46:28 +00003184{
3185#ifdef DEBUG_UNASSIGNED
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003186 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
blueswir1e18231a2008-10-06 18:46:28 +00003187#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003188#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003189 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00003190#endif
3191}
3192
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003193static const MemoryRegionOps unassigned_mem_ops = {
3194 .read = unassigned_mem_read,
3195 .write = unassigned_mem_write,
3196 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00003197};
3198
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003199static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
3200 unsigned size)
3201{
3202 abort();
3203}
3204
3205static void error_mem_write(void *opaque, target_phys_addr_t addr,
3206 uint64_t value, unsigned size)
3207{
3208 abort();
3209}
3210
3211static const MemoryRegionOps error_mem_ops = {
3212 .read = error_mem_read,
3213 .write = error_mem_write,
3214 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00003215};
3216
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003217static const MemoryRegionOps rom_mem_ops = {
3218 .read = error_mem_read,
3219 .write = unassigned_mem_write,
3220 .endianness = DEVICE_NATIVE_ENDIAN,
3221};
3222
3223static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
3224 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00003225{
bellard3a7d9292005-08-21 09:26:42 +00003226 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003227 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003228 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3229#if !defined(CONFIG_USER_ONLY)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003230 tb_invalidate_phys_page_fast(ram_addr, size);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003231 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003232#endif
3233 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003234 switch (size) {
3235 case 1:
3236 stb_p(qemu_get_ram_ptr(ram_addr), val);
3237 break;
3238 case 2:
3239 stw_p(qemu_get_ram_ptr(ram_addr), val);
3240 break;
3241 case 4:
3242 stl_p(qemu_get_ram_ptr(ram_addr), val);
3243 break;
3244 default:
3245 abort();
3246 }
bellardf23db162005-08-21 19:12:28 +00003247 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003248 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00003249 /* we remove the notdirty callback only if the code has been
3250 flushed */
3251 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00003252 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00003253}
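/* Editorial note (not in the original source): with the dirty-flag values
 * from cpu-all.h (CODE_DIRTY_FLAG == 0x02), the OR above sets 0xfd, so
 * dirty_flags can only reach 0xff once tb_invalidate_phys_page_fast() has
 * flushed all translated code on the page and CODE_DIRTY_FLAG is set; only
 * then does tlb_set_dirty() retire the notdirty handler. */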
3254
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003255static const MemoryRegionOps notdirty_mem_ops = {
3256 .read = error_mem_read,
3257 .write = notdirty_mem_write,
3258 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00003259};
3260
pbrook0f459d12008-06-09 00:20:13 +00003261/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00003262static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00003263{
3264 CPUState *env = cpu_single_env;
aliguori06d55cc2008-11-18 20:24:06 +00003265 target_ulong pc, cs_base;
3266 TranslationBlock *tb;
pbrook0f459d12008-06-09 00:20:13 +00003267 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00003268 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00003269 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00003270
aliguori06d55cc2008-11-18 20:24:06 +00003271 if (env->watchpoint_hit) {
3272 /* We re-entered the check after replacing the TB. Now raise
 3273 * the debug interrupt so that it will trigger after the
3274 * current instruction. */
3275 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3276 return;
3277 }
pbrook2e70f6e2008-06-29 01:03:05 +00003278 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003279 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00003280 if ((vaddr == (wp->vaddr & len_mask) ||
3281 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00003282 wp->flags |= BP_WATCHPOINT_HIT;
3283 if (!env->watchpoint_hit) {
3284 env->watchpoint_hit = wp;
3285 tb = tb_find_pc(env->mem_io_pc);
3286 if (!tb) {
3287 cpu_abort(env, "check_watchpoint: could not find TB for "
3288 "pc=%p", (void *)env->mem_io_pc);
3289 }
Stefan Weil618ba8e2011-04-18 06:39:53 +00003290 cpu_restore_state(tb, env, env->mem_io_pc);
aliguori6e140f22008-11-18 20:37:55 +00003291 tb_phys_invalidate(tb, -1);
3292 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3293 env->exception_index = EXCP_DEBUG;
3294 } else {
3295 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3296 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3297 }
3298 cpu_resume_from_signal(env, NULL);
aliguori06d55cc2008-11-18 20:24:06 +00003299 }
aliguori6e140f22008-11-18 20:37:55 +00003300 } else {
3301 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00003302 }
3303 }
3304}
3305
pbrook6658ffb2007-03-16 23:58:11 +00003306/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3307 so these check for a hit then pass through to the normal out-of-line
3308 phys routines. */
Avi Kivity1ec9b902012-01-02 12:47:48 +02003309static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
3310 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00003311{
Avi Kivity1ec9b902012-01-02 12:47:48 +02003312 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
3313 switch (size) {
3314 case 1: return ldub_phys(addr);
3315 case 2: return lduw_phys(addr);
3316 case 4: return ldl_phys(addr);
3317 default: abort();
3318 }
pbrook6658ffb2007-03-16 23:58:11 +00003319}
3320
Avi Kivity1ec9b902012-01-02 12:47:48 +02003321static void watch_mem_write(void *opaque, target_phys_addr_t addr,
3322 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00003323{
Avi Kivity1ec9b902012-01-02 12:47:48 +02003324 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
3325 switch (size) {
 3326 case 1: stb_phys(addr, val); break;
 3327 case 2: stw_phys(addr, val); break;
 3328 case 4: stl_phys(addr, val); break;
 3329 default: abort();
3330 }
pbrook6658ffb2007-03-16 23:58:11 +00003331}
3332
Avi Kivity1ec9b902012-01-02 12:47:48 +02003333static const MemoryRegionOps watch_mem_ops = {
3334 .read = watch_mem_read,
3335 .write = watch_mem_write,
3336 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00003337};
pbrook6658ffb2007-03-16 23:58:11 +00003338
Avi Kivity70c68e42012-01-02 12:32:48 +02003339static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
3340 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003341{
Avi Kivity70c68e42012-01-02 12:32:48 +02003342 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003343 unsigned int idx = SUBPAGE_IDX(addr);
blueswir1db7b5422007-05-26 17:36:03 +00003344#if defined(DEBUG_SUBPAGE)
3345 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3346 mmio, len, addr, idx);
3347#endif
blueswir1db7b5422007-05-26 17:36:03 +00003348
Richard Hendersonf6405242010-04-22 16:47:31 -07003349 addr += mmio->region_offset[idx];
3350 idx = mmio->sub_io_index[idx];
Avi Kivity70c68e42012-01-02 12:32:48 +02003351 return io_mem_read(idx, addr, len);
blueswir1db7b5422007-05-26 17:36:03 +00003352}
3353
Avi Kivity70c68e42012-01-02 12:32:48 +02003354static void subpage_write(void *opaque, target_phys_addr_t addr,
3355 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003356{
Avi Kivity70c68e42012-01-02 12:32:48 +02003357 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003358 unsigned int idx = SUBPAGE_IDX(addr);
blueswir1db7b5422007-05-26 17:36:03 +00003359#if defined(DEBUG_SUBPAGE)
Avi Kivity70c68e42012-01-02 12:32:48 +02003360 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
3361 " idx %d value %"PRIx64"\n",
Richard Hendersonf6405242010-04-22 16:47:31 -07003362 __func__, mmio, len, addr, idx, value);
blueswir1db7b5422007-05-26 17:36:03 +00003363#endif
Richard Hendersonf6405242010-04-22 16:47:31 -07003364
3365 addr += mmio->region_offset[idx];
3366 idx = mmio->sub_io_index[idx];
Avi Kivity70c68e42012-01-02 12:32:48 +02003367 io_mem_write(idx, addr, value, len);
blueswir1db7b5422007-05-26 17:36:03 +00003368}
3369
Avi Kivity70c68e42012-01-02 12:32:48 +02003370static const MemoryRegionOps subpage_ops = {
3371 .read = subpage_read,
3372 .write = subpage_write,
3373 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00003374};
3375
Avi Kivityde712f92012-01-02 12:41:07 +02003376static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
3377 unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01003378{
3379 ram_addr_t raddr = addr;
3380 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02003381 switch (size) {
3382 case 1: return ldub_p(ptr);
3383 case 2: return lduw_p(ptr);
3384 case 4: return ldl_p(ptr);
3385 default: abort();
3386 }
Andreas Färber56384e82011-11-30 16:26:21 +01003387}
3388
Avi Kivityde712f92012-01-02 12:41:07 +02003389static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
3390 uint64_t value, unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01003391{
3392 ram_addr_t raddr = addr;
3393 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02003394 switch (size) {
3395 case 1: return stb_p(ptr, value);
3396 case 2: return stw_p(ptr, value);
3397 case 4: return stl_p(ptr, value);
3398 default: abort();
3399 }
Andreas Färber56384e82011-11-30 16:26:21 +01003400}
3401
Avi Kivityde712f92012-01-02 12:41:07 +02003402static const MemoryRegionOps subpage_ram_ops = {
3403 .read = subpage_ram_read,
3404 .write = subpage_ram_write,
3405 .endianness = DEVICE_NATIVE_ENDIAN,
Andreas Färber56384e82011-11-30 16:26:21 +01003406};
3407
Anthony Liguoric227f092009-10-01 16:12:16 -05003408static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3409 ram_addr_t memory, ram_addr_t region_offset)
blueswir1db7b5422007-05-26 17:36:03 +00003410{
3411 int idx, eidx;
3412
3413 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3414 return -1;
3415 idx = SUBPAGE_IDX(start);
3416 eidx = SUBPAGE_IDX(end);
3417#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00003418 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem " RAM_ADDR_FMT "\n", __func__,
blueswir1db7b5422007-05-26 17:36:03 +00003419 mmio, start, end, idx, eidx, memory);
3420#endif
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003421 if ((memory & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
Avi Kivityde712f92012-01-02 12:41:07 +02003422 memory = io_mem_subpage_ram.ram_addr;
Andreas Färber56384e82011-11-30 16:26:21 +01003423 }
Avi Kivity11c7ef02012-01-02 17:21:07 +02003424 memory &= IO_MEM_NB_ENTRIES - 1;
blueswir1db7b5422007-05-26 17:36:03 +00003425 for (; idx <= eidx; idx++) {
Richard Hendersonf6405242010-04-22 16:47:31 -07003426 mmio->sub_io_index[idx] = memory;
3427 mmio->region_offset[idx] = region_offset;
blueswir1db7b5422007-05-26 17:36:03 +00003428 }
3429
3430 return 0;
3431}
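/* Editorial note (not in the original source): e.g. subpage_register(mmio,
 * 0x200, 0x7ff, memory, off) fills slots 0x200..0x7ff of sub_io_index[] and
 * region_offset[], so a later access at page offset 0x400 is forwarded by
 * subpage_read()/subpage_write() above to io handler 'memory' at address
 * off + 0x400. */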
3432
Richard Hendersonf6405242010-04-22 16:47:31 -07003433static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3434 ram_addr_t orig_memory,
3435 ram_addr_t region_offset)
blueswir1db7b5422007-05-26 17:36:03 +00003436{
Anthony Liguoric227f092009-10-01 16:12:16 -05003437 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00003438 int subpage_memory;
3439
Anthony Liguori7267c092011-08-20 22:09:37 -05003440 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00003441
3442 mmio->base = base;
Avi Kivity70c68e42012-01-02 12:32:48 +02003443 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
3444 "subpage", TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02003445 mmio->iomem.subpage = true;
Avi Kivity70c68e42012-01-02 12:32:48 +02003446 subpage_memory = mmio->iomem.ram_addr;
blueswir1db7b5422007-05-26 17:36:03 +00003447#if defined(DEBUG_SUBPAGE)
aliguori1eec6142009-02-05 22:06:18 +00003448 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3449 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
blueswir1db7b5422007-05-26 17:36:03 +00003450#endif
Avi Kivityb3b00c72012-01-02 13:20:11 +02003451 *phys = subpage_memory;
Richard Hendersonf6405242010-04-22 16:47:31 -07003452 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
blueswir1db7b5422007-05-26 17:36:03 +00003453
3454 return mmio;
3455}
3456
aliguori88715652009-02-11 15:20:58 +00003457static int get_free_io_mem_idx(void)
3458{
3459 int i;
3460
3461 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3462 if (!io_mem_used[i]) {
3463 io_mem_used[i] = 1;
3464 return i;
3465 }
Riku Voipioc6703b42009-12-03 15:56:05 +02003466 fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
aliguori88715652009-02-11 15:20:58 +00003467 return -1;
3468}
3469
bellard33417e72003-08-10 21:47:01 +00003470/* Register a MemoryRegion as an io zone, recording it in io_mem_region[]
 3471 so that memory accesses can be dispatched to its read/write callbacks.
blueswir13ee89922008-01-02 19:45:26 +00003472 If io_index is non-zero, the corresponding io zone is
blueswir14254fab2008-01-01 16:57:19 +00003473 modified. If it is zero, a new io zone is allocated. The return
 3474 value can be used with cpu_register_physical_memory(). (-1) is
 3475 returned on error. */
Avi Kivitya621f382012-01-02 13:12:08 +02003477static int cpu_register_io_memory_fixed(int io_index, MemoryRegion *mr)
bellard33417e72003-08-10 21:47:01 +00003478{
bellard33417e72003-08-10 21:47:01 +00003479 if (io_index <= 0) {
aliguori88715652009-02-11 15:20:58 +00003480 io_index = get_free_io_mem_idx();
3481 if (io_index == -1)
3482 return io_index;
bellard33417e72003-08-10 21:47:01 +00003483 } else {
3484 if (io_index >= IO_MEM_NB_ENTRIES)
3485 return -1;
3486 }
bellardb5ff1b32005-11-26 10:38:39 +00003487
Avi Kivitya621f382012-01-02 13:12:08 +02003488 io_mem_region[io_index] = mr;
Richard Hendersonf6405242010-04-22 16:47:31 -07003489
Avi Kivity11c7ef02012-01-02 17:21:07 +02003490 return io_index;
bellard33417e72003-08-10 21:47:01 +00003491}
bellard61382a52003-10-27 21:22:23 +00003492
Avi Kivitya621f382012-01-02 13:12:08 +02003493int cpu_register_io_memory(MemoryRegion *mr)
Avi Kivity1eed09c2009-06-14 11:38:51 +03003494{
Avi Kivitya621f382012-01-02 13:12:08 +02003495 return cpu_register_io_memory_fixed(0, mr);
Avi Kivity1eed09c2009-06-14 11:38:51 +03003496}
3497
Avi Kivity11c7ef02012-01-02 17:21:07 +02003498void cpu_unregister_io_memory(int io_index)
aliguori88715652009-02-11 15:20:58 +00003499{
Avi Kivitya621f382012-01-02 13:12:08 +02003500 io_mem_region[io_index] = NULL;
aliguori88715652009-02-11 15:20:58 +00003501 io_mem_used[io_index] = 0;
3502}
3503
Avi Kivitye9179ce2009-06-14 11:38:52 +03003504static void io_mem_init(void)
3505{
3506 int i;
3507
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003508 /* Must be first: */
3509 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
3510 assert(io_mem_ram.ram_addr == 0);
3511 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
3512 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
3513 "unassigned", UINT64_MAX);
3514 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
3515 "notdirty", UINT64_MAX);
Avi Kivityde712f92012-01-02 12:41:07 +02003516 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
3517 "subpage-ram", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003518 for (i = 0; i < 5; i++) /* the five fixed regions registered above */
 3519 io_mem_used[i] = 1;
3520
Avi Kivity1ec9b902012-01-02 12:47:48 +02003521 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
3522 "watch", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003523}
3524
Avi Kivity50c1e142012-02-08 21:36:02 +02003525static void core_begin(MemoryListener *listener)
3526{
Avi Kivity54688b12012-02-09 17:34:32 +02003527 destroy_all_mappings();
Avi Kivity50c1e142012-02-08 21:36:02 +02003528}
3529
3530static void core_commit(MemoryListener *listener)
3531{
3532}
3533
Avi Kivity93632742012-02-08 16:54:16 +02003534static void core_region_add(MemoryListener *listener,
3535 MemoryRegionSection *section)
3536{
Avi Kivity4855d412012-02-08 21:16:05 +02003537 cpu_register_physical_memory_log(section, section->readonly);
Avi Kivity93632742012-02-08 16:54:16 +02003538}
3539
3540static void core_region_del(MemoryListener *listener,
3541 MemoryRegionSection *section)
3542{
Avi Kivity93632742012-02-08 16:54:16 +02003543}
3544
Avi Kivity50c1e142012-02-08 21:36:02 +02003545static void core_region_nop(MemoryListener *listener,
3546 MemoryRegionSection *section)
3547{
Avi Kivity54688b12012-02-09 17:34:32 +02003548 cpu_register_physical_memory_log(section, section->readonly);
Avi Kivity50c1e142012-02-08 21:36:02 +02003549}
3550
Avi Kivity93632742012-02-08 16:54:16 +02003551static void core_log_start(MemoryListener *listener,
3552 MemoryRegionSection *section)
3553{
3554}
3555
3556static void core_log_stop(MemoryListener *listener,
3557 MemoryRegionSection *section)
3558{
3559}
3560
3561static void core_log_sync(MemoryListener *listener,
3562 MemoryRegionSection *section)
3563{
3564}
3565
3566static void core_log_global_start(MemoryListener *listener)
3567{
3568 cpu_physical_memory_set_dirty_tracking(1);
3569}
3570
3571static void core_log_global_stop(MemoryListener *listener)
3572{
3573 cpu_physical_memory_set_dirty_tracking(0);
3574}
3575
3576static void core_eventfd_add(MemoryListener *listener,
3577 MemoryRegionSection *section,
3578 bool match_data, uint64_t data, int fd)
3579{
3580}
3581
3582static void core_eventfd_del(MemoryListener *listener,
3583 MemoryRegionSection *section,
3584 bool match_data, uint64_t data, int fd)
3585{
3586}
3587
Avi Kivity50c1e142012-02-08 21:36:02 +02003588static void io_begin(MemoryListener *listener)
3589{
3590}
3591
3592static void io_commit(MemoryListener *listener)
3593{
3594}
3595
Avi Kivity4855d412012-02-08 21:16:05 +02003596static void io_region_add(MemoryListener *listener,
3597 MemoryRegionSection *section)
3598{
3599 iorange_init(&section->mr->iorange, &memory_region_iorange_ops,
3600 section->offset_within_address_space, section->size);
3601 ioport_register(&section->mr->iorange);
3602}
3603
3604static void io_region_del(MemoryListener *listener,
3605 MemoryRegionSection *section)
3606{
3607 isa_unassign_ioport(section->offset_within_address_space, section->size);
3608}
3609
Avi Kivity50c1e142012-02-08 21:36:02 +02003610static void io_region_nop(MemoryListener *listener,
3611 MemoryRegionSection *section)
3612{
3613}
3614
Avi Kivity4855d412012-02-08 21:16:05 +02003615static void io_log_start(MemoryListener *listener,
3616 MemoryRegionSection *section)
3617{
3618}
3619
3620static void io_log_stop(MemoryListener *listener,
3621 MemoryRegionSection *section)
3622{
3623}
3624
3625static void io_log_sync(MemoryListener *listener,
3626 MemoryRegionSection *section)
3627{
3628}
3629
3630static void io_log_global_start(MemoryListener *listener)
3631{
3632}
3633
3634static void io_log_global_stop(MemoryListener *listener)
3635{
3636}
3637
3638static void io_eventfd_add(MemoryListener *listener,
3639 MemoryRegionSection *section,
3640 bool match_data, uint64_t data, int fd)
3641{
3642}
3643
3644static void io_eventfd_del(MemoryListener *listener,
3645 MemoryRegionSection *section,
3646 bool match_data, uint64_t data, int fd)
3647{
3648}
3649
Avi Kivity93632742012-02-08 16:54:16 +02003650static MemoryListener core_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02003651 .begin = core_begin,
3652 .commit = core_commit,
Avi Kivity93632742012-02-08 16:54:16 +02003653 .region_add = core_region_add,
3654 .region_del = core_region_del,
Avi Kivity50c1e142012-02-08 21:36:02 +02003655 .region_nop = core_region_nop,
Avi Kivity93632742012-02-08 16:54:16 +02003656 .log_start = core_log_start,
3657 .log_stop = core_log_stop,
3658 .log_sync = core_log_sync,
3659 .log_global_start = core_log_global_start,
3660 .log_global_stop = core_log_global_stop,
3661 .eventfd_add = core_eventfd_add,
3662 .eventfd_del = core_eventfd_del,
3663 .priority = 0,
3664};
3665
Avi Kivity4855d412012-02-08 21:16:05 +02003666static MemoryListener io_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02003667 .begin = io_begin,
3668 .commit = io_commit,
Avi Kivity4855d412012-02-08 21:16:05 +02003669 .region_add = io_region_add,
3670 .region_del = io_region_del,
Avi Kivity50c1e142012-02-08 21:36:02 +02003671 .region_nop = io_region_nop,
Avi Kivity4855d412012-02-08 21:16:05 +02003672 .log_start = io_log_start,
3673 .log_stop = io_log_stop,
3674 .log_sync = io_log_sync,
3675 .log_global_start = io_log_global_start,
3676 .log_global_stop = io_log_global_stop,
3677 .eventfd_add = io_eventfd_add,
3678 .eventfd_del = io_eventfd_del,
3679 .priority = 0,
3680};
3681
Avi Kivity62152b82011-07-26 14:26:14 +03003682static void memory_map_init(void)
3683{
Anthony Liguori7267c092011-08-20 22:09:37 -05003684 system_memory = g_malloc(sizeof(*system_memory));
Avi Kivity8417ceb2011-08-03 11:56:14 +03003685 memory_region_init(system_memory, "system", INT64_MAX);
Avi Kivity62152b82011-07-26 14:26:14 +03003686 set_system_memory_map(system_memory);
Avi Kivity309cb472011-08-08 16:09:03 +03003687
Anthony Liguori7267c092011-08-20 22:09:37 -05003688 system_io = g_malloc(sizeof(*system_io));
Avi Kivity309cb472011-08-08 16:09:03 +03003689 memory_region_init(system_io, "io", 65536);
3690 set_system_io_map(system_io);
Avi Kivity93632742012-02-08 16:54:16 +02003691
Avi Kivity4855d412012-02-08 21:16:05 +02003692 memory_listener_register(&core_memory_listener, system_memory);
3693 memory_listener_register(&io_memory_listener, system_io);
Avi Kivity62152b82011-07-26 14:26:14 +03003694}
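/* Editorial sketch (not part of the original source, kept out of the
 * build): the MemoryListener pattern used above.  A client provides every
 * callback slot, as core_memory_listener does, even if most are empty. */
#if 0
static void example_region_add(MemoryListener *listener,
                               MemoryRegionSection *section)
{
    printf("section at " TARGET_FMT_plx " size " RAM_ADDR_FMT "\n",
           section->offset_within_address_space,
           (ram_addr_t)section->size);
}
#endif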
3695
3696MemoryRegion *get_system_memory(void)
3697{
3698 return system_memory;
3699}
3700
Avi Kivity309cb472011-08-08 16:09:03 +03003701MemoryRegion *get_system_io(void)
3702{
3703 return system_io;
3704}
3705
pbrooke2eef172008-06-08 01:09:01 +00003706#endif /* !defined(CONFIG_USER_ONLY) */
3707
bellard13eb76e2004-01-24 15:23:36 +00003708/* physical memory access (slow version, mainly for debug) */
3709#if defined(CONFIG_USER_ONLY)
Paul Brooka68fe892010-03-01 00:08:59 +00003710int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3711 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003712{
3713 int l, flags;
3714 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00003715 void * p;
bellard13eb76e2004-01-24 15:23:36 +00003716
3717 while (len > 0) {
3718 page = addr & TARGET_PAGE_MASK;
3719 l = (page + TARGET_PAGE_SIZE) - addr;
3720 if (l > len)
3721 l = len;
3722 flags = page_get_flags(page);
3723 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00003724 return -1;
bellard13eb76e2004-01-24 15:23:36 +00003725 if (is_write) {
3726 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00003727 return -1;
bellard579a97f2007-11-11 14:26:47 +00003728 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003729 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00003730 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003731 memcpy(p, buf, l);
3732 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00003733 } else {
3734 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00003735 return -1;
bellard579a97f2007-11-11 14:26:47 +00003736 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003737 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00003738 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003739 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00003740 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00003741 }
3742 len -= l;
3743 buf += l;
3744 addr += l;
3745 }
Paul Brooka68fe892010-03-01 00:08:59 +00003746 return 0;
bellard13eb76e2004-01-24 15:23:36 +00003747}
bellard8df1cd02005-01-28 22:37:22 +00003748
bellard13eb76e2004-01-24 15:23:36 +00003749#else
Anthony Liguoric227f092009-10-01 16:12:16 -05003750void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
bellard13eb76e2004-01-24 15:23:36 +00003751 int len, int is_write)
3752{
3753 int l, io_index;
3754 uint8_t *ptr;
3755 uint32_t val;
Anthony Liguoric227f092009-10-01 16:12:16 -05003756 target_phys_addr_t page;
Anthony PERARD8ca56922011-07-15 04:32:53 +00003757 ram_addr_t pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003758 PhysPageDesc p;
ths3b46e622007-09-17 08:09:54 +00003759
bellard13eb76e2004-01-24 15:23:36 +00003760 while (len > 0) {
3761 page = addr & TARGET_PAGE_MASK;
3762 l = (page + TARGET_PAGE_SIZE) - addr;
3763 if (l > len)
3764 l = len;
bellard92e873b2004-05-21 14:52:29 +00003765 p = phys_page_find(page >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003766 pd = p.phys_offset;
ths3b46e622007-09-17 08:09:54 +00003767
bellard13eb76e2004-01-24 15:23:36 +00003768 if (is_write) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003769 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003770 target_phys_addr_t addr1;
Avi Kivity11c7ef02012-01-02 17:21:07 +02003771 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003772 addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
bellard6a00d602005-11-21 23:25:50 +00003773 /* XXX: could force cpu_single_env to NULL to avoid
3774 potential bugs */
aurel326c2934d2009-02-18 21:37:17 +00003775 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003776 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003777 val = ldl_p(buf);
Avi Kivityacbbec52011-11-21 12:27:03 +02003778 io_mem_write(io_index, addr1, val, 4);
bellard13eb76e2004-01-24 15:23:36 +00003779 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003780 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003781 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003782 val = lduw_p(buf);
Avi Kivityacbbec52011-11-21 12:27:03 +02003783 io_mem_write(io_index, addr1, val, 2);
bellard13eb76e2004-01-24 15:23:36 +00003784 l = 2;
3785 } else {
bellard1c213d12005-09-03 10:49:04 +00003786 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003787 val = ldub_p(buf);
Avi Kivityacbbec52011-11-21 12:27:03 +02003788 io_mem_write(io_index, addr1, val, 1);
bellard13eb76e2004-01-24 15:23:36 +00003789 l = 1;
3790 }
3791 } else {
Anthony PERARD8ca56922011-07-15 04:32:53 +00003792 ram_addr_t addr1;
bellardb448f2f2004-02-25 23:24:04 +00003793 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
bellard13eb76e2004-01-24 15:23:36 +00003794 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003795 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00003796 memcpy(ptr, buf, l);
bellard3a7d9292005-08-21 09:26:42 +00003797 if (!cpu_physical_memory_is_dirty(addr1)) {
3798 /* invalidate code */
3799 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3800 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003801 cpu_physical_memory_set_dirty_flags(
3802 addr1, (0xff & ~CODE_DIRTY_FLAG));
bellard3a7d9292005-08-21 09:26:42 +00003803 }
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003804 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00003805 }
3806 } else {
Avi Kivity1d393fa2012-01-01 21:15:42 +02003807 if (!is_ram_rom_romd(pd)) {
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003808 target_phys_addr_t addr1;
bellard13eb76e2004-01-24 15:23:36 +00003809 /* I/O case */
Avi Kivity11c7ef02012-01-02 17:21:07 +02003810 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003811 addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
aurel326c2934d2009-02-18 21:37:17 +00003812 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003813 /* 32 bit read access */
Avi Kivityacbbec52011-11-21 12:27:03 +02003814 val = io_mem_read(io_index, addr1, 4);
bellardc27004e2005-01-03 23:35:10 +00003815 stl_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003816 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003817 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003818 /* 16 bit read access */
Avi Kivityacbbec52011-11-21 12:27:03 +02003819 val = io_mem_read(io_index, addr1, 2);
bellardc27004e2005-01-03 23:35:10 +00003820 stw_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003821 l = 2;
3822 } else {
bellard1c213d12005-09-03 10:49:04 +00003823 /* 8 bit read access */
Avi Kivityacbbec52011-11-21 12:27:03 +02003824 val = io_mem_read(io_index, addr1, 1);
bellardc27004e2005-01-03 23:35:10 +00003825 stb_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003826 l = 1;
3827 }
3828 } else {
3829 /* RAM case */
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003830 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
3831 memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
3832 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00003833 }
3834 }
3835 len -= l;
3836 buf += l;
3837 addr += l;
3838 }
3839}
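/*
 * A minimal usage sketch for cpu_physical_memory_rw(); the guest physical
 * address 0x1000 is hypothetical.  The loop above splits the access at
 * TARGET_PAGE_SIZE boundaries and sends each chunk either down the RAM
 * fast path (memcpy plus code invalidation and dirty tracking) or through
 * io_mem_read()/io_mem_write() at the widest aligned size (4, 2 or 1):
 *
 *     uint8_t buf[4] = { 0xde, 0xad, 0xbe, 0xef };
 *     cpu_physical_memory_rw(0x1000, buf, sizeof(buf), 1);    [write]
 *     cpu_physical_memory_rw(0x1000, buf, sizeof(buf), 0);    [read back]
 */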
bellard8df1cd02005-01-28 22:37:22 +00003840
bellardd0ecd2a2006-04-23 17:14:48 +00003841/* Used for ROM loading: can write to both RAM and ROM */
Anthony Liguoric227f092009-10-01 16:12:16 -05003842void cpu_physical_memory_write_rom(target_phys_addr_t addr,
bellardd0ecd2a2006-04-23 17:14:48 +00003843 const uint8_t *buf, int len)
3844{
3845 int l;
3846 uint8_t *ptr;
Anthony Liguoric227f092009-10-01 16:12:16 -05003847 target_phys_addr_t page;
bellardd0ecd2a2006-04-23 17:14:48 +00003848 unsigned long pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003849 PhysPageDesc p;
ths3b46e622007-09-17 08:09:54 +00003850
bellardd0ecd2a2006-04-23 17:14:48 +00003851 while (len > 0) {
3852 page = addr & TARGET_PAGE_MASK;
3853 l = (page + TARGET_PAGE_SIZE) - addr;
3854 if (l > len)
3855 l = len;
3856 p = phys_page_find(page >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003857 pd = p.phys_offset;
ths3b46e622007-09-17 08:09:54 +00003858
Avi Kivity1d393fa2012-01-01 21:15:42 +02003859 if (!is_ram_rom_romd(pd)) {
bellardd0ecd2a2006-04-23 17:14:48 +00003860 /* do nothing */
3861 } else {
3862 unsigned long addr1;
3863 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3864 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003865 ptr = qemu_get_ram_ptr(addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00003866 memcpy(ptr, buf, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003867 qemu_put_ram_ptr(ptr);
bellardd0ecd2a2006-04-23 17:14:48 +00003868 }
3869 len -= l;
3870 buf += l;
3871 addr += l;
3872 }
3873}
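/*
 * Usage sketch: how board init code might install a firmware image; the
 * address and the bios_data/bios_size names are hypothetical:
 *
 *     cpu_physical_memory_write_rom(0xfffc0000, bios_data, bios_size);
 *
 * Unlike cpu_physical_memory_rw() this writes straight into ROM and ROMD
 * pages as well as RAM, and silently skips MMIO pages.
 */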
3874
aliguori6d16c2f2009-01-22 16:59:11 +00003875typedef struct {
3876 void *buffer;
Anthony Liguoric227f092009-10-01 16:12:16 -05003877 target_phys_addr_t addr;
3878 target_phys_addr_t len;
aliguori6d16c2f2009-01-22 16:59:11 +00003879} BounceBuffer;
3880
3881static BounceBuffer bounce;
3882
aliguoriba223c22009-01-22 16:59:16 +00003883typedef struct MapClient {
3884 void *opaque;
3885 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00003886 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00003887} MapClient;
3888
Blue Swirl72cf2d42009-09-12 07:36:22 +00003889static QLIST_HEAD(map_client_list, MapClient) map_client_list
3890 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003891
3892void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3893{
Anthony Liguori7267c092011-08-20 22:09:37 -05003894 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00003895
3896 client->opaque = opaque;
3897 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003898 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00003899 return client;
3900}
3901
3902void cpu_unregister_map_client(void *_client)
3903{
3904 MapClient *client = (MapClient *)_client;
3905
Blue Swirl72cf2d42009-09-12 07:36:22 +00003906 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05003907 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00003908}
3909
3910static void cpu_notify_map_clients(void)
3911{
3912 MapClient *client;
3913
Blue Swirl72cf2d42009-09-12 07:36:22 +00003914 while (!QLIST_EMPTY(&map_client_list)) {
3915 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003916 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09003917 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00003918 }
3919}
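/*
 * Sketch of the retry protocol (the mydev_* names are hypothetical): a
 * device whose map attempt fails because the single bounce buffer is
 * busy registers a callback; cpu_notify_map_clients() runs and then
 * unregisters each client once the buffer is released:
 *
 *     static void mydev_retry_dma(void *opaque)
 *     {
 *         MyDevState *s = opaque;
 *         mydev_start_dma(s);    (calls cpu_physical_memory_map() again)
 *     }
 *
 *     if (!ptr) {
 *         cpu_register_map_client(s, mydev_retry_dma);
 *     }
 */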
3920
aliguori6d16c2f2009-01-22 16:59:11 +00003921/* Map a physical memory region into a host virtual address.
3922 * May map a subset of the requested range, given by and returned in *plen.
3923 * May return NULL if resources needed to perform the mapping are exhausted.
3924 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00003925 * Use cpu_register_map_client() to know when retrying the map operation is
3926 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00003927 */
Anthony Liguoric227f092009-10-01 16:12:16 -05003928void *cpu_physical_memory_map(target_phys_addr_t addr,
3929 target_phys_addr_t *plen,
aliguori6d16c2f2009-01-22 16:59:11 +00003930 int is_write)
3931{
Anthony Liguoric227f092009-10-01 16:12:16 -05003932 target_phys_addr_t len = *plen;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003933 target_phys_addr_t todo = 0;
aliguori6d16c2f2009-01-22 16:59:11 +00003934 int l;
Anthony Liguoric227f092009-10-01 16:12:16 -05003935 target_phys_addr_t page;
aliguori6d16c2f2009-01-22 16:59:11 +00003936 unsigned long pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003937 PhysPageDesc p;
Anthony PERARDf15fbc42011-07-20 08:17:42 +00003938 ram_addr_t raddr = RAM_ADDR_MAX;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003939 ram_addr_t rlen;
3940 void *ret;
aliguori6d16c2f2009-01-22 16:59:11 +00003941
3942 while (len > 0) {
3943 page = addr & TARGET_PAGE_MASK;
3944 l = (page + TARGET_PAGE_SIZE) - addr;
3945 if (l > len)
3946 l = len;
3947 p = phys_page_find(page >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003948 pd = p.phys_offset;
aliguori6d16c2f2009-01-22 16:59:11 +00003949
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003950 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003951 if (todo || bounce.buffer) {
aliguori6d16c2f2009-01-22 16:59:11 +00003952 break;
3953 }
3954 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3955 bounce.addr = addr;
3956 bounce.len = l;
3957 if (!is_write) {
Stefan Weil54f7b4a2011-04-10 18:23:39 +02003958 cpu_physical_memory_read(addr, bounce.buffer, l);
aliguori6d16c2f2009-01-22 16:59:11 +00003959 }
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003960
3961 *plen = l;
3962 return bounce.buffer;
aliguori6d16c2f2009-01-22 16:59:11 +00003963 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003964 if (!todo) {
3965 raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3966 }
aliguori6d16c2f2009-01-22 16:59:11 +00003967
3968 len -= l;
3969 addr += l;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003970 todo += l;
aliguori6d16c2f2009-01-22 16:59:11 +00003971 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003972 rlen = todo;
3973 ret = qemu_ram_ptr_length(raddr, &rlen);
3974 *plen = rlen;
3975 return ret;
aliguori6d16c2f2009-01-22 16:59:11 +00003976}
3977
3978/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3979 * Will also mark the memory as dirty if is_write == 1. access_len gives
3980 * the amount of memory that was actually read or written by the caller.
3981 */
Anthony Liguoric227f092009-10-01 16:12:16 -05003982void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3983 int is_write, target_phys_addr_t access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00003984{
3985 if (buffer != bounce.buffer) {
3986 if (is_write) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03003987 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00003988 while (access_len) {
3989 unsigned l;
3990 l = TARGET_PAGE_SIZE;
3991 if (l > access_len)
3992 l = access_len;
3993 if (!cpu_physical_memory_is_dirty(addr1)) {
3994 /* invalidate code */
3995 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3996 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003997 cpu_physical_memory_set_dirty_flags(
3998 addr1, (0xff & ~CODE_DIRTY_FLAG));
aliguori6d16c2f2009-01-22 16:59:11 +00003999 }
4000 addr1 += l;
4001 access_len -= l;
4002 }
4003 }
Jan Kiszka868bb332011-06-21 22:59:09 +02004004 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02004005 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01004006 }
aliguori6d16c2f2009-01-22 16:59:11 +00004007 return;
4008 }
4009 if (is_write) {
4010 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
4011 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00004012 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00004013 bounce.buffer = NULL;
aliguoriba223c22009-01-22 16:59:16 +00004014 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00004015}
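/*
 * A minimal zero-copy read using the pair above; the guest address is
 * hypothetical:
 *
 *     target_phys_addr_t plen = 4096;
 *     void *p = cpu_physical_memory_map(0x100000, &plen, 0);
 *     if (p) {
 *         ... consume the first plen bytes at p ...
 *         cpu_physical_memory_unmap(p, plen, 0, plen);
 *     }
 *
 * plen can come back smaller than requested (for instance when the range
 * runs into an MMIO page and only the page-sized bounce buffer is
 * available), so callers loop or fall back to cpu_physical_memory_rw().
 */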
bellardd0ecd2a2006-04-23 17:14:48 +00004016
bellard8df1cd02005-01-28 22:37:22 +00004017/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004018static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
4019 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00004020{
4021 int io_index;
4022 uint8_t *ptr;
4023 uint32_t val;
4024 unsigned long pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004025 PhysPageDesc p;
bellard8df1cd02005-01-28 22:37:22 +00004026
4027 p = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004028 pd = p.phys_offset;
ths3b46e622007-09-17 08:09:54 +00004029
Avi Kivity1d393fa2012-01-01 21:15:42 +02004030 if (!is_ram_rom_romd(pd)) {
bellard8df1cd02005-01-28 22:37:22 +00004031 /* I/O case */
Avi Kivity11c7ef02012-01-02 17:21:07 +02004032 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004033 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
Avi Kivityacbbec52011-11-21 12:27:03 +02004034 val = io_mem_read(io_index, addr, 4);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004035#if defined(TARGET_WORDS_BIGENDIAN)
4036 if (endian == DEVICE_LITTLE_ENDIAN) {
4037 val = bswap32(val);
4038 }
4039#else
4040 if (endian == DEVICE_BIG_ENDIAN) {
4041 val = bswap32(val);
4042 }
4043#endif
bellard8df1cd02005-01-28 22:37:22 +00004044 } else {
4045 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00004046 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bellard8df1cd02005-01-28 22:37:22 +00004047 (addr & ~TARGET_PAGE_MASK);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004048 switch (endian) {
4049 case DEVICE_LITTLE_ENDIAN:
4050 val = ldl_le_p(ptr);
4051 break;
4052 case DEVICE_BIG_ENDIAN:
4053 val = ldl_be_p(ptr);
4054 break;
4055 default:
4056 val = ldl_p(ptr);
4057 break;
4058 }
bellard8df1cd02005-01-28 22:37:22 +00004059 }
4060 return val;
4061}
4062
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004063uint32_t ldl_phys(target_phys_addr_t addr)
4064{
4065 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4066}
4067
4068uint32_t ldl_le_phys(target_phys_addr_t addr)
4069{
4070 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4071}
4072
4073uint32_t ldl_be_phys(target_phys_addr_t addr)
4074{
4075 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
4076}
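/*
 * The explicit-endian wrappers let device models say what they mean: a
 * 32-bit register on a little-endian-by-definition bus such as PCI would
 * be read with ldl_le_phys(), a big-endian device register with
 * ldl_be_phys(), while plain ldl_phys() keeps the historical
 * target-endian behaviour.
 */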
4077
bellard84b7b8e2005-11-28 21:19:04 +00004078/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004079static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
4080 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00004081{
4082 int io_index;
4083 uint8_t *ptr;
4084 uint64_t val;
4085 unsigned long pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004086 PhysPageDesc p;
bellard84b7b8e2005-11-28 21:19:04 +00004087
4088 p = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004089 pd = p.phys_offset;
ths3b46e622007-09-17 08:09:54 +00004090
Avi Kivity1d393fa2012-01-01 21:15:42 +02004091 if (!is_ram_rom_romd(pd)) {
bellard84b7b8e2005-11-28 21:19:04 +00004092 /* I/O case */
Avi Kivity11c7ef02012-01-02 17:21:07 +02004093 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004094 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004095
4096 /* XXX: This is broken when the device endianness differs from the
4097 CPU endianness; fix it and honour the "endian" argument. */
bellard84b7b8e2005-11-28 21:19:04 +00004098#ifdef TARGET_WORDS_BIGENDIAN
Avi Kivityacbbec52011-11-21 12:27:03 +02004099 val = io_mem_read(io_index, addr, 4) << 32;
4100 val |= io_mem_read(io_index, addr + 4, 4);
bellard84b7b8e2005-11-28 21:19:04 +00004101#else
Avi Kivityacbbec52011-11-21 12:27:03 +02004102 val = io_mem_read(io_index, addr, 4);
4103 val |= io_mem_read(io_index, addr + 4, 4) << 32;
bellard84b7b8e2005-11-28 21:19:04 +00004104#endif
4105 } else {
4106 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00004107 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bellard84b7b8e2005-11-28 21:19:04 +00004108 (addr & ~TARGET_PAGE_MASK);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004109 switch (endian) {
4110 case DEVICE_LITTLE_ENDIAN:
4111 val = ldq_le_p(ptr);
4112 break;
4113 case DEVICE_BIG_ENDIAN:
4114 val = ldq_be_p(ptr);
4115 break;
4116 default:
4117 val = ldq_p(ptr);
4118 break;
4119 }
bellard84b7b8e2005-11-28 21:19:04 +00004120 }
4121 return val;
4122}
4123
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004124uint64_t ldq_phys(target_phys_addr_t addr)
4125{
4126 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4127}
4128
4129uint64_t ldq_le_phys(target_phys_addr_t addr)
4130{
4131 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4132}
4133
4134uint64_t ldq_be_phys(target_phys_addr_t addr)
4135{
4136 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
4137}
4138
bellardaab33092005-10-30 20:48:42 +00004139/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05004140uint32_t ldub_phys(target_phys_addr_t addr)
bellardaab33092005-10-30 20:48:42 +00004141{
4142 uint8_t val;
4143 cpu_physical_memory_read(addr, &val, 1);
4144 return val;
4145}
4146
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004147/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004148static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
4149 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00004150{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004151 int io_index;
4152 uint8_t *ptr;
4153 uint32_t val;
4154 unsigned long pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004155 PhysPageDesc p;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004156
4157 p = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004158 pd = p.phys_offset;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004159
Avi Kivity1d393fa2012-01-01 21:15:42 +02004160 if (!is_ram_rom_romd(pd)) {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004161 /* I/O case */
Avi Kivity11c7ef02012-01-02 17:21:07 +02004162 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004163 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
Avi Kivityacbbec52011-11-21 12:27:03 +02004164 val = io_mem_read(io_index, addr, 2);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004165#if defined(TARGET_WORDS_BIGENDIAN)
4166 if (endian == DEVICE_LITTLE_ENDIAN) {
4167 val = bswap16(val);
4168 }
4169#else
4170 if (endian == DEVICE_BIG_ENDIAN) {
4171 val = bswap16(val);
4172 }
4173#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004174 } else {
4175 /* RAM case */
4176 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4177 (addr & ~TARGET_PAGE_MASK);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004178 switch (endian) {
4179 case DEVICE_LITTLE_ENDIAN:
4180 val = lduw_le_p(ptr);
4181 break;
4182 case DEVICE_BIG_ENDIAN:
4183 val = lduw_be_p(ptr);
4184 break;
4185 default:
4186 val = lduw_p(ptr);
4187 break;
4188 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004189 }
4190 return val;
bellardaab33092005-10-30 20:48:42 +00004191}
4192
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004193uint32_t lduw_phys(target_phys_addr_t addr)
4194{
4195 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4196}
4197
4198uint32_t lduw_le_phys(target_phys_addr_t addr)
4199{
4200 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4201}
4202
4203uint32_t lduw_be_phys(target_phys_addr_t addr)
4204{
4205 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
4206}
4207
bellard8df1cd02005-01-28 22:37:22 +00004208/* warning: addr must be aligned. The RAM page is not marked as dirty
4209 and the code inside is not invalidated. It is useful if the dirty
4210 bits are used to track modified PTEs */
Anthony Liguoric227f092009-10-01 16:12:16 -05004211void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
bellard8df1cd02005-01-28 22:37:22 +00004212{
4213 int io_index;
4214 uint8_t *ptr;
4215 unsigned long pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004216 PhysPageDesc p;
bellard8df1cd02005-01-28 22:37:22 +00004217
4218 p = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004219 pd = p.phys_offset;
ths3b46e622007-09-17 08:09:54 +00004220
Avi Kivity0e0df1e2012-01-02 00:32:15 +02004221 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
Avi Kivity11c7ef02012-01-02 17:21:07 +02004222 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004223 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
Avi Kivityacbbec52011-11-21 12:27:03 +02004224 io_mem_write(io_index, addr, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00004225 } else {
aliguori74576192008-10-06 14:02:03 +00004226 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
pbrook5579c7f2009-04-11 14:47:08 +00004227 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00004228 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00004229
4230 if (unlikely(in_migration)) {
4231 if (!cpu_physical_memory_is_dirty(addr1)) {
4232 /* invalidate code */
4233 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4234 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09004235 cpu_physical_memory_set_dirty_flags(
4236 addr1, (0xff & ~CODE_DIRTY_FLAG));
aliguori74576192008-10-06 14:02:03 +00004237 }
4238 }
bellard8df1cd02005-01-28 22:37:22 +00004239 }
4240}
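/*
 * Sketch of the intended use: the x86 MMU emulation updates guest page
 * table accessed/dirty bits with stores such as
 *
 *     stl_phys_notdirty(pte_addr, pte | PG_ACCESSED_MASK);
 *
 * (pte_addr and pte are illustrative locals) so that QEMU's own
 * bookkeeping write neither invalidates translated code on the page nor,
 * outside of migration, marks the page dirty.
 */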
4241
Anthony Liguoric227f092009-10-01 16:12:16 -05004242void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
j_mayerbc98a7e2007-04-04 07:55:12 +00004243{
4244 int io_index;
4245 uint8_t *ptr;
4246 unsigned long pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004247 PhysPageDesc p;
j_mayerbc98a7e2007-04-04 07:55:12 +00004248
4249 p = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004250 pd = p.phys_offset;
ths3b46e622007-09-17 08:09:54 +00004251
Avi Kivity0e0df1e2012-01-02 00:32:15 +02004252 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
Avi Kivity11c7ef02012-01-02 17:21:07 +02004253 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004254 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
j_mayerbc98a7e2007-04-04 07:55:12 +00004255#ifdef TARGET_WORDS_BIGENDIAN
Avi Kivityacbbec52011-11-21 12:27:03 +02004256 io_mem_write(io_index, addr, val >> 32, 4);
4257 io_mem_write(io_index, addr + 4, (uint32_t)val, 4);
j_mayerbc98a7e2007-04-04 07:55:12 +00004258#else
Avi Kivityacbbec52011-11-21 12:27:03 +02004259 io_mem_write(io_index, addr, (uint32_t)val, 4);
4260 io_mem_write(io_index, addr + 4, val >> 32, 4);
j_mayerbc98a7e2007-04-04 07:55:12 +00004261#endif
4262 } else {
pbrook5579c7f2009-04-11 14:47:08 +00004263 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
j_mayerbc98a7e2007-04-04 07:55:12 +00004264 (addr & ~TARGET_PAGE_MASK);
4265 stq_p(ptr, val);
4266 }
4267}
4268
bellard8df1cd02005-01-28 22:37:22 +00004269/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004270static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
4271 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00004272{
4273 int io_index;
4274 uint8_t *ptr;
4275 unsigned long pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004276 PhysPageDesc p;
bellard8df1cd02005-01-28 22:37:22 +00004277
4278 p = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004279 pd = p.phys_offset;
ths3b46e622007-09-17 08:09:54 +00004280
Avi Kivity0e0df1e2012-01-02 00:32:15 +02004281 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
Avi Kivity11c7ef02012-01-02 17:21:07 +02004282 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004283 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004284#if defined(TARGET_WORDS_BIGENDIAN)
4285 if (endian == DEVICE_LITTLE_ENDIAN) {
4286 val = bswap32(val);
4287 }
4288#else
4289 if (endian == DEVICE_BIG_ENDIAN) {
4290 val = bswap32(val);
4291 }
4292#endif
Avi Kivityacbbec52011-11-21 12:27:03 +02004293 io_mem_write(io_index, addr, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00004294 } else {
4295 unsigned long addr1;
4296 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4297 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00004298 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004299 switch (endian) {
4300 case DEVICE_LITTLE_ENDIAN:
4301 stl_le_p(ptr, val);
4302 break;
4303 case DEVICE_BIG_ENDIAN:
4304 stl_be_p(ptr, val);
4305 break;
4306 default:
4307 stl_p(ptr, val);
4308 break;
4309 }
bellard3a7d9292005-08-21 09:26:42 +00004310 if (!cpu_physical_memory_is_dirty(addr1)) {
4311 /* invalidate code */
4312 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4313 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09004314 cpu_physical_memory_set_dirty_flags(addr1,
4315 (0xff & ~CODE_DIRTY_FLAG));
bellard3a7d9292005-08-21 09:26:42 +00004316 }
bellard8df1cd02005-01-28 22:37:22 +00004317 }
4318}
4319
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004320void stl_phys(target_phys_addr_t addr, uint32_t val)
4321{
4322 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4323}
4324
4325void stl_le_phys(target_phys_addr_t addr, uint32_t val)
4326{
4327 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4328}
4329
4330void stl_be_phys(target_phys_addr_t addr, uint32_t val)
4331{
4332 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4333}
4334
bellardaab33092005-10-30 20:48:42 +00004335/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05004336void stb_phys(target_phys_addr_t addr, uint32_t val)
bellardaab33092005-10-30 20:48:42 +00004337{
4338 uint8_t v = val;
4339 cpu_physical_memory_write(addr, &v, 1);
4340}
4341
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004342/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004343static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
4344 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00004345{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004346 int io_index;
4347 uint8_t *ptr;
4348 unsigned long pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004349 PhysPageDesc p;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004350
4351 p = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004352 pd = p.phys_offset;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004353
Avi Kivity0e0df1e2012-01-02 00:32:15 +02004354 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
Avi Kivity11c7ef02012-01-02 17:21:07 +02004355 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004356 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004357#if defined(TARGET_WORDS_BIGENDIAN)
4358 if (endian == DEVICE_LITTLE_ENDIAN) {
4359 val = bswap16(val);
4360 }
4361#else
4362 if (endian == DEVICE_BIG_ENDIAN) {
4363 val = bswap16(val);
4364 }
4365#endif
Avi Kivityacbbec52011-11-21 12:27:03 +02004366 io_mem_write(io_index, addr, val, 2);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004367 } else {
4368 unsigned long addr1;
4369 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4370 /* RAM case */
4371 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004372 switch (endian) {
4373 case DEVICE_LITTLE_ENDIAN:
4374 stw_le_p(ptr, val);
4375 break;
4376 case DEVICE_BIG_ENDIAN:
4377 stw_be_p(ptr, val);
4378 break;
4379 default:
4380 stw_p(ptr, val);
4381 break;
4382 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004383 if (!cpu_physical_memory_is_dirty(addr1)) {
4384 /* invalidate code */
4385 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4386 /* set dirty bit */
4387 cpu_physical_memory_set_dirty_flags(addr1,
4388 (0xff & ~CODE_DIRTY_FLAG));
4389 }
4390 }
bellardaab33092005-10-30 20:48:42 +00004391}
4392
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004393void stw_phys(target_phys_addr_t addr, uint32_t val)
4394{
4395 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4396}
4397
4398void stw_le_phys(target_phys_addr_t addr, uint32_t val)
4399{
4400 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4401}
4402
4403void stw_be_phys(target_phys_addr_t addr, uint32_t val)
4404{
4405 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4406}
4407
bellardaab33092005-10-30 20:48:42 +00004408/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05004409void stq_phys(target_phys_addr_t addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00004410{
4411 val = tswap64(val);
Stefan Weil71d2b722011-03-26 21:06:56 +01004412 cpu_physical_memory_write(addr, &val, 8);
bellardaab33092005-10-30 20:48:42 +00004413}
4414
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004415void stq_le_phys(target_phys_addr_t addr, uint64_t val)
4416{
4417 val = cpu_to_le64(val);
4418 cpu_physical_memory_write(addr, &val, 8);
4419}
4420
4421void stq_be_phys(target_phys_addr_t addr, uint64_t val)
4422{
4423 val = cpu_to_be64(val);
4424 cpu_physical_memory_write(addr, &val, 8);
4425}
4426
aliguori5e2972f2009-03-28 17:51:36 +00004427/* virtual memory access for debug (includes writing to ROM) */
ths5fafdf22007-09-16 21:08:06 +00004428int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00004429 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00004430{
4431 int l;
Anthony Liguoric227f092009-10-01 16:12:16 -05004432 target_phys_addr_t phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00004433 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00004434
4435 while (len > 0) {
4436 page = addr & TARGET_PAGE_MASK;
4437 phys_addr = cpu_get_phys_page_debug(env, page);
4438 /* if no physical page mapped, return an error */
4439 if (phys_addr == -1)
4440 return -1;
4441 l = (page + TARGET_PAGE_SIZE) - addr;
4442 if (l > len)
4443 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00004444 phys_addr += (addr & ~TARGET_PAGE_MASK);
aliguori5e2972f2009-03-28 17:51:36 +00004445 if (is_write)
4446 cpu_physical_memory_write_rom(phys_addr, buf, l);
4447 else
aliguori5e2972f2009-03-28 17:51:36 +00004448 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
bellard13eb76e2004-01-24 15:23:36 +00004449 len -= l;
4450 buf += l;
4451 addr += l;
4452 }
4453 return 0;
4454}
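/*
 * Usage sketch: the gdbstub satisfies debugger memory accesses this way;
 * pc is a hypothetical guest virtual address:
 *
 *     uint8_t val;
 *     if (cpu_memory_rw_debug(env, pc, &val, 1, 0) == 0) {
 *         ... val holds the byte at guest virtual address pc ...
 *     }
 *
 * Each page is translated separately with cpu_get_phys_page_debug(), so
 * the access works even when the range is physically non-contiguous.
 */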
Paul Brooka68fe892010-03-01 00:08:59 +00004455#endif
bellard13eb76e2004-01-24 15:23:36 +00004456
pbrook2e70f6e2008-06-29 01:03:05 +00004457/* In deterministic execution (icount) mode, an instruction that performs
4458 device I/O must be the last one in its TB */
4459void cpu_io_recompile(CPUState *env, void *retaddr)
4460{
4461 TranslationBlock *tb;
4462 uint32_t n, cflags;
4463 target_ulong pc, cs_base;
4464 uint64_t flags;
4465
4466 tb = tb_find_pc((unsigned long)retaddr);
4467 if (!tb) {
4468 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
4469 retaddr);
4470 }
4471 n = env->icount_decr.u16.low + tb->icount;
Stefan Weil618ba8e2011-04-18 06:39:53 +00004472 cpu_restore_state(tb, env, (unsigned long)retaddr);
pbrook2e70f6e2008-06-29 01:03:05 +00004473 /* Calculate how many instructions had been executed before the fault
thsbf20dc02008-06-30 17:22:19 +00004474 occurred. */
pbrook2e70f6e2008-06-29 01:03:05 +00004475 n = n - env->icount_decr.u16.low;
4476 /* Generate a new TB ending on the I/O insn. */
4477 n++;
4478 /* On MIPS and SH, delay slot instructions can only be restarted if
4479 they were already the first instruction in the TB. If this is not
thsbf20dc02008-06-30 17:22:19 +00004480 the first instruction in a TB then re-execute the preceding
pbrook2e70f6e2008-06-29 01:03:05 +00004481 branch. */
4482#if defined(TARGET_MIPS)
4483 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4484 env->active_tc.PC -= 4;
4485 env->icount_decr.u16.low++;
4486 env->hflags &= ~MIPS_HFLAG_BMASK;
4487 }
4488#elif defined(TARGET_SH4)
4489 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4490 && n > 1) {
4491 env->pc -= 2;
4492 env->icount_decr.u16.low++;
4493 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4494 }
4495#endif
4496 /* This should never happen. */
4497 if (n > CF_COUNT_MASK)
4498 cpu_abort(env, "TB too big during recompile");
4499
4500 cflags = n | CF_LAST_IO;
4501 pc = tb->pc;
4502 cs_base = tb->cs_base;
4503 flags = tb->flags;
4504 tb_phys_invalidate(tb, -1);
4505 /* FIXME: In theory this could raise an exception. In practice
4506 we have already translated the block once so it's probably ok. */
4507 tb_gen_code(env, pc, cs_base, flags, cflags);
thsbf20dc02008-06-30 17:22:19 +00004508 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
pbrook2e70f6e2008-06-29 01:03:05 +00004509 the first in the TB) then we end up generating a whole new TB and
4510 repeating the fault, which is horribly inefficient.
4511 Better would be to execute just this insn uncached, or generate a
4512 second new TB. */
4513 cpu_resume_from_signal(env, NULL);
4514}
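/*
 * Worked example of the above (numbers illustrative): if the 4th insn of
 * a 10-insn TB faults on a device access, n ends up as 3 executed insns
 * plus one for the I/O insn itself, so the TB is regenerated with
 * cflags = 4 | CF_LAST_IO and the retranslated block now ends exactly on
 * the I/O instruction, keeping the instruction count deterministic.
 */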
4515
Paul Brookb3755a92010-03-12 16:54:58 +00004516#if !defined(CONFIG_USER_ONLY)
4517
Stefan Weil055403b2010-10-22 23:03:32 +02004518void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
bellarde3db7222005-01-26 22:00:47 +00004519{
4520 int i, target_code_size, max_target_code_size;
4521 int direct_jmp_count, direct_jmp2_count, cross_page;
4522 TranslationBlock *tb;
ths3b46e622007-09-17 08:09:54 +00004523
bellarde3db7222005-01-26 22:00:47 +00004524 target_code_size = 0;
4525 max_target_code_size = 0;
4526 cross_page = 0;
4527 direct_jmp_count = 0;
4528 direct_jmp2_count = 0;
4529 for(i = 0; i < nb_tbs; i++) {
4530 tb = &tbs[i];
4531 target_code_size += tb->size;
4532 if (tb->size > max_target_code_size)
4533 max_target_code_size = tb->size;
4534 if (tb->page_addr[1] != -1)
4535 cross_page++;
4536 if (tb->tb_next_offset[0] != 0xffff) {
4537 direct_jmp_count++;
4538 if (tb->tb_next_offset[1] != 0xffff) {
4539 direct_jmp2_count++;
4540 }
4541 }
4542 }
4543 /* XXX: avoid using doubles? */
bellard57fec1f2008-02-01 10:50:11 +00004544 cpu_fprintf(f, "Translation buffer state:\n");
Stefan Weil055403b2010-10-22 23:03:32 +02004545 cpu_fprintf(f, "gen code size %td/%ld\n",
bellard26a5f132008-05-28 12:30:31 +00004546 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4547 cpu_fprintf(f, "TB count %d/%d\n",
4548 nb_tbs, code_gen_max_blocks);
ths5fafdf22007-09-16 21:08:06 +00004549 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
bellarde3db7222005-01-26 22:00:47 +00004550 nb_tbs ? target_code_size / nb_tbs : 0,
4551 max_target_code_size);
Stefan Weil055403b2010-10-22 23:03:32 +02004552 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
bellarde3db7222005-01-26 22:00:47 +00004553 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4554 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
ths5fafdf22007-09-16 21:08:06 +00004555 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4556 cross_page,
bellarde3db7222005-01-26 22:00:47 +00004557 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4558 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
ths5fafdf22007-09-16 21:08:06 +00004559 direct_jmp_count,
bellarde3db7222005-01-26 22:00:47 +00004560 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4561 direct_jmp2_count,
4562 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
bellard57fec1f2008-02-01 10:50:11 +00004563 cpu_fprintf(f, "\nStatistics:\n");
bellarde3db7222005-01-26 22:00:47 +00004564 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4565 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4566 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
bellardb67d9a52008-05-23 09:57:34 +00004567 tcg_dump_info(f, cpu_fprintf);
bellarde3db7222005-01-26 22:00:47 +00004568}
4569
Avi Kivityd39e8222012-01-01 23:35:10 +02004570/* NOTE: this function can trigger an exception */
4571/* NOTE2: the returned address is not exactly the physical address: it
4572 is a ram_addr_t offset into guest RAM */
4573tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong addr)
4574{
4575 int mmu_idx, page_index, pd;
4576 void *p;
4577
4578 page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
4579 mmu_idx = cpu_mmu_index(env1);
4580 if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
4581 (addr & TARGET_PAGE_MASK))) {
4582 ldub_code(addr);
4583 }
4584 pd = env1->tlb_table[mmu_idx][page_index].addr_code & ~TARGET_PAGE_MASK;
Avi Kivity0e0df1e2012-01-02 00:32:15 +02004585 if (pd != io_mem_ram.ram_addr && pd != io_mem_rom.ram_addr
Avi Kivity75c578d2012-01-02 15:40:52 +02004586 && !is_romd(pd)) {
Avi Kivityd39e8222012-01-01 23:35:10 +02004587#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC)
4588 cpu_unassigned_access(env1, addr, 0, 1, 0, 4);
4589#else
4590 cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
4591#endif
4592 }
4593 p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
4594 return qemu_ram_addr_from_host_nofail(p);
4595}
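/*
 * In effect this is a software TLB probe: the ldub_code() above forces a
 * fill of tlb_table[mmu_idx][page_index] on a miss, after which adding
 * the per-entry addend to addr yields the host pointer that is converted
 * back to a ram_addr_t.
 */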
4596
Benjamin Herrenschmidt82afa582012-01-10 01:35:11 +00004597/*
4598 * A helper function for the _utterly broken_ virtio device model to find out if
4599 * it's running on a big endian machine. Don't do this at home kids!
4600 */
4601bool virtio_is_big_endian(void);
4602bool virtio_is_big_endian(void)
4603{
4604#if defined(TARGET_WORDS_BIGENDIAN)
4605 return true;
4606#else
4607 return false;
4608#endif
4609}
4610
bellard61382a52003-10-27 21:22:23 +00004611#define MMUSUFFIX _cmmu
Blue Swirl39171492011-09-21 18:13:16 +00004612#undef GETPC
bellard61382a52003-10-27 21:22:23 +00004613#define GETPC() NULL
4614#define env cpu_single_env
bellardb769d8f2004-10-03 15:07:13 +00004615#define SOFTMMU_CODE_ACCESS
bellard61382a52003-10-27 21:22:23 +00004616
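/*
 * Each inclusion of softmmu_template.h below instantiates the code-fetch
 * load helpers for one access width: SHIFT 0, 1, 2 and 3 produce the 1-,
 * 2-, 4- and 8-byte variants respectively.  GETPC() is stubbed to NULL
 * because these helpers are called from the translator itself, not from
 * generated code, so there is no TB return address to recover.
 */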
4617#define SHIFT 0
4618#include "softmmu_template.h"
4619
4620#define SHIFT 1
4621#include "softmmu_template.h"
4622
4623#define SHIFT 2
4624#include "softmmu_template.h"
4625
4626#define SHIFT 3
4627#include "softmmu_template.h"
4628
4629#undef env
4630
4631#endif