/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
   have limited branch ranges (possibly also PPC), so place it in a
   section close to the code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self-modifying code, we count the number
       of code write accesses to a given page; past a threshold we
       switch to a bitmap of translated code */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

#define P_L2_LEVELS \
    (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageEntry PhysPageEntry;

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;

struct PhysPageEntry {
    union {
        uint16_t leaf; /* index into phys_sections */
        uint16_t node; /* index into phys_map_nodes */
    } u;
};

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL ((uint16_t)~0)

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to MemoryRegionSections.  */
static PhysPageEntry phys_map = { .u.node = PHYS_MAP_NODE_NIL };

static void io_mem_init(void);
static void memory_map_init(void);

/* io memory support */
MemoryRegion *io_mem_region[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static MemoryRegion io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;

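/* Make a host memory range executable: VirtualProtect() on Win32,
   mprotect() (widened to host page boundaries) elsewhere.  Used for
   the buffers that hold generated code. */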
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

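/* Determine the host page size and derive qemu_host_page_size/mask
   from it.  In BSD user mode, additionally mark every host mapping
   that is already in place as PAGE_RESERVED so the guest cannot map
   over it. */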
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

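/* Walk the multi-level l1_map radix table and return the PageDesc
   for the given target page index.  If 'alloc' is set, missing
   intermediate levels and the final PageDesc array are allocated on
   the way down; otherwise NULL is returned for unmapped indexes. */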
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)

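/* Allocate a node of L2_SIZE PhysPageEntrys from the growable
   phys_map_nodes array, storing its index through 'ptr'.  All
   entries of the new node start out as PHYS_MAP_NODE_NIL. */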
static PhysPageEntry *phys_map_node_alloc(uint16_t *ptr)
{
    unsigned i;
    uint16_t ret;

    /* Assign early to avoid the pointer being invalidated by g_renew() */
    *ptr = ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    if (ret == phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].u.node = PHYS_MAP_NODE_NIL;
    }
    return phys_map_nodes[ret];
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}

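/* Point the phys_map entry for the physical page 'index' at
   phys_sections[leaf], allocating intermediate nodes as needed.
   Freshly allocated bottom-level nodes are pre-filled with the
   unassigned section. */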
static void phys_page_set(target_phys_addr_t index, uint16_t leaf)
{
    PhysPageEntry *lp, *p;
    int i, j;

    lp = &phys_map;

    /* Level 1..N. */
    for (i = P_L2_LEVELS - 1; i >= 0; i--) {
        if (lp->u.node == PHYS_MAP_NODE_NIL) {
            p = phys_map_node_alloc(&lp->u.node);
            if (i == 0) {
                for (j = 0; j < L2_SIZE; j++) {
                    p[j].u.leaf = phys_section_unassigned;
                }
            }
        } else {
            p = phys_map_nodes[lp->u.node];
        }
        lp = &p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    lp->u.leaf = leaf;
}

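/* Look up the MemoryRegionSection covering the physical page
   'index'.  Unmapped pages resolve to the unassigned section.  The
   returned copy is adjusted so that it begins at the page that was
   looked up. */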
static MemoryRegionSection phys_page_find(target_phys_addr_t index)
{
    PhysPageEntry lp = phys_map;
    PhysPageEntry *p;
    int i;
    MemoryRegionSection section;
    target_phys_addr_t delta;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0; i--) {
        if (lp.u.node == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.u.node];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.u.leaf;
not_found:
    section = phys_sections[s_index];
    index <<= TARGET_PAGE_BITS;
    assert(section.offset_within_address_space <= index
           && index <= section.offset_within_address_space + section.size-1);
    delta = index - section.offset_within_address_space;
    section.offset_within_address_space += delta;
    section.offset_within_region += delta;
    section.size -= delta;
    return section;
}

static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. This will change when a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

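/* Allocate the buffer that receives generated host code: either the
   static buffer, or an mmap'ed region whose placement is constrained
   per host so that direct branches can reach the prologue and other
   translated blocks. */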
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Keep the buffer no bigger than 16MB to branch between blocks */
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now. */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

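/* Return the CPUState with the given cpu_index from the global CPU
   list, or NULL if there is none. */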
CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

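/* Append a newly created CPU to the global list, give it the next
   free cpu_index and, in system mode, register its common state for
   savevm/migration. */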
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated. */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

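/* Unlink 'tb' from a page's TB list.  The low two bits of each list
   pointer encode which of the TB's (up to two) pages the next field
   refers to, so they are masked off before comparing. */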
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

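/* Remove 'tb' from the circular list of TBs that jump to the target
   of tb's jump slot 'n'.  List pointers are tagged in their low two
   bits: 0/1 name a jump slot, 2 marks the list head in the target
   TB itself. */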
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

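/* Remove a TB from everything that can reach it: the physical hash
   table, the page lists, each CPU's tb_jmp_cache and the jump chains;
   TBs that chained to it get their direct jumps reset so they fall
   back to the slow lookup path. */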
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

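/* Set bits [start, start+len) in the byte array 'tab'.  For example,
   set_bits(tab, 3, 7) sets bit positions 3..9, i.e. tab[0] |= 0xf8
   and tab[1] |= 0x03. */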
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

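/* Build the code bitmap of a page: one bit per byte, set where the
   code of some TB lies, so tb_invalidate_phys_page_fast() can tell
   whether a write actually hit translated code. */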
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

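/* Translate a new TB for (pc, cs_base, flags).  If the TB cache is
   full, flush it and retry; the second allocation cannot fail. */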
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

/* invalidate all TBs which intersect with the target physical page
   starting in range [start, end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
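/* For the power-of-two access sizes the memory ops use (len = 1, 2,
   4 or 8), the alignment of 'start' guarantees that the len bits
   tested below all fall within one byte of the code bitmap. */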
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
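/* User-mode only: a write fault hit this page, so every TB on it is
   invalidated.  'pc' and 'puc' come from the signal handler so that,
   with precise SMC, execution can be restarted after retranslation. */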
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

1258/* add the tb in the target page and protect it if necessary */
ths5fafdf22007-09-16 21:08:06 +00001259static inline void tb_alloc_page(TranslationBlock *tb,
Paul Brook41c1b1c2010-03-12 16:54:58 +00001260 unsigned int n, tb_page_addr_t page_addr)
bellardfd6ce8f2003-05-14 19:00:11 +00001261{
1262 PageDesc *p;
Juan Quintela4429ab42011-06-02 01:53:44 +00001263#ifndef CONFIG_USER_ONLY
1264 bool page_already_protected;
1265#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001266
bellard9fa3e852004-01-04 18:06:42 +00001267 tb->page_addr[n] = page_addr;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001268 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
bellard9fa3e852004-01-04 18:06:42 +00001269 tb->page_next[n] = p->first_tb;
Juan Quintela4429ab42011-06-02 01:53:44 +00001270#ifndef CONFIG_USER_ONLY
1271 page_already_protected = p->first_tb != NULL;
1272#endif
bellard9fa3e852004-01-04 18:06:42 +00001273 p->first_tb = (TranslationBlock *)((long)tb | n);
1274 invalidate_page_bitmap(p);
1275
bellard107db442004-06-22 18:48:46 +00001276#if defined(TARGET_HAS_SMC) || 1
bellardd720b932004-04-25 17:57:43 +00001277
bellard9fa3e852004-01-04 18:06:42 +00001278#if defined(CONFIG_USER_ONLY)
bellardfd6ce8f2003-05-14 19:00:11 +00001279 if (p->flags & PAGE_WRITE) {
pbrook53a59602006-03-25 19:31:22 +00001280 target_ulong addr;
1281 PageDesc *p2;
bellard9fa3e852004-01-04 18:06:42 +00001282 int prot;
1283
bellardfd6ce8f2003-05-14 19:00:11 +00001284 /* force the host page to be non-writable (writes will have a
 1285 page fault + mprotect overhead) */
pbrook53a59602006-03-25 19:31:22 +00001286 page_addr &= qemu_host_page_mask;
bellardfd6ce8f2003-05-14 19:00:11 +00001287 prot = 0;
pbrook53a59602006-03-25 19:31:22 +00001288 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1289 addr += TARGET_PAGE_SIZE) {
1290
1291 p2 = page_find (addr >> TARGET_PAGE_BITS);
1292 if (!p2)
1293 continue;
1294 prot |= p2->flags;
1295 p2->flags &= ~PAGE_WRITE;
pbrook53a59602006-03-25 19:31:22 +00001296 }
ths5fafdf22007-09-16 21:08:06 +00001297 mprotect(g2h(page_addr), qemu_host_page_size,
bellardfd6ce8f2003-05-14 19:00:11 +00001298 (prot & PAGE_BITS) & ~PAGE_WRITE);
1299#ifdef DEBUG_TB_INVALIDATE
blueswir1ab3d1722007-11-04 07:31:40 +00001300 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
pbrook53a59602006-03-25 19:31:22 +00001301 page_addr);
bellardfd6ce8f2003-05-14 19:00:11 +00001302#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001303 }
bellard9fa3e852004-01-04 18:06:42 +00001304#else
1305 /* if some code is already present, then the pages are already
1306 protected. So we handle the case where only the first TB is
1307 allocated in a physical page */
Juan Quintela4429ab42011-06-02 01:53:44 +00001308 if (!page_already_protected) {
bellard6a00d602005-11-21 23:25:50 +00001309 tlb_protect_code(page_addr);
bellard9fa3e852004-01-04 18:06:42 +00001310 }
1311#endif
bellardd720b932004-04-25 17:57:43 +00001312
1313#endif /* TARGET_HAS_SMC */
bellardfd6ce8f2003-05-14 19:00:11 +00001314}
1315
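/* Worked example of the protection logic above (user-mode case; page
   sizes assumed for illustration): with 4 KiB target pages on a host
   using 16 KiB pages, protecting code at target page 0x2000 masks the
   address down to host page 0x0000. The flags of target pages 0x0000,
   0x1000, 0x2000 and 0x3000 are OR-ed together, PAGE_WRITE is cleared
   on each PageDesc, and a single mprotect() call covers all four. A
   later guest write anywhere in that host page faults and is handled
   by page_unprotect() further down in this file. */
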
bellard9fa3e852004-01-04 18:06:42 +00001316/* add a new TB and link it to the physical page tables. phys_page2 is
1317 (-1) to indicate that only one page contains the TB. */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001318void tb_link_page(TranslationBlock *tb,
1319 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
bellardd4e81642003-05-25 16:46:15 +00001320{
bellard9fa3e852004-01-04 18:06:42 +00001321 unsigned int h;
1322 TranslationBlock **ptb;
1323
pbrookc8a706f2008-06-02 16:16:42 +00001324 /* Grab the mmap lock to stop another thread invalidating this TB
1325 before we are done. */
1326 mmap_lock();
bellard9fa3e852004-01-04 18:06:42 +00001327 /* add in the physical hash table */
1328 h = tb_phys_hash_func(phys_pc);
1329 ptb = &tb_phys_hash[h];
1330 tb->phys_hash_next = *ptb;
1331 *ptb = tb;
bellardfd6ce8f2003-05-14 19:00:11 +00001332
1333 /* add in the page list */
bellard9fa3e852004-01-04 18:06:42 +00001334 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1335 if (phys_page2 != -1)
1336 tb_alloc_page(tb, 1, phys_page2);
1337 else
1338 tb->page_addr[1] = -1;
bellard9fa3e852004-01-04 18:06:42 +00001339
bellardd4e81642003-05-25 16:46:15 +00001340 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1341 tb->jmp_next[0] = NULL;
1342 tb->jmp_next[1] = NULL;
1343
1344 /* init original jump addresses */
1345 if (tb->tb_next_offset[0] != 0xffff)
1346 tb_reset_jump(tb, 0);
1347 if (tb->tb_next_offset[1] != 0xffff)
1348 tb_reset_jump(tb, 1);
bellard8a40a182005-11-20 10:35:40 +00001349
1350#ifdef DEBUG_TB_CHECK
1351 tb_page_check();
1352#endif
pbrookc8a706f2008-06-02 16:16:42 +00001353 mmap_unlock();
bellardfd6ce8f2003-05-14 19:00:11 +00001354}
1355
bellarda513fe12003-05-27 23:29:48 +00001356/* find the TB 'tb' in the tbs array such that
 1357 tb->tc_ptr <= tc_ptr < (tb + 1)->tc_ptr. Return NULL if not found */
1358TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1359{
1360 int m_min, m_max, m;
1361 unsigned long v;
1362 TranslationBlock *tb;
1363
1364 if (nb_tbs <= 0)
1365 return NULL;
1366 if (tc_ptr < (unsigned long)code_gen_buffer ||
1367 tc_ptr >= (unsigned long)code_gen_ptr)
1368 return NULL;
1369 /* binary search (cf Knuth) */
1370 m_min = 0;
1371 m_max = nb_tbs - 1;
1372 while (m_min <= m_max) {
1373 m = (m_min + m_max) >> 1;
1374 tb = &tbs[m];
1375 v = (unsigned long)tb->tc_ptr;
1376 if (v == tc_ptr)
1377 return tb;
1378 else if (tc_ptr < v) {
1379 m_max = m - 1;
1380 } else {
1381 m_min = m + 1;
1382 }
ths5fafdf22007-09-16 21:08:06 +00001383 }
bellarda513fe12003-05-27 23:29:48 +00001384 return &tbs[m_max];
1385}
bellard75012672003-06-21 13:11:07 +00001386
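/* Usage sketch (illustrative only): mapping a host PC back to the
   translated block containing it, as done from signal handlers for
   precise SMC handling. 'host_pc' is assumed to point into
   code_gen_buffer:

       TranslationBlock *tb = tb_find_pc(host_pc);
       if (tb) {
           cpu_restore_state(tb, env, host_pc);
       }
*/
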
bellardea041c02003-06-25 16:16:50 +00001387static void tb_reset_jump_recursive(TranslationBlock *tb);
1388
1389static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1390{
1391 TranslationBlock *tb1, *tb_next, **ptb;
1392 unsigned int n1;
1393
1394 tb1 = tb->jmp_next[n];
1395 if (tb1 != NULL) {
1396 /* find head of list */
1397 for(;;) {
1398 n1 = (long)tb1 & 3;
1399 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1400 if (n1 == 2)
1401 break;
1402 tb1 = tb1->jmp_next[n1];
1403 }
 1404 /* we are now sure that tb jumps to tb1 */
1405 tb_next = tb1;
1406
1407 /* remove tb from the jmp_first list */
1408 ptb = &tb_next->jmp_first;
1409 for(;;) {
1410 tb1 = *ptb;
1411 n1 = (long)tb1 & 3;
1412 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1413 if (n1 == n && tb1 == tb)
1414 break;
1415 ptb = &tb1->jmp_next[n1];
1416 }
1417 *ptb = tb->jmp_next[n];
1418 tb->jmp_next[n] = NULL;
ths3b46e622007-09-17 08:09:54 +00001419
bellardea041c02003-06-25 16:16:50 +00001420 /* suppress the jump to next tb in generated code */
1421 tb_reset_jump(tb, n);
1422
bellard01243112004-01-04 15:48:17 +00001423 /* suppress jumps in the tb we could have jumped to */
bellardea041c02003-06-25 16:16:50 +00001424 tb_reset_jump_recursive(tb_next);
1425 }
1426}
1427
1428static void tb_reset_jump_recursive(TranslationBlock *tb)
1429{
1430 tb_reset_jump_recursive2(tb, 0);
1431 tb_reset_jump_recursive2(tb, 1);
1432}
1433
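/* The jump lists walked above encode a slot index in the two low bits
   of each TranslationBlock pointer: 0 or 1 names the jump slot, 2
   marks the list head. This is safe because TBs are at least 4-byte
   aligned. A minimal decoding sketch:

       n1 = (long)tb1 & 3;                              // slot tag
       tb1 = (TranslationBlock *)((long)tb1 & ~3);      // real pointer
       if (n1 == 2) {
           // tb1 owns the list: we reached the head
       } else {
           tb1 = tb1->jmp_next[n1];                     // follow slot n1
       }
*/
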
bellard1fddef42005-04-17 19:16:13 +00001434#if defined(TARGET_HAS_ICE)
Paul Brook94df27f2010-02-28 23:47:45 +00001435#if defined(CONFIG_USER_ONLY)
1436static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1437{
1438 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1439}
1440#else
bellardd720b932004-04-25 17:57:43 +00001441static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1442{
Anthony Liguoric227f092009-10-01 16:12:16 -05001443 target_phys_addr_t addr;
Anthony Liguoric227f092009-10-01 16:12:16 -05001444 ram_addr_t ram_addr;
Avi Kivity06ef3522012-02-13 16:11:22 +02001445 MemoryRegionSection section;
bellardd720b932004-04-25 17:57:43 +00001446
pbrookc2f07f82006-04-08 17:14:56 +00001447 addr = cpu_get_phys_page_debug(env, pc);
Avi Kivity06ef3522012-02-13 16:11:22 +02001448 section = phys_page_find(addr >> TARGET_PAGE_BITS);
1449 if (!(memory_region_is_ram(section.mr)
1450 || (section.mr->rom_device && section.mr->readable))) {
1451 return;
1452 }
1453 ram_addr = (memory_region_get_ram_addr(section.mr)
1454 + section.offset_within_region) & TARGET_PAGE_MASK;
1455 ram_addr |= (pc & ~TARGET_PAGE_MASK);
pbrook706cd4b2006-04-08 17:36:21 +00001456 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
bellardd720b932004-04-25 17:57:43 +00001457}
bellardc27004e2005-01-03 23:35:10 +00001458#endif
Paul Brook94df27f2010-02-28 23:47:45 +00001459#endif /* TARGET_HAS_ICE */
bellardd720b932004-04-25 17:57:43 +00001460
Paul Brookc527ee82010-03-01 03:31:14 +00001461#if defined(CONFIG_USER_ONLY)
1462void cpu_watchpoint_remove_all(CPUState *env, int mask)
 1464{
1465}
1466
1467int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1468 int flags, CPUWatchpoint **watchpoint)
1469{
1470 return -ENOSYS;
1471}
1472#else
pbrook6658ffb2007-03-16 23:58:11 +00001473/* Add a watchpoint. */
aliguoria1d1bb32008-11-18 20:07:32 +00001474int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1475 int flags, CPUWatchpoint **watchpoint)
pbrook6658ffb2007-03-16 23:58:11 +00001476{
aliguorib4051332008-11-18 20:14:20 +00001477 target_ulong len_mask = ~(len - 1);
aliguoric0ce9982008-11-25 22:13:57 +00001478 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001479
aliguorib4051332008-11-18 20:14:20 +00001480 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1481 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1482 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1483 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1484 return -EINVAL;
1485 }
Anthony Liguori7267c092011-08-20 22:09:37 -05001486 wp = g_malloc(sizeof(*wp));
pbrook6658ffb2007-03-16 23:58:11 +00001487
aliguoria1d1bb32008-11-18 20:07:32 +00001488 wp->vaddr = addr;
aliguorib4051332008-11-18 20:14:20 +00001489 wp->len_mask = len_mask;
aliguoria1d1bb32008-11-18 20:07:32 +00001490 wp->flags = flags;
1491
aliguori2dc9f412008-11-18 20:56:59 +00001492 /* keep all GDB-injected watchpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001493 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001494 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001495 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001496 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001497
pbrook6658ffb2007-03-16 23:58:11 +00001498 tlb_flush_page(env, addr);
aliguoria1d1bb32008-11-18 20:07:32 +00001499
1500 if (watchpoint)
1501 *watchpoint = wp;
1502 return 0;
pbrook6658ffb2007-03-16 23:58:11 +00001503}
1504
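/* Usage sketch (illustrative, 'env' assumed valid): watch an aligned
   8-byte location for writes, then drop the watchpoint. The length
   must be a power of two and the address aligned to it, otherwise
   -EINVAL is returned:

       CPUWatchpoint *wp;
       if (cpu_watchpoint_insert(env, addr, 8, BP_MEM_WRITE, &wp) == 0) {
           ...
           cpu_watchpoint_remove_by_ref(env, wp);
       }
*/
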
aliguoria1d1bb32008-11-18 20:07:32 +00001505/* Remove a specific watchpoint. */
1506int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1507 int flags)
pbrook6658ffb2007-03-16 23:58:11 +00001508{
aliguorib4051332008-11-18 20:14:20 +00001509 target_ulong len_mask = ~(len - 1);
aliguoria1d1bb32008-11-18 20:07:32 +00001510 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001511
Blue Swirl72cf2d42009-09-12 07:36:22 +00001512 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001513 if (addr == wp->vaddr && len_mask == wp->len_mask
aliguori6e140f22008-11-18 20:37:55 +00001514 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
aliguoria1d1bb32008-11-18 20:07:32 +00001515 cpu_watchpoint_remove_by_ref(env, wp);
pbrook6658ffb2007-03-16 23:58:11 +00001516 return 0;
1517 }
1518 }
aliguoria1d1bb32008-11-18 20:07:32 +00001519 return -ENOENT;
pbrook6658ffb2007-03-16 23:58:11 +00001520}
1521
aliguoria1d1bb32008-11-18 20:07:32 +00001522/* Remove a specific watchpoint by reference. */
1523void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1524{
Blue Swirl72cf2d42009-09-12 07:36:22 +00001525 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
edgar_igl7d03f822008-05-17 18:58:29 +00001526
aliguoria1d1bb32008-11-18 20:07:32 +00001527 tlb_flush_page(env, watchpoint->vaddr);
1528
Anthony Liguori7267c092011-08-20 22:09:37 -05001529 g_free(watchpoint);
edgar_igl7d03f822008-05-17 18:58:29 +00001530}
1531
aliguoria1d1bb32008-11-18 20:07:32 +00001532/* Remove all matching watchpoints. */
1533void cpu_watchpoint_remove_all(CPUState *env, int mask)
1534{
aliguoric0ce9982008-11-25 22:13:57 +00001535 CPUWatchpoint *wp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001536
Blue Swirl72cf2d42009-09-12 07:36:22 +00001537 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001538 if (wp->flags & mask)
1539 cpu_watchpoint_remove_by_ref(env, wp);
aliguoric0ce9982008-11-25 22:13:57 +00001540 }
aliguoria1d1bb32008-11-18 20:07:32 +00001541}
Paul Brookc527ee82010-03-01 03:31:14 +00001542#endif
aliguoria1d1bb32008-11-18 20:07:32 +00001543
1544/* Add a breakpoint. */
1545int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1546 CPUBreakpoint **breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001547{
bellard1fddef42005-04-17 19:16:13 +00001548#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001549 CPUBreakpoint *bp;
ths3b46e622007-09-17 08:09:54 +00001550
Anthony Liguori7267c092011-08-20 22:09:37 -05001551 bp = g_malloc(sizeof(*bp));
aliguoria1d1bb32008-11-18 20:07:32 +00001552
1553 bp->pc = pc;
1554 bp->flags = flags;
1555
aliguori2dc9f412008-11-18 20:56:59 +00001556 /* keep all GDB-injected breakpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001557 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001558 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001559 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001560 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001561
1562 breakpoint_invalidate(env, pc);
1563
1564 if (breakpoint)
1565 *breakpoint = bp;
1566 return 0;
1567#else
1568 return -ENOSYS;
1569#endif
1570}
1571
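/* Usage sketch (illustrative): a gdbstub-style client inserts with
   BP_GDB, which also keeps the breakpoint at the head of the list:

       CPUBreakpoint *bp;
       if (cpu_breakpoint_insert(env, pc, BP_GDB, &bp) == 0) {
           ...
           cpu_breakpoint_remove_by_ref(env, bp);
       }
*/
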
1572/* Remove a specific breakpoint. */
1573int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1574{
1575#if defined(TARGET_HAS_ICE)
1576 CPUBreakpoint *bp;
1577
Blue Swirl72cf2d42009-09-12 07:36:22 +00001578 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00001579 if (bp->pc == pc && bp->flags == flags) {
1580 cpu_breakpoint_remove_by_ref(env, bp);
bellard4c3a88a2003-07-26 12:06:08 +00001581 return 0;
aliguoria1d1bb32008-11-18 20:07:32 +00001582 }
bellard4c3a88a2003-07-26 12:06:08 +00001583 }
aliguoria1d1bb32008-11-18 20:07:32 +00001584 return -ENOENT;
bellard4c3a88a2003-07-26 12:06:08 +00001585#else
aliguoria1d1bb32008-11-18 20:07:32 +00001586 return -ENOSYS;
bellard4c3a88a2003-07-26 12:06:08 +00001587#endif
1588}
1589
aliguoria1d1bb32008-11-18 20:07:32 +00001590/* Remove a specific breakpoint by reference. */
1591void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001592{
bellard1fddef42005-04-17 19:16:13 +00001593#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001594 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
bellardd720b932004-04-25 17:57:43 +00001595
aliguoria1d1bb32008-11-18 20:07:32 +00001596 breakpoint_invalidate(env, breakpoint->pc);
1597
Anthony Liguori7267c092011-08-20 22:09:37 -05001598 g_free(breakpoint);
aliguoria1d1bb32008-11-18 20:07:32 +00001599#endif
1600}
1601
1602/* Remove all matching breakpoints. */
1603void cpu_breakpoint_remove_all(CPUState *env, int mask)
1604{
1605#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001606 CPUBreakpoint *bp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001607
Blue Swirl72cf2d42009-09-12 07:36:22 +00001608 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001609 if (bp->flags & mask)
1610 cpu_breakpoint_remove_by_ref(env, bp);
aliguoric0ce9982008-11-25 22:13:57 +00001611 }
bellard4c3a88a2003-07-26 12:06:08 +00001612#endif
1613}
1614
bellardc33a3462003-07-29 20:50:33 +00001615/* enable or disable single step mode. EXCP_DEBUG is returned by the
1616 CPU loop after each instruction */
1617void cpu_single_step(CPUState *env, int enabled)
1618{
bellard1fddef42005-04-17 19:16:13 +00001619#if defined(TARGET_HAS_ICE)
bellardc33a3462003-07-29 20:50:33 +00001620 if (env->singlestep_enabled != enabled) {
1621 env->singlestep_enabled = enabled;
aliguorie22a25c2009-03-12 20:12:48 +00001622 if (kvm_enabled())
1623 kvm_update_guest_debug(env, 0);
1624 else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01001625 /* must flush all the translated code to avoid inconsistencies */
aliguorie22a25c2009-03-12 20:12:48 +00001626 /* XXX: only flush what is necessary */
1627 tb_flush(env);
1628 }
bellardc33a3462003-07-29 20:50:33 +00001629 }
1630#endif
1631}
1632
bellard34865132003-10-05 14:28:56 +00001633/* enable or disable low-level logging */
1634void cpu_set_log(int log_flags)
1635{
1636 loglevel = log_flags;
1637 if (loglevel && !logfile) {
pbrook11fcfab2007-07-01 18:21:11 +00001638 logfile = fopen(logfilename, log_append ? "a" : "w");
bellard34865132003-10-05 14:28:56 +00001639 if (!logfile) {
1640 perror(logfilename);
1641 _exit(1);
1642 }
bellard9fa3e852004-01-04 18:06:42 +00001643#if !defined(CONFIG_SOFTMMU)
 1644 /* must avoid glibc's use of mmap() by setting a buffer "by hand" */
1645 {
blueswir1b55266b2008-09-20 08:07:15 +00001646 static char logfile_buf[4096];
bellard9fa3e852004-01-04 18:06:42 +00001647 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1648 }
Stefan Weildaf767b2011-12-03 22:32:37 +01001649#elif defined(_WIN32)
1650 /* Win32 doesn't support line-buffering, so use unbuffered output. */
1651 setvbuf(logfile, NULL, _IONBF, 0);
1652#else
bellard34865132003-10-05 14:28:56 +00001653 setvbuf(logfile, NULL, _IOLBF, 0);
bellard9fa3e852004-01-04 18:06:42 +00001654#endif
pbrooke735b912007-06-30 13:53:24 +00001655 log_append = 1;
1656 }
1657 if (!loglevel && logfile) {
1658 fclose(logfile);
1659 logfile = NULL;
bellard34865132003-10-05 14:28:56 +00001660 }
1661}
1662
1663void cpu_set_log_filename(const char *filename)
1664{
1665 logfilename = strdup(filename);
pbrooke735b912007-06-30 13:53:24 +00001666 if (logfile) {
1667 fclose(logfile);
1668 logfile = NULL;
1669 }
1670 cpu_set_log(loglevel);
bellard34865132003-10-05 14:28:56 +00001671}
bellardc33a3462003-07-29 20:50:33 +00001672
aurel323098dba2009-03-07 21:28:24 +00001673static void cpu_unlink_tb(CPUState *env)
bellardea041c02003-06-25 16:16:50 +00001674{
pbrookd5975362008-06-07 20:50:51 +00001675 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1676 problem and hope the cpu will stop of its own accord. For userspace
1677 emulation this often isn't actually as bad as it sounds. Often
1678 signals are used primarily to interrupt blocking syscalls. */
aurel323098dba2009-03-07 21:28:24 +00001679 TranslationBlock *tb;
Anthony Liguoric227f092009-10-01 16:12:16 -05001680 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
aurel323098dba2009-03-07 21:28:24 +00001681
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001682 spin_lock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001683 tb = env->current_tb;
1684 /* if the cpu is currently executing code, we must unlink it and
1685 all the potentially executing TB */
Riku Voipiof76cfe52009-12-04 15:16:30 +02001686 if (tb) {
aurel323098dba2009-03-07 21:28:24 +00001687 env->current_tb = NULL;
1688 tb_reset_jump_recursive(tb);
aurel323098dba2009-03-07 21:28:24 +00001689 }
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001690 spin_unlock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001691}
1692
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001693#ifndef CONFIG_USER_ONLY
aurel323098dba2009-03-07 21:28:24 +00001694/* mask must never be zero, except for A20 change call */
Jan Kiszkaec6959d2011-04-13 01:32:56 +02001695static void tcg_handle_interrupt(CPUState *env, int mask)
aurel323098dba2009-03-07 21:28:24 +00001696{
1697 int old_mask;
1698
1699 old_mask = env->interrupt_request;
1700 env->interrupt_request |= mask;
1701
aliguori8edac962009-04-24 18:03:45 +00001702 /*
1703 * If called from iothread context, wake the target cpu in
 1704 * case it's halted.
1705 */
Jan Kiszkab7680cb2011-03-12 17:43:51 +01001706 if (!qemu_cpu_is_self(env)) {
aliguori8edac962009-04-24 18:03:45 +00001707 qemu_cpu_kick(env);
1708 return;
1709 }
aliguori8edac962009-04-24 18:03:45 +00001710
pbrook2e70f6e2008-06-29 01:03:05 +00001711 if (use_icount) {
pbrook266910c2008-07-09 15:31:50 +00001712 env->icount_decr.u16.high = 0xffff;
pbrook2e70f6e2008-06-29 01:03:05 +00001713 if (!can_do_io(env)
aurel32be214e62009-03-06 21:48:00 +00001714 && (mask & ~old_mask) != 0) {
pbrook2e70f6e2008-06-29 01:03:05 +00001715 cpu_abort(env, "Raised interrupt while not in I/O function");
1716 }
pbrook2e70f6e2008-06-29 01:03:05 +00001717 } else {
aurel323098dba2009-03-07 21:28:24 +00001718 cpu_unlink_tb(env);
bellardea041c02003-06-25 16:16:50 +00001719 }
1720}
1721
Jan Kiszkaec6959d2011-04-13 01:32:56 +02001722CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1723
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001724#else /* CONFIG_USER_ONLY */
1725
1726void cpu_interrupt(CPUState *env, int mask)
1727{
1728 env->interrupt_request |= mask;
1729 cpu_unlink_tb(env);
1730}
1731#endif /* CONFIG_USER_ONLY */
1732
bellardb54ad042004-05-20 13:42:52 +00001733void cpu_reset_interrupt(CPUState *env, int mask)
1734{
1735 env->interrupt_request &= ~mask;
1736}
1737
aurel323098dba2009-03-07 21:28:24 +00001738void cpu_exit(CPUState *env)
1739{
1740 env->exit_request = 1;
1741 cpu_unlink_tb(env);
1742}
1743
blueswir1c7cd6a32008-10-02 18:27:46 +00001744const CPULogItem cpu_log_items[] = {
ths5fafdf22007-09-16 21:08:06 +00001745 { CPU_LOG_TB_OUT_ASM, "out_asm",
bellardf193c792004-03-21 17:06:25 +00001746 "show generated host assembly code for each compiled TB" },
1747 { CPU_LOG_TB_IN_ASM, "in_asm",
1748 "show target assembly code for each compiled TB" },
ths5fafdf22007-09-16 21:08:06 +00001749 { CPU_LOG_TB_OP, "op",
bellard57fec1f2008-02-01 10:50:11 +00001750 "show micro ops for each compiled TB" },
bellardf193c792004-03-21 17:06:25 +00001751 { CPU_LOG_TB_OP_OPT, "op_opt",
blueswir1e01a1152008-03-14 17:37:11 +00001752 "show micro ops "
1753#ifdef TARGET_I386
1754 "before eflags optimization and "
bellardf193c792004-03-21 17:06:25 +00001755#endif
blueswir1e01a1152008-03-14 17:37:11 +00001756 "after liveness analysis" },
bellardf193c792004-03-21 17:06:25 +00001757 { CPU_LOG_INT, "int",
1758 "show interrupts/exceptions in short format" },
1759 { CPU_LOG_EXEC, "exec",
1760 "show trace before each executed TB (lots of logs)" },
bellard9fddaa02004-05-21 12:59:32 +00001761 { CPU_LOG_TB_CPU, "cpu",
thse91c8a72007-06-03 13:35:16 +00001762 "show CPU state before block translation" },
bellardf193c792004-03-21 17:06:25 +00001763#ifdef TARGET_I386
1764 { CPU_LOG_PCALL, "pcall",
1765 "show protected mode far calls/returns/exceptions" },
aliguorieca1bdf2009-01-26 19:54:31 +00001766 { CPU_LOG_RESET, "cpu_reset",
1767 "show CPU state before CPU resets" },
bellardf193c792004-03-21 17:06:25 +00001768#endif
bellard8e3a9fd2004-10-09 17:32:58 +00001769#ifdef DEBUG_IOPORT
bellardfd872592004-05-12 19:11:15 +00001770 { CPU_LOG_IOPORT, "ioport",
1771 "show all i/o ports accesses" },
bellard8e3a9fd2004-10-09 17:32:58 +00001772#endif
bellardf193c792004-03-21 17:06:25 +00001773 { 0, NULL, NULL },
1774};
1775
1776static int cmp1(const char *s1, int n, const char *s2)
1777{
1778 if (strlen(s2) != n)
1779 return 0;
1780 return memcmp(s1, s2, n) == 0;
1781}
ths3b46e622007-09-17 08:09:54 +00001782
bellardf193c792004-03-21 17:06:25 +00001783/* takes a comma-separated list of log masks. Returns 0 on error. */
1784int cpu_str_to_log_mask(const char *str)
1785{
blueswir1c7cd6a32008-10-02 18:27:46 +00001786 const CPULogItem *item;
bellardf193c792004-03-21 17:06:25 +00001787 int mask;
1788 const char *p, *p1;
1789
1790 p = str;
1791 mask = 0;
1792 for(;;) {
1793 p1 = strchr(p, ',');
1794 if (!p1)
1795 p1 = p + strlen(p);
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001796 if(cmp1(p,p1-p,"all")) {
1797 for(item = cpu_log_items; item->mask != 0; item++) {
1798 mask |= item->mask;
1799 }
1800 } else {
1801 for(item = cpu_log_items; item->mask != 0; item++) {
1802 if (cmp1(p, p1 - p, item->name))
1803 goto found;
1804 }
1805 return 0;
bellardf193c792004-03-21 17:06:25 +00001806 }
bellardf193c792004-03-21 17:06:25 +00001807 found:
1808 mask |= item->mask;
1809 if (*p1 != ',')
1810 break;
1811 p = p1 + 1;
1812 }
1813 return mask;
1814}
bellardea041c02003-06-25 16:16:50 +00001815
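/* Usage sketch (illustrative): parsing a '-d' style option string.
   "in_asm,exec" yields CPU_LOG_TB_IN_ASM | CPU_LOG_EXEC, the special
   name "all" selects every item, and 0 signals a parse error:

       int mask = cpu_str_to_log_mask("in_asm,exec");
       if (!mask) {
           fprintf(stderr, "unknown log item\n");
       } else {
           cpu_set_log(mask);
       }
*/
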
bellard75012672003-06-21 13:11:07 +00001816void cpu_abort(CPUState *env, const char *fmt, ...)
1817{
1818 va_list ap;
pbrook493ae1f2007-11-23 16:53:59 +00001819 va_list ap2;
bellard75012672003-06-21 13:11:07 +00001820
1821 va_start(ap, fmt);
pbrook493ae1f2007-11-23 16:53:59 +00001822 va_copy(ap2, ap);
bellard75012672003-06-21 13:11:07 +00001823 fprintf(stderr, "qemu: fatal: ");
1824 vfprintf(stderr, fmt, ap);
1825 fprintf(stderr, "\n");
1826#ifdef TARGET_I386
bellard7fe48482004-10-09 18:08:01 +00001827 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1828#else
1829 cpu_dump_state(env, stderr, fprintf, 0);
bellard75012672003-06-21 13:11:07 +00001830#endif
aliguori93fcfe32009-01-15 22:34:14 +00001831 if (qemu_log_enabled()) {
1832 qemu_log("qemu: fatal: ");
1833 qemu_log_vprintf(fmt, ap2);
1834 qemu_log("\n");
j_mayerf9373292007-09-29 12:18:20 +00001835#ifdef TARGET_I386
aliguori93fcfe32009-01-15 22:34:14 +00001836 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
j_mayerf9373292007-09-29 12:18:20 +00001837#else
aliguori93fcfe32009-01-15 22:34:14 +00001838 log_cpu_state(env, 0);
j_mayerf9373292007-09-29 12:18:20 +00001839#endif
aliguori31b1a7b2009-01-15 22:35:09 +00001840 qemu_log_flush();
aliguori93fcfe32009-01-15 22:34:14 +00001841 qemu_log_close();
balrog924edca2007-06-10 14:07:13 +00001842 }
pbrook493ae1f2007-11-23 16:53:59 +00001843 va_end(ap2);
j_mayerf9373292007-09-29 12:18:20 +00001844 va_end(ap);
Riku Voipiofd052bf2010-01-25 14:30:49 +02001845#if defined(CONFIG_USER_ONLY)
1846 {
1847 struct sigaction act;
1848 sigfillset(&act.sa_mask);
1849 act.sa_handler = SIG_DFL;
1850 sigaction(SIGABRT, &act, NULL);
1851 }
1852#endif
bellard75012672003-06-21 13:11:07 +00001853 abort();
1854}
1855
thsc5be9f02007-02-28 20:20:53 +00001856CPUState *cpu_copy(CPUState *env)
1857{
ths01ba9812007-12-09 02:22:57 +00001858 CPUState *new_env = cpu_init(env->cpu_model_str);
thsc5be9f02007-02-28 20:20:53 +00001859 CPUState *next_cpu = new_env->next_cpu;
1860 int cpu_index = new_env->cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001861#if defined(TARGET_HAS_ICE)
1862 CPUBreakpoint *bp;
1863 CPUWatchpoint *wp;
1864#endif
1865
thsc5be9f02007-02-28 20:20:53 +00001866 memcpy(new_env, env, sizeof(CPUState));
aliguori5a38f082009-01-15 20:16:51 +00001867
1868 /* Preserve chaining and index. */
thsc5be9f02007-02-28 20:20:53 +00001869 new_env->next_cpu = next_cpu;
1870 new_env->cpu_index = cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001871
1872 /* Clone all break/watchpoints.
1873 Note: Once we support ptrace with hw-debug register access, make sure
1874 BP_CPU break/watchpoints are handled correctly on clone. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00001875 QTAILQ_INIT(&env->breakpoints);
1876 QTAILQ_INIT(&env->watchpoints);
aliguori5a38f082009-01-15 20:16:51 +00001877#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001878 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001879 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1880 }
Blue Swirl72cf2d42009-09-12 07:36:22 +00001881 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001882 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1883 wp->flags, NULL);
1884 }
1885#endif
1886
thsc5be9f02007-02-28 20:20:53 +00001887 return new_env;
1888}
1889
bellard01243112004-01-04 15:48:17 +00001890#if !defined(CONFIG_USER_ONLY)
1891
edgar_igl5c751e92008-05-06 08:44:21 +00001892static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1893{
1894 unsigned int i;
1895
 1896 /* Discard jump cache entries for any tb which might
1897 overlap the flushed page. */
1898 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1899 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001900 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001901
1902 i = tb_jmp_cache_hash_page(addr);
1903 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001904 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001905}
1906
Igor Kovalenko08738982009-07-12 02:15:40 +04001907static CPUTLBEntry s_cputlb_empty_entry = {
1908 .addr_read = -1,
1909 .addr_write = -1,
1910 .addr_code = -1,
1911 .addend = -1,
1912};
1913
Peter Maydell771124e2012-01-17 13:23:13 +00001914/* NOTE:
1915 * If flush_global is true (the usual case), flush all tlb entries.
1916 * If flush_global is false, flush (at least) all tlb entries not
1917 * marked global.
1918 *
1919 * Since QEMU doesn't currently implement a global/not-global flag
1920 * for tlb entries, at the moment tlb_flush() will also flush all
1921 * tlb entries in the flush_global == false case. This is OK because
1922 * CPU architectures generally permit an implementation to drop
1923 * entries from the TLB at any time, so flushing more entries than
1924 * required is only an efficiency issue, not a correctness issue.
1925 */
bellardee8b7022004-02-03 23:35:10 +00001926void tlb_flush(CPUState *env, int flush_global)
bellard33417e72003-08-10 21:47:01 +00001927{
bellard33417e72003-08-10 21:47:01 +00001928 int i;
bellard01243112004-01-04 15:48:17 +00001929
bellard9fa3e852004-01-04 18:06:42 +00001930#if defined(DEBUG_TLB)
1931 printf("tlb_flush:\n");
1932#endif
bellard01243112004-01-04 15:48:17 +00001933 /* must reset current TB so that interrupts cannot modify the
1934 links while we are modifying them */
1935 env->current_tb = NULL;
1936
bellard33417e72003-08-10 21:47:01 +00001937 for(i = 0; i < CPU_TLB_SIZE; i++) {
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001938 int mmu_idx;
1939 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
Igor Kovalenko08738982009-07-12 02:15:40 +04001940 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001941 }
bellard33417e72003-08-10 21:47:01 +00001942 }
bellard9fa3e852004-01-04 18:06:42 +00001943
bellard8a40a182005-11-20 10:35:40 +00001944 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
bellard9fa3e852004-01-04 18:06:42 +00001945
Paul Brookd4c430a2010-03-17 02:14:28 +00001946 env->tlb_flush_addr = -1;
1947 env->tlb_flush_mask = 0;
bellarde3db7222005-01-26 22:00:47 +00001948 tlb_flush_count++;
bellard33417e72003-08-10 21:47:01 +00001949}
1950
bellard274da6b2004-05-20 21:56:27 +00001951static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
bellard61382a52003-10-27 21:22:23 +00001952{
ths5fafdf22007-09-16 21:08:06 +00001953 if (addr == (tlb_entry->addr_read &
bellard84b7b8e2005-11-28 21:19:04 +00001954 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00001955 addr == (tlb_entry->addr_write &
bellard84b7b8e2005-11-28 21:19:04 +00001956 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00001957 addr == (tlb_entry->addr_code &
bellard84b7b8e2005-11-28 21:19:04 +00001958 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
Igor Kovalenko08738982009-07-12 02:15:40 +04001959 *tlb_entry = s_cputlb_empty_entry;
bellard84b7b8e2005-11-28 21:19:04 +00001960 }
bellard61382a52003-10-27 21:22:23 +00001961}
1962
bellard2e126692004-04-25 21:28:44 +00001963void tlb_flush_page(CPUState *env, target_ulong addr)
bellard33417e72003-08-10 21:47:01 +00001964{
bellard8a40a182005-11-20 10:35:40 +00001965 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001966 int mmu_idx;
bellard01243112004-01-04 15:48:17 +00001967
bellard9fa3e852004-01-04 18:06:42 +00001968#if defined(DEBUG_TLB)
bellard108c49b2005-07-24 12:55:09 +00001969 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
bellard9fa3e852004-01-04 18:06:42 +00001970#endif
Paul Brookd4c430a2010-03-17 02:14:28 +00001971 /* Check if we need to flush due to large pages. */
1972 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
1973#if defined(DEBUG_TLB)
1974 printf("tlb_flush_page: forced full flush ("
1975 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
1976 env->tlb_flush_addr, env->tlb_flush_mask);
1977#endif
1978 tlb_flush(env, 1);
1979 return;
1980 }
bellard01243112004-01-04 15:48:17 +00001981 /* must reset current TB so that interrupts cannot modify the
1982 links while we are modifying them */
1983 env->current_tb = NULL;
bellard33417e72003-08-10 21:47:01 +00001984
bellard61382a52003-10-27 21:22:23 +00001985 addr &= TARGET_PAGE_MASK;
bellard33417e72003-08-10 21:47:01 +00001986 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001987 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1988 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
bellard01243112004-01-04 15:48:17 +00001989
edgar_igl5c751e92008-05-06 08:44:21 +00001990 tlb_flush_jmp_cache(env, addr);
bellard9fa3e852004-01-04 18:06:42 +00001991}
1992
bellard9fa3e852004-01-04 18:06:42 +00001993/* update the TLBs so that writes to code in the virtual page 'addr'
1994 can be detected */
Anthony Liguoric227f092009-10-01 16:12:16 -05001995static void tlb_protect_code(ram_addr_t ram_addr)
bellard61382a52003-10-27 21:22:23 +00001996{
ths5fafdf22007-09-16 21:08:06 +00001997 cpu_physical_memory_reset_dirty(ram_addr,
bellard6a00d602005-11-21 23:25:50 +00001998 ram_addr + TARGET_PAGE_SIZE,
1999 CODE_DIRTY_FLAG);
bellard9fa3e852004-01-04 18:06:42 +00002000}
2001
bellard9fa3e852004-01-04 18:06:42 +00002002/* update the TLB so that writes in physical page 'phys_addr' are no longer
bellard3a7d9292005-08-21 09:26:42 +00002003 tested for self-modifying code */
Anthony Liguoric227f092009-10-01 16:12:16 -05002004static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
bellard3a7d9292005-08-21 09:26:42 +00002005 target_ulong vaddr)
bellard9fa3e852004-01-04 18:06:42 +00002006{
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002007 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
bellard1ccde1c2004-02-06 19:46:14 +00002008}
2009
ths5fafdf22007-09-16 21:08:06 +00002010static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
bellard1ccde1c2004-02-06 19:46:14 +00002011 unsigned long start, unsigned long length)
2012{
2013 unsigned long addr;
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002014 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
bellard84b7b8e2005-11-28 21:19:04 +00002015 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
bellard1ccde1c2004-02-06 19:46:14 +00002016 if ((addr - start) < length) {
pbrook0f459d12008-06-09 00:20:13 +00002017 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
bellard1ccde1c2004-02-06 19:46:14 +00002018 }
2019 }
2020}
2021
pbrook5579c7f2009-04-11 14:47:08 +00002022/* Note: start and end must be within the same ram block. */
Anthony Liguoric227f092009-10-01 16:12:16 -05002023void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
bellard0a962c02005-02-10 22:00:27 +00002024 int dirty_flags)
bellard1ccde1c2004-02-06 19:46:14 +00002025{
2026 CPUState *env;
bellard4f2ac232004-04-26 19:44:02 +00002027 unsigned long length, start1;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002028 int i;
bellard1ccde1c2004-02-06 19:46:14 +00002029
2030 start &= TARGET_PAGE_MASK;
2031 end = TARGET_PAGE_ALIGN(end);
2032
2033 length = end - start;
2034 if (length == 0)
2035 return;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002036 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00002037
bellard1ccde1c2004-02-06 19:46:14 +00002038 /* we modify the TLB cache so that the dirty bit will be set again
2039 when accessing the range */
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02002040 start1 = (unsigned long)qemu_safe_ram_ptr(start);
Stefan Weila57d23e2011-04-30 22:49:26 +02002041 /* Check that we don't span multiple blocks - this breaks the
pbrook5579c7f2009-04-11 14:47:08 +00002042 address comparisons below. */
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02002043 if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
pbrook5579c7f2009-04-11 14:47:08 +00002044 != (end - 1) - start) {
2045 abort();
2046 }
2047
bellard6a00d602005-11-21 23:25:50 +00002048 for(env = first_cpu; env != NULL; env = env->next_cpu) {
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002049 int mmu_idx;
2050 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2051 for(i = 0; i < CPU_TLB_SIZE; i++)
2052 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2053 start1, length);
2054 }
bellard6a00d602005-11-21 23:25:50 +00002055 }
bellard1ccde1c2004-02-06 19:46:14 +00002056}
2057
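/* Illustrative flow (no new code paths): once the dirty bits are
   reset above, the matching TLB write entries carry TLB_NOTDIRTY, so
   the next guest store to such a page leaves the fast path, goes
   through the notdirty slot, sets the dirty flags again and calls
   tlb_set_dirty() so that further stores to the page are fast:

       cpu_physical_memory_reset_dirty(start, end, MIGRATION_DIRTY_FLAG);
       // ... guest store into [start, end) ...
       // afterwards cpu_physical_memory_is_dirty(start) holds again
*/
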
aliguori74576192008-10-06 14:02:03 +00002058int cpu_physical_memory_set_dirty_tracking(int enable)
2059{
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002060 int ret = 0;
aliguori74576192008-10-06 14:02:03 +00002061 in_migration = enable;
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002062 return ret;
aliguori74576192008-10-06 14:02:03 +00002063}
2064
bellard3a7d9292005-08-21 09:26:42 +00002065static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2066{
Anthony Liguoric227f092009-10-01 16:12:16 -05002067 ram_addr_t ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00002068 void *p;
bellard3a7d9292005-08-21 09:26:42 +00002069
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002070 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
pbrook5579c7f2009-04-11 14:47:08 +00002071 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2072 + tlb_entry->addend);
Marcelo Tosattie8902612010-10-11 15:31:19 -03002073 ram_addr = qemu_ram_addr_from_host_nofail(p);
bellard3a7d9292005-08-21 09:26:42 +00002074 if (!cpu_physical_memory_is_dirty(ram_addr)) {
pbrook0f459d12008-06-09 00:20:13 +00002075 tlb_entry->addr_write |= TLB_NOTDIRTY;
bellard3a7d9292005-08-21 09:26:42 +00002076 }
2077 }
2078}
2079
2080/* update the TLB according to the current state of the dirty bits */
2081void cpu_tlb_update_dirty(CPUState *env)
2082{
2083 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002084 int mmu_idx;
2085 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2086 for(i = 0; i < CPU_TLB_SIZE; i++)
2087 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2088 }
bellard3a7d9292005-08-21 09:26:42 +00002089}
2090
pbrook0f459d12008-06-09 00:20:13 +00002091static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002092{
pbrook0f459d12008-06-09 00:20:13 +00002093 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2094 tlb_entry->addr_write = vaddr;
bellard1ccde1c2004-02-06 19:46:14 +00002095}
2096
pbrook0f459d12008-06-09 00:20:13 +00002097/* update the TLB corresponding to virtual page vaddr
2098 so that it is no longer dirty */
2099static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002100{
bellard1ccde1c2004-02-06 19:46:14 +00002101 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002102 int mmu_idx;
bellard1ccde1c2004-02-06 19:46:14 +00002103
pbrook0f459d12008-06-09 00:20:13 +00002104 vaddr &= TARGET_PAGE_MASK;
bellard1ccde1c2004-02-06 19:46:14 +00002105 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002106 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2107 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
bellard9fa3e852004-01-04 18:06:42 +00002108}
2109
Paul Brookd4c430a2010-03-17 02:14:28 +00002110/* Our TLB does not support large pages, so remember the area covered by
2111 large pages and trigger a full TLB flush if these are invalidated. */
2112static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2113 target_ulong size)
2114{
2115 target_ulong mask = ~(size - 1);
2116
2117 if (env->tlb_flush_addr == (target_ulong)-1) {
2118 env->tlb_flush_addr = vaddr & mask;
2119 env->tlb_flush_mask = mask;
2120 return;
2121 }
2122 /* Extend the existing region to include the new page.
2123 This is a compromise between unnecessary flushes and the cost
2124 of maintaining a full variable size TLB. */
2125 mask &= env->tlb_flush_mask;
2126 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2127 mask <<= 1;
2128 }
2129 env->tlb_flush_addr &= mask;
2130 env->tlb_flush_mask = mask;
2131}
2132
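/* Worked example of the mask widening above (sizes assumed for
   illustration): after a 64 KiB page at 0x100000 the region is
   0x100000/0xffff0000. Adding another 64 KiB page at 0x180000 gives
   0x100000 ^ 0x180000 = 0x080000, so the mask is shifted left until
   that bit clears, ending at 0xfff00000. The recorded region becomes
   0x100000/0xfff00000 (1 MiB); any tlb_flush_page() inside it now
   forces a full flush. */
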
Avi Kivity06ef3522012-02-13 16:11:22 +02002133static bool is_ram_rom(MemoryRegionSection *s)
Avi Kivity1d393fa2012-01-01 21:15:42 +02002134{
Avi Kivity06ef3522012-02-13 16:11:22 +02002135 return memory_region_is_ram(s->mr);
Avi Kivity1d393fa2012-01-01 21:15:42 +02002136}
2137
Avi Kivity06ef3522012-02-13 16:11:22 +02002138static bool is_romd(MemoryRegionSection *s)
Avi Kivity75c578d2012-01-02 15:40:52 +02002139{
Avi Kivity06ef3522012-02-13 16:11:22 +02002140 MemoryRegion *mr = s->mr;
Avi Kivity75c578d2012-01-02 15:40:52 +02002141
Avi Kivity75c578d2012-01-02 15:40:52 +02002142 return mr->rom_device && mr->readable;
2143}
2144
Avi Kivity06ef3522012-02-13 16:11:22 +02002145static bool is_ram_rom_romd(MemoryRegionSection *s)
Avi Kivity1d393fa2012-01-01 21:15:42 +02002146{
Avi Kivity06ef3522012-02-13 16:11:22 +02002147 return is_ram_rom(s) || is_romd(s);
Avi Kivity1d393fa2012-01-01 21:15:42 +02002148}
2149
Paul Brookd4c430a2010-03-17 02:14:28 +00002150/* Add a new TLB entry. At most one entry for a given virtual address
 2151 is permitted. Only a single TARGET_PAGE_SIZE region is mapped; the
2152 supplied size is only used by tlb_flush_page. */
2153void tlb_set_page(CPUState *env, target_ulong vaddr,
2154 target_phys_addr_t paddr, int prot,
2155 int mmu_idx, target_ulong size)
bellard9fa3e852004-01-04 18:06:42 +00002156{
Avi Kivity06ef3522012-02-13 16:11:22 +02002157 MemoryRegionSection section;
bellard9fa3e852004-01-04 18:06:42 +00002158 unsigned int index;
bellard4f2ac232004-04-26 19:44:02 +00002159 target_ulong address;
pbrook0f459d12008-06-09 00:20:13 +00002160 target_ulong code_address;
Paul Brook355b1942010-04-05 00:28:53 +01002161 unsigned long addend;
bellard84b7b8e2005-11-28 21:19:04 +00002162 CPUTLBEntry *te;
aliguoria1d1bb32008-11-18 20:07:32 +00002163 CPUWatchpoint *wp;
Anthony Liguoric227f092009-10-01 16:12:16 -05002164 target_phys_addr_t iotlb;
bellard9fa3e852004-01-04 18:06:42 +00002165
Paul Brookd4c430a2010-03-17 02:14:28 +00002166 assert(size >= TARGET_PAGE_SIZE);
2167 if (size != TARGET_PAGE_SIZE) {
2168 tlb_add_large_page(env, vaddr, size);
2169 }
Avi Kivity06ef3522012-02-13 16:11:22 +02002170 section = phys_page_find(paddr >> TARGET_PAGE_BITS);
bellard9fa3e852004-01-04 18:06:42 +00002171#if defined(DEBUG_TLB)
Stefan Weil7fd3f492010-09-30 22:39:51 +02002172    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
 2173           " prot=%x idx=%d\n",
 2174           vaddr, paddr, prot, mmu_idx);
bellard9fa3e852004-01-04 18:06:42 +00002175#endif
2176
pbrook0f459d12008-06-09 00:20:13 +00002177 address = vaddr;
Avi Kivity06ef3522012-02-13 16:11:22 +02002178 if (!is_ram_rom_romd(&section)) {
pbrook0f459d12008-06-09 00:20:13 +00002179 /* IO memory case (romd handled later) */
2180 address |= TLB_MMIO;
2181 }
Avi Kivity06ef3522012-02-13 16:11:22 +02002182 if (is_ram_rom_romd(&section)) {
2183 addend = (unsigned long)(memory_region_get_ram_ptr(section.mr)
2184 + section.offset_within_region);
2185 } else {
2186 addend = 0;
2187 }
2188 if (is_ram_rom(&section)) {
pbrook0f459d12008-06-09 00:20:13 +00002189 /* Normal RAM. */
Avi Kivity06ef3522012-02-13 16:11:22 +02002190 iotlb = (memory_region_get_ram_addr(section.mr)
2191 + section.offset_within_region) & TARGET_PAGE_MASK;
2192 if (!section.readonly)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002193 iotlb |= io_mem_notdirty.ram_addr;
pbrook0f459d12008-06-09 00:20:13 +00002194 else
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002195 iotlb |= io_mem_rom.ram_addr;
pbrook0f459d12008-06-09 00:20:13 +00002196 } else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002197 /* IO handlers are currently passed a physical address.
pbrook0f459d12008-06-09 00:20:13 +00002198 It would be nice to pass an offset from the base address
 2199 of that region. This would avoid having to special-case RAM,
 2200 and avoid full address decoding in every device.
 2201 We can't use the high bits of the iotlb value for this
 2202 because romd regions store a ram address there. */
Avi Kivity06ef3522012-02-13 16:11:22 +02002203 iotlb = memory_region_get_ram_addr(section.mr) & ~TARGET_PAGE_MASK;
2204 iotlb += section.offset_within_region;
pbrook0f459d12008-06-09 00:20:13 +00002205 }
pbrook6658ffb2007-03-16 23:58:11 +00002206
pbrook0f459d12008-06-09 00:20:13 +00002207 code_address = address;
2208 /* Make accesses to pages with watchpoints go via the
2209 watchpoint trap routines. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00002210 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00002211 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
Jun Koibf298f82010-05-06 14:36:59 +09002212 /* Avoid trapping reads of pages with a write breakpoint. */
2213 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
Avi Kivity1ec9b902012-01-02 12:47:48 +02002214 iotlb = io_mem_watch.ram_addr + paddr;
Jun Koibf298f82010-05-06 14:36:59 +09002215 address |= TLB_MMIO;
2216 break;
2217 }
pbrook6658ffb2007-03-16 23:58:11 +00002218 }
pbrook0f459d12008-06-09 00:20:13 +00002219 }
balrogd79acba2007-06-26 20:01:13 +00002220
pbrook0f459d12008-06-09 00:20:13 +00002221 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2222 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2223 te = &env->tlb_table[mmu_idx][index];
2224 te->addend = addend - vaddr;
2225 if (prot & PAGE_READ) {
2226 te->addr_read = address;
2227 } else {
2228 te->addr_read = -1;
2229 }
edgar_igl5c751e92008-05-06 08:44:21 +00002230
pbrook0f459d12008-06-09 00:20:13 +00002231 if (prot & PAGE_EXEC) {
2232 te->addr_code = code_address;
2233 } else {
2234 te->addr_code = -1;
2235 }
2236 if (prot & PAGE_WRITE) {
Avi Kivity06ef3522012-02-13 16:11:22 +02002237 if ((memory_region_is_ram(section.mr) && section.readonly)
2238 || is_romd(&section)) {
pbrook0f459d12008-06-09 00:20:13 +00002239 /* Write access calls the I/O callback. */
2240 te->addr_write = address | TLB_MMIO;
Avi Kivity06ef3522012-02-13 16:11:22 +02002241 } else if (memory_region_is_ram(section.mr)
2242 && !cpu_physical_memory_is_dirty(
2243 section.mr->ram_addr
2244 + section.offset_within_region)) {
pbrook0f459d12008-06-09 00:20:13 +00002245 te->addr_write = address | TLB_NOTDIRTY;
bellard84b7b8e2005-11-28 21:19:04 +00002246 } else {
pbrook0f459d12008-06-09 00:20:13 +00002247 te->addr_write = address;
bellard9fa3e852004-01-04 18:06:42 +00002248 }
pbrook0f459d12008-06-09 00:20:13 +00002249 } else {
2250 te->addr_write = -1;
bellard9fa3e852004-01-04 18:06:42 +00002251 }
bellard9fa3e852004-01-04 18:06:42 +00002252}
2253
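/* Illustrative summary of the addr_write outcomes above (assuming
   PAGE_WRITE was requested):
     - writable RAM, page dirty:  vaddr            (direct fast path)
     - writable RAM, page clean:  vaddr | TLB_NOTDIRTY
                                  (first store marks the page dirty)
     - read-only RAM, ROM, romd:  vaddr | TLB_MMIO (I/O callback)
   Pages with a matching write watchpoint additionally get TLB_MMIO so
   stores are routed through the watchpoint handler. */
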
bellard01243112004-01-04 15:48:17 +00002254#else
2255
bellardee8b7022004-02-03 23:35:10 +00002256void tlb_flush(CPUState *env, int flush_global)
bellard01243112004-01-04 15:48:17 +00002257{
2258}
2259
bellard2e126692004-04-25 21:28:44 +00002260void tlb_flush_page(CPUState *env, target_ulong addr)
bellard01243112004-01-04 15:48:17 +00002261{
2262}
2263
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002264/*
2265 * Walks guest process memory "regions" one by one
2266 * and calls callback function 'fn' for each region.
2267 */
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002268
2269struct walk_memory_regions_data
bellard9fa3e852004-01-04 18:06:42 +00002270{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002271 walk_memory_regions_fn fn;
2272 void *priv;
2273 unsigned long start;
2274 int prot;
2275};
bellard9fa3e852004-01-04 18:06:42 +00002276
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002277static int walk_memory_regions_end(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00002278 abi_ulong end, int new_prot)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002279{
2280 if (data->start != -1ul) {
2281 int rc = data->fn(data->priv, data->start, end, data->prot);
2282 if (rc != 0) {
2283 return rc;
bellard9fa3e852004-01-04 18:06:42 +00002284 }
bellard33417e72003-08-10 21:47:01 +00002285 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002286
2287 data->start = (new_prot ? end : -1ul);
2288 data->prot = new_prot;
2289
2290 return 0;
2291}
2292
2293static int walk_memory_regions_1(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00002294 abi_ulong base, int level, void **lp)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002295{
Paul Brookb480d9b2010-03-12 23:23:29 +00002296 abi_ulong pa;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002297 int i, rc;
2298
2299 if (*lp == NULL) {
2300 return walk_memory_regions_end(data, base, 0);
2301 }
2302
2303 if (level == 0) {
2304 PageDesc *pd = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00002305 for (i = 0; i < L2_SIZE; ++i) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002306 int prot = pd[i].flags;
2307
2308 pa = base | (i << TARGET_PAGE_BITS);
2309 if (prot != data->prot) {
2310 rc = walk_memory_regions_end(data, pa, prot);
2311 if (rc != 0) {
2312 return rc;
2313 }
2314 }
2315 }
2316 } else {
2317 void **pp = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00002318 for (i = 0; i < L2_SIZE; ++i) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002319 pa = base | ((abi_ulong)i <<
2320 (TARGET_PAGE_BITS + L2_BITS * level));
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002321 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2322 if (rc != 0) {
2323 return rc;
2324 }
2325 }
2326 }
2327
2328 return 0;
2329}
2330
2331int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2332{
2333 struct walk_memory_regions_data data;
2334 unsigned long i;
2335
2336 data.fn = fn;
2337 data.priv = priv;
2338 data.start = -1ul;
2339 data.prot = 0;
2340
2341 for (i = 0; i < V_L1_SIZE; i++) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002342 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002343 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2344 if (rc != 0) {
2345 return rc;
2346 }
2347 }
2348
2349 return walk_memory_regions_end(&data, 0, 0);
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002350}
2351
Paul Brookb480d9b2010-03-12 23:23:29 +00002352static int dump_region(void *priv, abi_ulong start,
2353 abi_ulong end, unsigned long prot)
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002354{
2355 FILE *f = (FILE *)priv;
2356
Paul Brookb480d9b2010-03-12 23:23:29 +00002357 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2358 " "TARGET_ABI_FMT_lx" %c%c%c\n",
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002359 start, end, end - start,
2360 ((prot & PAGE_READ) ? 'r' : '-'),
2361 ((prot & PAGE_WRITE) ? 'w' : '-'),
2362 ((prot & PAGE_EXEC) ? 'x' : '-'));
2363
2364 return (0);
2365}
2366
2367/* dump memory mappings */
2368void page_dump(FILE *f)
2369{
2370 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2371 "start", "end", "size", "prot");
2372 walk_memory_regions(f, dump_region);
bellard33417e72003-08-10 21:47:01 +00002373}
2374
pbrook53a59602006-03-25 19:31:22 +00002375int page_get_flags(target_ulong address)
bellard33417e72003-08-10 21:47:01 +00002376{
bellard9fa3e852004-01-04 18:06:42 +00002377 PageDesc *p;
2378
2379 p = page_find(address >> TARGET_PAGE_BITS);
bellard33417e72003-08-10 21:47:01 +00002380 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00002381 return 0;
2382 return p->flags;
bellard33417e72003-08-10 21:47:01 +00002383}
2384
Richard Henderson376a7902010-03-10 15:57:04 -08002385/* Modify the flags of a page and invalidate the code if necessary.
2386 The flag PAGE_WRITE_ORG is positioned automatically depending
2387 on PAGE_WRITE. The mmap_lock should already be held. */
pbrook53a59602006-03-25 19:31:22 +00002388void page_set_flags(target_ulong start, target_ulong end, int flags)
bellard9fa3e852004-01-04 18:06:42 +00002389{
Richard Henderson376a7902010-03-10 15:57:04 -08002390 target_ulong addr, len;
bellard9fa3e852004-01-04 18:06:42 +00002391
Richard Henderson376a7902010-03-10 15:57:04 -08002392 /* This function should never be called with addresses outside the
2393 guest address space. If this assert fires, it probably indicates
2394 a missing call to h2g_valid. */
Paul Brookb480d9b2010-03-12 23:23:29 +00002395#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2396 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002397#endif
2398 assert(start < end);
2399
bellard9fa3e852004-01-04 18:06:42 +00002400 start = start & TARGET_PAGE_MASK;
2401 end = TARGET_PAGE_ALIGN(end);
Richard Henderson376a7902010-03-10 15:57:04 -08002402
2403 if (flags & PAGE_WRITE) {
bellard9fa3e852004-01-04 18:06:42 +00002404 flags |= PAGE_WRITE_ORG;
Richard Henderson376a7902010-03-10 15:57:04 -08002405 }
2406
2407 for (addr = start, len = end - start;
2408 len != 0;
2409 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2410 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2411
2412 /* If the write protection bit is set, then we invalidate
2413 the code inside. */
ths5fafdf22007-09-16 21:08:06 +00002414 if (!(p->flags & PAGE_WRITE) &&
bellard9fa3e852004-01-04 18:06:42 +00002415 (flags & PAGE_WRITE) &&
2416 p->first_tb) {
bellardd720b932004-04-25 17:57:43 +00002417 tb_invalidate_phys_page(addr, 0, NULL);
bellard9fa3e852004-01-04 18:06:42 +00002418 }
2419 p->flags = flags;
2420 }
bellard9fa3e852004-01-04 18:06:42 +00002421}
2422
ths3d97b402007-11-02 19:02:07 +00002423int page_check_range(target_ulong start, target_ulong len, int flags)
2424{
2425 PageDesc *p;
2426 target_ulong end;
2427 target_ulong addr;
2428
Richard Henderson376a7902010-03-10 15:57:04 -08002429 /* This function should never be called with addresses outside the
2430 guest address space. If this assert fires, it probably indicates
2431 a missing call to h2g_valid. */
Blue Swirl338e9e62010-03-13 09:48:08 +00002432#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2433 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002434#endif
2435
Richard Henderson3e0650a2010-03-29 10:54:42 -07002436 if (len == 0) {
2437 return 0;
2438 }
Richard Henderson376a7902010-03-10 15:57:04 -08002439 if (start + len - 1 < start) {
2440 /* We've wrapped around. */
balrog55f280c2008-10-28 10:24:11 +00002441 return -1;
Richard Henderson376a7902010-03-10 15:57:04 -08002442 }
balrog55f280c2008-10-28 10:24:11 +00002443
ths3d97b402007-11-02 19:02:07 +00002444 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2445 start = start & TARGET_PAGE_MASK;
2446
Richard Henderson376a7902010-03-10 15:57:04 -08002447 for (addr = start, len = end - start;
2448 len != 0;
2449 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
ths3d97b402007-11-02 19:02:07 +00002450 p = page_find(addr >> TARGET_PAGE_BITS);
2451 if( !p )
2452 return -1;
2453 if( !(p->flags & PAGE_VALID) )
2454 return -1;
2455
bellarddae32702007-11-14 10:51:00 +00002456 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
ths3d97b402007-11-02 19:02:07 +00002457 return -1;
bellarddae32702007-11-14 10:51:00 +00002458 if (flags & PAGE_WRITE) {
2459 if (!(p->flags & PAGE_WRITE_ORG))
2460 return -1;
2461 /* unprotect the page if it was put read-only because it
2462 contains translated code */
2463 if (!(p->flags & PAGE_WRITE)) {
2464 if (!page_unprotect(addr, 0, NULL))
2465 return -1;
2466 }
2467 return 0;
2468 }
ths3d97b402007-11-02 19:02:07 +00002469 }
2470 return 0;
2471}
2472
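/* Usage sketch (illustrative; TARGET_EFAULT is the error convention
   of the user-mode syscall layer, not defined in this file):
   validating a guest buffer before touching it:

       if (page_check_range(guest_addr, len, PAGE_READ | PAGE_WRITE) < 0) {
           return -TARGET_EFAULT;
       }
*/
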
bellard9fa3e852004-01-04 18:06:42 +00002473/* called from signal handler: invalidate the code and unprotect the
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002474 page. Return TRUE if the fault was successfully handled. */
pbrook53a59602006-03-25 19:31:22 +00002475int page_unprotect(target_ulong address, unsigned long pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00002476{
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002477 unsigned int prot;
2478 PageDesc *p;
pbrook53a59602006-03-25 19:31:22 +00002479 target_ulong host_start, host_end, addr;
bellard9fa3e852004-01-04 18:06:42 +00002480
pbrookc8a706f2008-06-02 16:16:42 +00002481 /* Technically this isn't safe inside a signal handler. However we
2482 know this only ever happens in a synchronous SEGV handler, so in
2483 practice it seems to be ok. */
2484 mmap_lock();
2485
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002486 p = page_find(address >> TARGET_PAGE_BITS);
2487 if (!p) {
pbrookc8a706f2008-06-02 16:16:42 +00002488 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002489 return 0;
pbrookc8a706f2008-06-02 16:16:42 +00002490 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002491
bellard9fa3e852004-01-04 18:06:42 +00002492 /* if the page was really writable, then we change its
2493 protection back to writable */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002494 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2495 host_start = address & qemu_host_page_mask;
2496 host_end = host_start + qemu_host_page_size;
2497
2498 prot = 0;
2499 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2500 p = page_find(addr >> TARGET_PAGE_BITS);
2501 p->flags |= PAGE_WRITE;
2502 prot |= p->flags;
2503
bellard9fa3e852004-01-04 18:06:42 +00002504 /* and since the content will be modified, we must invalidate
2505 the corresponding translated code. */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002506 tb_invalidate_phys_page(addr, pc, puc);
bellard9fa3e852004-01-04 18:06:42 +00002507#ifdef DEBUG_TB_CHECK
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002508 tb_invalidate_check(addr);
bellard9fa3e852004-01-04 18:06:42 +00002509#endif
bellard9fa3e852004-01-04 18:06:42 +00002510 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002511 mprotect((void *)g2h(host_start), qemu_host_page_size,
2512 prot & PAGE_BITS);
2513
2514 mmap_unlock();
2515 return 1;
bellard9fa3e852004-01-04 18:06:42 +00002516 }
pbrookc8a706f2008-06-02 16:16:42 +00002517 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002518 return 0;
2519}
2520
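/* Flow sketch (illustrative): this is the second half of the SMC
   protection set up in tb_alloc_page(). A host SEGV handler in
   user-mode emulation does roughly:

       if (page_unprotect(h2g(host_fault_addr), pc, puc)) {
           return 1;   // TBs invalidated, page writable again: retry
       }
       // otherwise it is a genuine guest fault
*/
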
bellard6a00d602005-11-21 23:25:50 +00002521static inline void tlb_set_dirty(CPUState *env,
2522 unsigned long addr, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002523{
2524}
bellard9fa3e852004-01-04 18:06:42 +00002525#endif /* defined(CONFIG_USER_ONLY) */
2526
pbrooke2eef172008-06-08 01:09:01 +00002527#if !defined(CONFIG_USER_ONLY)
pbrook8da3ff12008-12-01 18:59:50 +00002528
Paul Brookc04b2b72010-03-01 03:31:14 +00002529#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2530typedef struct subpage_t {
Avi Kivity70c68e42012-01-02 12:32:48 +02002531 MemoryRegion iomem;
Paul Brookc04b2b72010-03-01 03:31:14 +00002532 target_phys_addr_t base;
Avi Kivity5312bd82012-02-12 18:32:55 +02002533 uint16_t sub_section[TARGET_PAGE_SIZE];
Paul Brookc04b2b72010-03-01 03:31:14 +00002534} subpage_t;
2535
Anthony Liguoric227f092009-10-01 16:12:16 -05002536static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002537 uint16_t section);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002538static subpage_t *subpage_init(target_phys_addr_t base);
Avi Kivity5312bd82012-02-12 18:32:55 +02002539static void destroy_page_desc(uint16_t section_index)
Avi Kivity54688b12012-02-09 17:34:32 +02002540{
Avi Kivity5312bd82012-02-12 18:32:55 +02002541 MemoryRegionSection *section = &phys_sections[section_index];
2542 MemoryRegion *mr = section->mr;
Avi Kivity54688b12012-02-09 17:34:32 +02002543
2544 if (mr->subpage) {
2545 subpage_t *subpage = container_of(mr, subpage_t, iomem);
2546 memory_region_destroy(&subpage->iomem);
2547 g_free(subpage);
2548 }
2549}
2550
Avi Kivity4346ae32012-02-10 17:00:01 +02002551static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
Avi Kivity54688b12012-02-09 17:34:32 +02002552{
2553 unsigned i;
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002554 PhysPageEntry *p;
Avi Kivity54688b12012-02-09 17:34:32 +02002555
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002556 if (lp->u.node == PHYS_MAP_NODE_NIL) {
Avi Kivity54688b12012-02-09 17:34:32 +02002557 return;
2558 }
2559
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002560 p = phys_map_nodes[lp->u.node];
Avi Kivity4346ae32012-02-10 17:00:01 +02002561 for (i = 0; i < L2_SIZE; ++i) {
2562 if (level > 0) {
Avi Kivity54688b12012-02-09 17:34:32 +02002563 destroy_l2_mapping(&p[i], level - 1);
Avi Kivity4346ae32012-02-10 17:00:01 +02002564 } else {
2565 destroy_page_desc(p[i].u.leaf);
Avi Kivity54688b12012-02-09 17:34:32 +02002566 }
Avi Kivity54688b12012-02-09 17:34:32 +02002567 }
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002568 lp->u.node = PHYS_MAP_NODE_NIL;
Avi Kivity54688b12012-02-09 17:34:32 +02002569}
2570
2571static void destroy_all_mappings(void)
2572{
Avi Kivity3eef53d2012-02-10 14:57:31 +02002573 destroy_l2_mapping(&phys_map, P_L2_LEVELS - 1);
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002574 phys_map_nodes_reset();
Avi Kivity54688b12012-02-09 17:34:32 +02002575}
2576
Avi Kivity5312bd82012-02-12 18:32:55 +02002577static uint16_t phys_section_add(MemoryRegionSection *section)
2578{
2579 if (phys_sections_nb == phys_sections_nb_alloc) {
2580 phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
2581 phys_sections = g_renew(MemoryRegionSection, phys_sections,
2582 phys_sections_nb_alloc);
2583 }
2584 phys_sections[phys_sections_nb] = *section;
2585 return phys_sections_nb++;
2586}
2587
2588static void phys_sections_clear(void)
2589{
2590 phys_sections_nb = 0;
2591}
2592
Michael S. Tsirkin8f2498f2009-09-29 18:53:16 +02002593/* register physical memory.
2594 For RAM, 'size' must be a multiple of the target page size.
2595 A MemoryRegionSection whose start or size is not page aligned is
pbrook8da3ff12008-12-01 18:59:50 +00002596 split: the partial head and tail pages are dispatched through a
2597 subpage_t, while the page-aligned middle is registered directly.
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002598 The address used when calling the IO function is the offset of the
pbrook8da3ff12008-12-01 18:59:50 +00002599 access from the start of the region. */
Avi Kivity0f0cb162012-02-13 17:14:32 +02002601static void register_subpage(MemoryRegionSection *section)
2602{
2603 subpage_t *subpage;
2604 target_phys_addr_t base = section->offset_within_address_space
2605 & TARGET_PAGE_MASK;
2606 MemoryRegionSection existing = phys_page_find(base >> TARGET_PAGE_BITS);
2607 MemoryRegionSection subsection = {
2608 .offset_within_address_space = base,
2609 .size = TARGET_PAGE_SIZE,
2610 };
Avi Kivity0f0cb162012-02-13 17:14:32 +02002611 target_phys_addr_t start, end;
2612
2613 assert(existing.mr->subpage || existing.mr == &io_mem_unassigned);
2614
2615 if (!(existing.mr->subpage)) {
2616 subpage = subpage_init(base);
2617 subsection.mr = &subpage->iomem;
Avi Kivitya3918432012-02-13 17:19:30 +02002618 phys_page_set(base >> TARGET_PAGE_BITS, phys_section_add(&subsection));
Avi Kivity0f0cb162012-02-13 17:14:32 +02002619 } else {
2620 subpage = container_of(existing.mr, subpage_t, iomem);
2621 }
2622 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
2623 end = start + section->size;
2624 subpage_register(subpage, start, end, phys_section_add(section));
2625}
2626
2627
2628static void register_multipage(MemoryRegionSection *section)
bellard33417e72003-08-10 21:47:01 +00002629{
Avi Kivitydd811242012-01-02 12:17:03 +02002630 target_phys_addr_t start_addr = section->offset_within_address_space;
2631 ram_addr_t size = section->size;
Anthony Liguoric227f092009-10-01 16:12:16 -05002632 target_phys_addr_t addr, end_addr;
Avi Kivity5312bd82012-02-12 18:32:55 +02002633 uint16_t section_index = phys_section_add(section);
Avi Kivitydd811242012-01-02 12:17:03 +02002634
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002635 assert(size);
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002636
Anthony Liguoric227f092009-10-01 16:12:16 -05002637 end_addr = start_addr + (target_phys_addr_t)size;
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002638
2639 addr = start_addr;
2640 do {
Avi Kivitya3918432012-02-13 17:19:30 +02002641 phys_page_set(addr >> TARGET_PAGE_BITS, section_index);
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002642 addr += TARGET_PAGE_SIZE;
2643 } while (addr != end_addr);
bellard33417e72003-08-10 21:47:01 +00002644}
2645
Avi Kivity0f0cb162012-02-13 17:14:32 +02002646void cpu_register_physical_memory_log(MemoryRegionSection *section,
2647 bool readonly)
2648{
2649 MemoryRegionSection now = *section, remain = *section;
2650
2651 if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
2652 || (now.size < TARGET_PAGE_SIZE)) {
2653 now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
2654 - now.offset_within_address_space,
2655 now.size);
2656 register_subpage(&now);
2657 remain.size -= now.size;
2658 remain.offset_within_address_space += now.size;
2659 remain.offset_within_region += now.size;
2660 }
2661 now = remain;
2662 now.size &= TARGET_PAGE_MASK;
2663 if (now.size) {
2664 register_multipage(&now);
2665 remain.size -= now.size;
2666 remain.offset_within_address_space += now.size;
2667 remain.offset_within_region += now.size;
2668 }
2669 now = remain;
2670 if (now.size) {
2671 register_subpage(&now);
2672 }
2673}
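/* Worked example (a sketch assuming 4K target pages): a section
   covering guest physical [0x1800, 0x4400) is registered in three
   pieces by the code above:
       [0x1800, 0x2000) -> register_subpage()   (partial head page)
       [0x2000, 0x4000) -> register_multipage() (aligned middle)
       [0x4000, 0x4400) -> register_subpage()   (partial tail page) */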
2674
2675
Anthony Liguoric227f092009-10-01 16:12:16 -05002676void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002677{
2678 if (kvm_enabled())
2679 kvm_coalesce_mmio_region(addr, size);
2680}
2681
Anthony Liguoric227f092009-10-01 16:12:16 -05002682void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002683{
2684 if (kvm_enabled())
2685 kvm_uncoalesce_mmio_region(addr, size);
2686}
2687
Sheng Yang62a27442010-01-26 19:21:16 +08002688void qemu_flush_coalesced_mmio_buffer(void)
2689{
2690 if (kvm_enabled())
2691 kvm_flush_coalesced_mmio_buffer();
2692}
2693
Marcelo Tosattic9027602010-03-01 20:25:08 -03002694#if defined(__linux__) && !defined(TARGET_S390X)
2695
2696#include <sys/vfs.h>
2697
2698#define HUGETLBFS_MAGIC 0x958458f6
2699
2700static long gethugepagesize(const char *path)
2701{
2702 struct statfs fs;
2703 int ret;
2704
2705 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002706 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002707 } while (ret != 0 && errno == EINTR);
2708
2709 if (ret != 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002710 perror(path);
2711 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002712 }
2713
2714 if (fs.f_type != HUGETLBFS_MAGIC)
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002715 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002716
2717 return fs.f_bsize;
2718}
2719
Alex Williamson04b16652010-07-02 11:13:17 -06002720static void *file_ram_alloc(RAMBlock *block,
2721 ram_addr_t memory,
2722 const char *path)
Marcelo Tosattic9027602010-03-01 20:25:08 -03002723{
2724 char *filename;
2725 void *area;
2726 int fd;
2727#ifdef MAP_POPULATE
2728 int flags;
2729#endif
2730 unsigned long hpagesize;
2731
2732 hpagesize = gethugepagesize(path);
2733 if (!hpagesize) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002734 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002735 }
2736
2737 if (memory < hpagesize) {
2738 return NULL;
2739 }
2740
2741 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2742 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2743 return NULL;
2744 }
2745
2746 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002747 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002748 }
2749
2750 fd = mkstemp(filename);
2751 if (fd < 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002752 perror("unable to create backing store for hugepages");
2753 free(filename);
2754 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002755 }
2756 unlink(filename);
2757 free(filename);
2758
2759 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2760
2761 /*
2762 * ftruncate is not supported by hugetlbfs in older
2763 * hosts, so don't bother bailing out on errors.
2764 * If anything goes wrong with it under other filesystems,
2765 * mmap will fail.
2766 */
2767 if (ftruncate(fd, memory))
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002768 perror("ftruncate");
Marcelo Tosattic9027602010-03-01 20:25:08 -03002769
2770#ifdef MAP_POPULATE
2771 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2772 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2773 * to sidestep this quirk.
2774 */
2775 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2776 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2777#else
2778 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2779#endif
2780 if (area == MAP_FAILED) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002781 perror("file_ram_alloc: can't mmap RAM pages");
2782 close(fd);
2783 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002784 }
Alex Williamson04b16652010-07-02 11:13:17 -06002785 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002786 return area;
2787}
2788#endif
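/* Worked example (sketch): with 2MB huge pages, a 5MB request is
   rounded up by
       memory = (memory + hpagesize - 1) & ~(hpagesize - 1);
   to 6MB, so the mapping always covers whole huge pages. The backing
   file is mkstemp()ed and unlink()ed right away, so it disappears as
   soon as the fd is closed. */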
2789
Alex Williamsond17b5282010-06-25 11:08:38 -06002790static ram_addr_t find_ram_offset(ram_addr_t size)
2791{
Alex Williamson04b16652010-07-02 11:13:17 -06002792 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06002793 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002794
2795 if (QLIST_EMPTY(&ram_list.blocks))
2796 return 0;
2797
2798 QLIST_FOREACH(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002799 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002800
2801 end = block->offset + block->length;
2802
2803 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2804 if (next_block->offset >= end) {
2805 next = MIN(next, next_block->offset);
2806 }
2807 }
2808 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06002809 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06002810 mingap = next - end;
2811 }
2812 }
Alex Williamson3e837b22011-10-31 08:54:09 -06002813
2814 if (offset == RAM_ADDR_MAX) {
2815 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2816 (uint64_t)size);
2817 abort();
2818 }
2819
Alex Williamson04b16652010-07-02 11:13:17 -06002820 return offset;
2821}
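/* Worked example (sketch): with existing blocks [0, 0x1000) and
   [0x3000, 0x4000), find_ram_offset(0x1000) considers the gaps
   [0x1000, 0x3000) and [0x4000, RAM_ADDR_MAX); the first is the
   smallest gap that still fits, so 0x1000 is returned. Best-fit keeps
   fragmentation low as blocks are freed and reallocated. */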
2822
2823static ram_addr_t last_ram_offset(void)
2824{
Alex Williamsond17b5282010-06-25 11:08:38 -06002825 RAMBlock *block;
2826 ram_addr_t last = 0;
2827
2828 QLIST_FOREACH(block, &ram_list.blocks, next)
2829 last = MAX(last, block->offset + block->length);
2830
2831 return last;
2832}
2833
Avi Kivityc5705a72011-12-20 15:59:12 +02002834void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
Cam Macdonell84b89d72010-07-26 18:10:57 -06002835{
2836 RAMBlock *new_block, *block;
2837
Avi Kivityc5705a72011-12-20 15:59:12 +02002838 new_block = NULL;
2839 QLIST_FOREACH(block, &ram_list.blocks, next) {
2840 if (block->offset == addr) {
2841 new_block = block;
2842 break;
2843 }
2844 }
2845 assert(new_block);
2846 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002847
2848 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2849 char *id = dev->parent_bus->info->get_dev_path(dev);
2850 if (id) {
2851 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05002852 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002853 }
2854 }
2855 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2856
2857 QLIST_FOREACH(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02002858 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06002859 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2860 new_block->idstr);
2861 abort();
2862 }
2863 }
Avi Kivityc5705a72011-12-20 15:59:12 +02002864}
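/* Illustrative result (sketch; the exact text depends on the bus's
   get_dev_path): for a PCI device the idstr might look like
   "0000:00:02.0/vga.vram", while RAM registered without a device is
   just the plain name. Duplicates abort because the idstr is what
   migration uses to match RAM blocks between source and target. */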
2865
2866ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2867 MemoryRegion *mr)
2868{
2869 RAMBlock *new_block;
2870
2871 size = TARGET_PAGE_ALIGN(size);
2872 new_block = g_malloc0(sizeof(*new_block));
Cam Macdonell84b89d72010-07-26 18:10:57 -06002873
Avi Kivity7c637362011-12-21 13:09:49 +02002874 new_block->mr = mr;
Jun Nakajima432d2682010-08-31 16:41:25 +01002875 new_block->offset = find_ram_offset(size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002876 if (host) {
2877 new_block->host = host;
Huang Yingcd19cfa2011-03-02 08:56:19 +01002878 new_block->flags |= RAM_PREALLOC_MASK;
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002879 } else {
2880 if (mem_path) {
2881#if defined (__linux__) && !defined(TARGET_S390X)
2882 new_block->host = file_ram_alloc(new_block, size, mem_path);
2883 if (!new_block->host) {
2884 new_block->host = qemu_vmalloc(size);
Andreas Färbere78815a2010-09-25 11:26:05 +00002885 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002886 }
2887#else
2888 fprintf(stderr, "-mem-path option unsupported\n");
2889 exit(1);
2890#endif
2891 } else {
2892#if defined(TARGET_S390X) && defined(CONFIG_KVM)
Christian Borntraegerff836782011-05-10 14:49:10 +02002893 /* S390 KVM requires the topmost vma of the RAM to be smaller than
2894 a system-defined value, which is at least 256GB. Larger systems
2895 have larger values. We put the guest between the end of the data
2896 segment (system break) and this value. We use 32GB as a base to
2897 have enough room for the system break to grow. */
2898 new_block->host = mmap((void*)0x800000000, size,
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002899 PROT_EXEC|PROT_READ|PROT_WRITE,
Christian Borntraegerff836782011-05-10 14:49:10 +02002900 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
Alexander Graffb8b2732011-05-20 17:33:28 +02002901 if (new_block->host == MAP_FAILED) {
2902 fprintf(stderr, "Allocating RAM failed\n");
2903 abort();
2904 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002905#else
Jan Kiszka868bb332011-06-21 22:59:09 +02002906 if (xen_enabled()) {
Avi Kivityfce537d2011-12-18 15:48:55 +02002907 xen_ram_alloc(new_block->offset, size, mr);
Jun Nakajima432d2682010-08-31 16:41:25 +01002908 } else {
2909 new_block->host = qemu_vmalloc(size);
2910 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002911#endif
Andreas Färbere78815a2010-09-25 11:26:05 +00002912 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002913 }
2914 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06002915 new_block->length = size;
2916
2917 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2918
Anthony Liguori7267c092011-08-20 22:09:37 -05002919 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
Cam Macdonell84b89d72010-07-26 18:10:57 -06002920 last_ram_offset() >> TARGET_PAGE_BITS);
2921 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2922 0xff, size >> TARGET_PAGE_BITS);
2923
2924 if (kvm_enabled())
2925 kvm_setup_guest_memory(new_block->host, size);
2926
2927 return new_block->offset;
2928}
2929
Avi Kivityc5705a72011-12-20 15:59:12 +02002930ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
pbrook94a6b542009-04-11 17:15:54 +00002931{
Avi Kivityc5705a72011-12-20 15:59:12 +02002932 return qemu_ram_alloc_from_ptr(size, NULL, mr);
pbrook94a6b542009-04-11 17:15:54 +00002933}
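/* A minimal usage sketch (mr, ram_size and example_alloc are
   hypothetical, caller-owned names, not defined in this file): */
#if 0
static MemoryRegion mr;      /* region owned by a device or board */

static void example_alloc(ram_addr_t ram_size)
{
    ram_addr_t offset = qemu_ram_alloc(ram_size, &mr);
    void *host = qemu_get_ram_ptr(offset); /* host view of the block */
    memset(host, 0, ram_size);
}
#endif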
bellarde9a1ab12007-02-08 23:08:38 +00002934
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002935void qemu_ram_free_from_ptr(ram_addr_t addr)
2936{
2937 RAMBlock *block;
2938
2939 QLIST_FOREACH(block, &ram_list.blocks, next) {
2940 if (addr == block->offset) {
2941 QLIST_REMOVE(block, next);
Anthony Liguori7267c092011-08-20 22:09:37 -05002942 g_free(block);
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002943 return;
2944 }
2945 }
2946}
2947
Anthony Liguoric227f092009-10-01 16:12:16 -05002948void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00002949{
Alex Williamson04b16652010-07-02 11:13:17 -06002950 RAMBlock *block;
2951
2952 QLIST_FOREACH(block, &ram_list.blocks, next) {
2953 if (addr == block->offset) {
2954 QLIST_REMOVE(block, next);
Huang Yingcd19cfa2011-03-02 08:56:19 +01002955 if (block->flags & RAM_PREALLOC_MASK) {
2956 ;
2957 } else if (mem_path) {
Alex Williamson04b16652010-07-02 11:13:17 -06002958#if defined (__linux__) && !defined(TARGET_S390X)
2959 if (block->fd) {
2960 munmap(block->host, block->length);
2961 close(block->fd);
2962 } else {
2963 qemu_vfree(block->host);
2964 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01002965#else
2966 abort();
Alex Williamson04b16652010-07-02 11:13:17 -06002967#endif
2968 } else {
2969#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2970 munmap(block->host, block->length);
2971#else
Jan Kiszka868bb332011-06-21 22:59:09 +02002972 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002973 xen_invalidate_map_cache_entry(block->host);
Jun Nakajima432d2682010-08-31 16:41:25 +01002974 } else {
2975 qemu_vfree(block->host);
2976 }
Alex Williamson04b16652010-07-02 11:13:17 -06002977#endif
2978 }
Anthony Liguori7267c092011-08-20 22:09:37 -05002979 g_free(block);
Alex Williamson04b16652010-07-02 11:13:17 -06002980 return;
2981 }
2982 }
2983
bellarde9a1ab12007-02-08 23:08:38 +00002984}
2985
Huang Yingcd19cfa2011-03-02 08:56:19 +01002986#ifndef _WIN32
2987void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2988{
2989 RAMBlock *block;
2990 ram_addr_t offset;
2991 int flags;
2992 void *area, *vaddr;
2993
2994 QLIST_FOREACH(block, &ram_list.blocks, next) {
2995 offset = addr - block->offset;
2996 if (offset < block->length) {
2997 vaddr = block->host + offset;
2998 if (block->flags & RAM_PREALLOC_MASK) {
2999 ;
3000 } else {
3001 flags = MAP_FIXED;
3002 munmap(vaddr, length);
3003 if (mem_path) {
3004#if defined(__linux__) && !defined(TARGET_S390X)
3005 if (block->fd) {
3006#ifdef MAP_POPULATE
3007 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
3008 MAP_PRIVATE;
3009#else
3010 flags |= MAP_PRIVATE;
3011#endif
3012 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3013 flags, block->fd, offset);
3014 } else {
3015 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3016 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3017 flags, -1, 0);
3018 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01003019#else
3020 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01003021#endif
3022 } else {
3023#if defined(TARGET_S390X) && defined(CONFIG_KVM)
3024 flags |= MAP_SHARED | MAP_ANONYMOUS;
3025 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
3026 flags, -1, 0);
3027#else
3028 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3029 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3030 flags, -1, 0);
3031#endif
3032 }
3033 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00003034 fprintf(stderr, "Could not remap addr: "
3035 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01003036 length, addr);
3037 exit(1);
3038 }
3039 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
3040 }
3041 return;
3042 }
3043 }
3044}
3045#endif /* !_WIN32 */
3046
pbrookdc828ca2009-04-09 22:21:07 +00003047/* Return a host pointer to ram allocated with qemu_ram_alloc.
pbrook5579c7f2009-04-11 14:47:08 +00003048 With the exception of the softmmu code in this file, this should
3049 only be used for local memory (e.g. video ram) that the device owns,
3050 and knows it isn't going to access beyond the end of the block.
3051
3052 It should not be used for general purpose DMA.
3053 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
3054 */
Anthony Liguoric227f092009-10-01 16:12:16 -05003055void *qemu_get_ram_ptr(ram_addr_t addr)
pbrookdc828ca2009-04-09 22:21:07 +00003056{
pbrook94a6b542009-04-11 17:15:54 +00003057 RAMBlock *block;
3058
Alex Williamsonf471a172010-06-11 11:11:42 -06003059 QLIST_FOREACH(block, &ram_list.blocks, next) {
3060 if (addr - block->offset < block->length) {
Vincent Palatin7d82af32011-03-10 15:47:46 -05003061 /* Move this entry to the start of the list. */
3062 if (block != QLIST_FIRST(&ram_list.blocks)) {
3063 QLIST_REMOVE(block, next);
3064 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
3065 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003066 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003067 /* We need to check if the requested address is in RAM
3068 * because we don't want to map all of guest memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003069 * In that case just map up to the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01003070 */
3071 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003072 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01003073 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003074 block->host =
3075 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01003076 }
3077 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003078 return block->host + (addr - block->offset);
3079 }
pbrook94a6b542009-04-11 17:15:54 +00003080 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003081
3082 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3083 abort();
3084
3085 return NULL;
pbrookdc828ca2009-04-09 22:21:07 +00003086}
3087
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02003088/* Return a host pointer to ram allocated with qemu_ram_alloc.
3089 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3090 */
3091void *qemu_safe_ram_ptr(ram_addr_t addr)
3092{
3093 RAMBlock *block;
3094
3095 QLIST_FOREACH(block, &ram_list.blocks, next) {
3096 if (addr - block->offset < block->length) {
Jan Kiszka868bb332011-06-21 22:59:09 +02003097 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003098 /* We need to check if the requested address is in RAM
3099 * because we don't want to map all of guest memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003100 * In that case just map up to the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01003101 */
3102 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003103 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01003104 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003105 block->host =
3106 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01003107 }
3108 }
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02003109 return block->host + (addr - block->offset);
3110 }
3111 }
3112
3113 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3114 abort();
3115
3116 return NULL;
3117}
3118
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003119/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
3120 * but takes a size argument */
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003121void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003122{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003123 if (*size == 0) {
3124 return NULL;
3125 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003126 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003127 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02003128 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003129 RAMBlock *block;
3130
3131 QLIST_FOREACH(block, &ram_list.blocks, next) {
3132 if (addr - block->offset < block->length) {
3133 if (addr - block->offset + *size > block->length)
3134 *size = block->length - addr + block->offset;
3135 return block->host + (addr - block->offset);
3136 }
3137 }
3138
3139 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3140 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003141 }
3142}
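/* Usage sketch: *size is an in/out parameter, clamped to what is
   contiguous within the RAMBlock containing addr (names below are
   hypothetical): */
#if 0
ram_addr_t want = 0x2000;
void *p = qemu_ram_ptr_length(addr, &want);
/* 'want' now holds how many bytes starting at addr may be touched
   through 'p' without crossing a RAMBlock boundary. */
#endif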
3143
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003144void qemu_put_ram_ptr(void *addr)
3145{
3146 trace_qemu_put_ram_ptr(addr);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003147}
3148
Marcelo Tosattie8902612010-10-11 15:31:19 -03003149int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00003150{
pbrook94a6b542009-04-11 17:15:54 +00003151 RAMBlock *block;
3152 uint8_t *host = ptr;
3153
Jan Kiszka868bb332011-06-21 22:59:09 +02003154 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003155 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003156 return 0;
3157 }
3158
Alex Williamsonf471a172010-06-11 11:11:42 -06003159 QLIST_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003160 /* This case appears when the block is not mapped. */
3161 if (block->host == NULL) {
3162 continue;
3163 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003164 if (host - block->host < block->length) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03003165 *ram_addr = block->offset + (host - block->host);
3166 return 0;
Alex Williamsonf471a172010-06-11 11:11:42 -06003167 }
pbrook94a6b542009-04-11 17:15:54 +00003168 }
Jun Nakajima432d2682010-08-31 16:41:25 +01003169
Marcelo Tosattie8902612010-10-11 15:31:19 -03003170 return -1;
3171}
Alex Williamsonf471a172010-06-11 11:11:42 -06003172
Marcelo Tosattie8902612010-10-11 15:31:19 -03003173/* Some of the softmmu routines need to translate from a host pointer
3174 (typically a TLB entry) back to a ram offset. */
3175ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3176{
3177 ram_addr_t ram_addr;
Alex Williamsonf471a172010-06-11 11:11:42 -06003178
Marcelo Tosattie8902612010-10-11 15:31:19 -03003179 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3180 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3181 abort();
3182 }
3183 return ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00003184}
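/* Round-trip sketch (non-Xen case): the two translations are exact
   inverses of each other:
       void *host = qemu_get_ram_ptr(offset);
       assert(qemu_ram_addr_from_host_nofail(host) == offset);
 */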
3185
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003186static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
3187 unsigned size)
bellard33417e72003-08-10 21:47:01 +00003188{
pbrook67d3b952006-12-18 05:03:52 +00003189#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00003190 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
pbrook67d3b952006-12-18 05:03:52 +00003191#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003192#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003193 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00003194#endif
3195 return 0;
3196}
3197
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003198static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
3199 uint64_t val, unsigned size)
blueswir1e18231a2008-10-06 18:46:28 +00003200{
3201#ifdef DEBUG_UNASSIGNED
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003202 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
blueswir1e18231a2008-10-06 18:46:28 +00003203#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003204#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003205 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00003206#endif
3207}
3208
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003209static const MemoryRegionOps unassigned_mem_ops = {
3210 .read = unassigned_mem_read,
3211 .write = unassigned_mem_write,
3212 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00003213};
3214
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003215static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
3216 unsigned size)
3217{
3218 abort();
3219}
3220
3221static void error_mem_write(void *opaque, target_phys_addr_t addr,
3222 uint64_t value, unsigned size)
3223{
3224 abort();
3225}
3226
3227static const MemoryRegionOps error_mem_ops = {
3228 .read = error_mem_read,
3229 .write = error_mem_write,
3230 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00003231};
3232
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003233static const MemoryRegionOps rom_mem_ops = {
3234 .read = error_mem_read,
3235 .write = unassigned_mem_write,
3236 .endianness = DEVICE_NATIVE_ENDIAN,
3237};
3238
3239static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
3240 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00003241{
bellard3a7d9292005-08-21 09:26:42 +00003242 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003243 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003244 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3245#if !defined(CONFIG_USER_ONLY)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003246 tb_invalidate_phys_page_fast(ram_addr, size);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003247 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003248#endif
3249 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003250 switch (size) {
3251 case 1:
3252 stb_p(qemu_get_ram_ptr(ram_addr), val);
3253 break;
3254 case 2:
3255 stw_p(qemu_get_ram_ptr(ram_addr), val);
3256 break;
3257 case 4:
3258 stl_p(qemu_get_ram_ptr(ram_addr), val);
3259 break;
3260 default:
3261 abort();
3262 }
bellardf23db162005-08-21 19:12:28 +00003263 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003264 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00003265 /* we remove the notdirty callback only if the code has been
3266 flushed */
3267 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00003268 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00003269}
3270
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003271static const MemoryRegionOps notdirty_mem_ops = {
3272 .read = error_mem_read,
3273 .write = notdirty_mem_write,
3274 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00003275};
3276
pbrook0f459d12008-06-09 00:20:13 +00003277/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00003278static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00003279{
3280 CPUState *env = cpu_single_env;
aliguori06d55cc2008-11-18 20:24:06 +00003281 target_ulong pc, cs_base;
3282 TranslationBlock *tb;
pbrook0f459d12008-06-09 00:20:13 +00003283 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00003284 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00003285 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00003286
aliguori06d55cc2008-11-18 20:24:06 +00003287 if (env->watchpoint_hit) {
3288 /* We re-entered the check after replacing the TB. Now raise
3289 * the debug interrupt so that it will trigger after the
3290 * current instruction. */
3291 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3292 return;
3293 }
pbrook2e70f6e2008-06-29 01:03:05 +00003294 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003295 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00003296 if ((vaddr == (wp->vaddr & len_mask) ||
3297 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00003298 wp->flags |= BP_WATCHPOINT_HIT;
3299 if (!env->watchpoint_hit) {
3300 env->watchpoint_hit = wp;
3301 tb = tb_find_pc(env->mem_io_pc);
3302 if (!tb) {
3303 cpu_abort(env, "check_watchpoint: could not find TB for "
3304 "pc=%p", (void *)env->mem_io_pc);
3305 }
Stefan Weil618ba8e2011-04-18 06:39:53 +00003306 cpu_restore_state(tb, env, env->mem_io_pc);
aliguori6e140f22008-11-18 20:37:55 +00003307 tb_phys_invalidate(tb, -1);
3308 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3309 env->exception_index = EXCP_DEBUG;
3310 } else {
3311 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3312 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3313 }
3314 cpu_resume_from_signal(env, NULL);
aliguori06d55cc2008-11-18 20:24:06 +00003315 }
aliguori6e140f22008-11-18 20:37:55 +00003316 } else {
3317 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00003318 }
3319 }
3320}
3321
pbrook6658ffb2007-03-16 23:58:11 +00003322/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3323 so these check for a hit then pass through to the normal out-of-line
3324 phys routines. */
Avi Kivity1ec9b902012-01-02 12:47:48 +02003325static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
3326 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00003327{
Avi Kivity1ec9b902012-01-02 12:47:48 +02003328 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
3329 switch (size) {
3330 case 1: return ldub_phys(addr);
3331 case 2: return lduw_phys(addr);
3332 case 4: return ldl_phys(addr);
3333 default: abort();
3334 }
pbrook6658ffb2007-03-16 23:58:11 +00003335}
3336
Avi Kivity1ec9b902012-01-02 12:47:48 +02003337static void watch_mem_write(void *opaque, target_phys_addr_t addr,
3338 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00003339{
Avi Kivity1ec9b902012-01-02 12:47:48 +02003340 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
3341 switch (size) {
3342 case 1: stb_phys(addr, val); break;
3343 case 2: stw_phys(addr, val); break;
3344 case 4: stl_phys(addr, val); break;
3345 default: abort();
3346 }
pbrook6658ffb2007-03-16 23:58:11 +00003347}
3348
Avi Kivity1ec9b902012-01-02 12:47:48 +02003349static const MemoryRegionOps watch_mem_ops = {
3350 .read = watch_mem_read,
3351 .write = watch_mem_write,
3352 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00003353};
pbrook6658ffb2007-03-16 23:58:11 +00003354
Avi Kivity70c68e42012-01-02 12:32:48 +02003355static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
3356 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003357{
Avi Kivity70c68e42012-01-02 12:32:48 +02003358 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003359 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02003360 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00003361#if defined(DEBUG_SUBPAGE)
3362 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3363 mmio, len, addr, idx);
3364#endif
blueswir1db7b5422007-05-26 17:36:03 +00003365
Avi Kivity5312bd82012-02-12 18:32:55 +02003366 section = &phys_sections[mmio->sub_section[idx]];
3367 addr += mmio->base;
3368 addr -= section->offset_within_address_space;
3369 addr += section->offset_within_region;
3370 return io_mem_read(section->mr->ram_addr, addr, len);
blueswir1db7b5422007-05-26 17:36:03 +00003371}
3372
Avi Kivity70c68e42012-01-02 12:32:48 +02003373static void subpage_write(void *opaque, target_phys_addr_t addr,
3374 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003375{
Avi Kivity70c68e42012-01-02 12:32:48 +02003376 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003377 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02003378 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00003379#if defined(DEBUG_SUBPAGE)
Avi Kivity70c68e42012-01-02 12:32:48 +02003380 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
3381 " idx %d value %"PRIx64"\n",
Richard Hendersonf6405242010-04-22 16:47:31 -07003382 __func__, mmio, len, addr, idx, value);
blueswir1db7b5422007-05-26 17:36:03 +00003383#endif
Richard Hendersonf6405242010-04-22 16:47:31 -07003384
Avi Kivity5312bd82012-02-12 18:32:55 +02003385 section = &phys_sections[mmio->sub_section[idx]];
3386 addr += mmio->base;
3387 addr -= section->offset_within_address_space;
3388 addr += section->offset_within_region;
3389 io_mem_write(section->mr->ram_addr, addr, value, len);
blueswir1db7b5422007-05-26 17:36:03 +00003390}
3391
Avi Kivity70c68e42012-01-02 12:32:48 +02003392static const MemoryRegionOps subpage_ops = {
3393 .read = subpage_read,
3394 .write = subpage_write,
3395 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00003396};
3397
Avi Kivityde712f92012-01-02 12:41:07 +02003398static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
3399 unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01003400{
3401 ram_addr_t raddr = addr;
3402 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02003403 switch (size) {
3404 case 1: return ldub_p(ptr);
3405 case 2: return lduw_p(ptr);
3406 case 4: return ldl_p(ptr);
3407 default: abort();
3408 }
Andreas Färber56384e82011-11-30 16:26:21 +01003409}
3410
Avi Kivityde712f92012-01-02 12:41:07 +02003411static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
3412 uint64_t value, unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01003413{
3414 ram_addr_t raddr = addr;
3415 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02003416 switch (size) {
3417 case 1: return stb_p(ptr, value);
3418 case 2: return stw_p(ptr, value);
3419 case 4: return stl_p(ptr, value);
3420 default: abort();
3421 }
Andreas Färber56384e82011-11-30 16:26:21 +01003422}
3423
Avi Kivityde712f92012-01-02 12:41:07 +02003424static const MemoryRegionOps subpage_ram_ops = {
3425 .read = subpage_ram_read,
3426 .write = subpage_ram_write,
3427 .endianness = DEVICE_NATIVE_ENDIAN,
Andreas Färber56384e82011-11-30 16:26:21 +01003428};
3429
Anthony Liguoric227f092009-10-01 16:12:16 -05003430static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02003431 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00003432{
3433 int idx, eidx;
3434
3435 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3436 return -1;
3437 idx = SUBPAGE_IDX(start);
3438 eidx = SUBPAGE_IDX(end);
3439#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00003440 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", __func__,
blueswir1db7b5422007-05-26 17:36:03 +00003441 mmio, start, end, idx, eidx, section);
3442#endif
Avi Kivity5312bd82012-02-12 18:32:55 +02003443 if (memory_region_is_ram(phys_sections[section].mr)) {
3444 MemoryRegionSection new_section = phys_sections[section];
3445 new_section.mr = &io_mem_subpage_ram;
3446 section = phys_section_add(&new_section);
Andreas Färber56384e82011-11-30 16:26:21 +01003447 }
blueswir1db7b5422007-05-26 17:36:03 +00003448 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02003449 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00003450 }
3451
3452 return 0;
3453}
3454
Avi Kivity0f0cb162012-02-13 17:14:32 +02003455static subpage_t *subpage_init(target_phys_addr_t base)
blueswir1db7b5422007-05-26 17:36:03 +00003456{
Anthony Liguoric227f092009-10-01 16:12:16 -05003457 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00003458
Anthony Liguori7267c092011-08-20 22:09:37 -05003459 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00003460
3461 mmio->base = base;
Avi Kivity70c68e42012-01-02 12:32:48 +02003462 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
3463 "subpage", TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02003464 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00003465#if defined(DEBUG_SUBPAGE)
aliguori1eec6142009-02-05 22:06:18 +00003466 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
3467 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00003468#endif
Avi Kivity0f0cb162012-02-13 17:14:32 +02003469 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
blueswir1db7b5422007-05-26 17:36:03 +00003470
3471 return mmio;
3472}
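/* Dispatch sketch: after subpage_init(0x10000), a read of guest
   physical 0x10004 reaches subpage_read() with addr == 4;
   sub_section[SUBPAGE_IDX(4)] names the target section, and the
   address is rebased from the subpage to that section's
   offset_within_region before io_mem_read() is invoked. */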
3473
aliguori88715652009-02-11 15:20:58 +00003474static int get_free_io_mem_idx(void)
3475{
3476 int i;
3477
3478 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3479 if (!io_mem_used[i]) {
3480 io_mem_used[i] = 1;
3481 return i;
3482 }
Riku Voipioc6703b42009-12-03 15:56:05 +02003483 fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
aliguori88715652009-02-11 15:20:58 +00003484 return -1;
3485}
3486
bellard33417e72003-08-10 21:47:01 +00003487/* Register a MemoryRegion for I/O dispatch. Byte, word and dword
3488 accesses are all routed through the region's MemoryRegionOps.
blueswir13ee89922008-01-02 19:45:26 +00003490 If io_index is non zero, the corresponding io zone is
blueswir14254fab2008-01-01 16:57:19 +00003491 modified. If it is zero, a new io zone is allocated. The return
3492 value can be used with cpu_register_physical_memory(). (-1) is
3493 returned on error. */
Avi Kivitya621f382012-01-02 13:12:08 +02003494static int cpu_register_io_memory_fixed(int io_index, MemoryRegion *mr)
bellard33417e72003-08-10 21:47:01 +00003495{
bellard33417e72003-08-10 21:47:01 +00003496 if (io_index <= 0) {
aliguori88715652009-02-11 15:20:58 +00003497 io_index = get_free_io_mem_idx();
3498 if (io_index == -1)
3499 return io_index;
bellard33417e72003-08-10 21:47:01 +00003500 } else {
3501 if (io_index >= IO_MEM_NB_ENTRIES)
3502 return -1;
3503 }
bellardb5ff1b32005-11-26 10:38:39 +00003504
Avi Kivitya621f382012-01-02 13:12:08 +02003505 io_mem_region[io_index] = mr;
Richard Hendersonf6405242010-04-22 16:47:31 -07003506
Avi Kivity11c7ef02012-01-02 17:21:07 +02003507 return io_index;
bellard33417e72003-08-10 21:47:01 +00003508}
bellard61382a52003-10-27 21:22:23 +00003509
Avi Kivitya621f382012-01-02 13:12:08 +02003510int cpu_register_io_memory(MemoryRegion *mr)
Avi Kivity1eed09c2009-06-14 11:38:51 +03003511{
Avi Kivitya621f382012-01-02 13:12:08 +02003512 return cpu_register_io_memory_fixed(0, mr);
Avi Kivity1eed09c2009-06-14 11:38:51 +03003513}
3514
Avi Kivity11c7ef02012-01-02 17:21:07 +02003515void cpu_unregister_io_memory(int io_index)
aliguori88715652009-02-11 15:20:58 +00003516{
Avi Kivitya621f382012-01-02 13:12:08 +02003517 io_mem_region[io_index] = NULL;
aliguori88715652009-02-11 15:20:58 +00003518 io_mem_used[io_index] = 0;
3519}
3520
Avi Kivity5312bd82012-02-12 18:32:55 +02003521static uint16_t dummy_section(MemoryRegion *mr)
3522{
3523 MemoryRegionSection section = {
3524 .mr = mr,
3525 .offset_within_address_space = 0,
3526 .offset_within_region = 0,
3527 .size = UINT64_MAX,
3528 };
3529
3530 return phys_section_add(&section);
3531}
3532
Avi Kivitye9179ce2009-06-14 11:38:52 +03003533static void io_mem_init(void)
3534{
3535 int i;
3536
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003537 /* Must be first: */
3538 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
3539 assert(io_mem_ram.ram_addr == 0);
3540 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
3541 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
3542 "unassigned", UINT64_MAX);
3543 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
3544 "notdirty", UINT64_MAX);
Avi Kivityde712f92012-01-02 12:41:07 +02003545 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
3546 "subpage-ram", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003547 for (i = 0; i < 5; i++)
3548 io_mem_used[i] = 1;
3549
Avi Kivity1ec9b902012-01-02 12:47:48 +02003550 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
3551 "watch", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003552}
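/* Note: the five regions initialized above are expected to occupy
   io_mem slots 0-4 in order (ram, rom, unassigned, notdirty,
   subpage-ram), which is why exactly five io_mem_used entries are
   pre-marked and why io_mem_ram.ram_addr == 0 is asserted. */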
3553
Avi Kivity50c1e142012-02-08 21:36:02 +02003554static void core_begin(MemoryListener *listener)
3555{
Avi Kivity54688b12012-02-09 17:34:32 +02003556 destroy_all_mappings();
Avi Kivity5312bd82012-02-12 18:32:55 +02003557 phys_sections_clear();
Avi Kivityd6f2ea22012-02-12 20:12:49 +02003558 phys_map.u.node = PHYS_MAP_NODE_NIL;
Avi Kivity5312bd82012-02-12 18:32:55 +02003559 phys_section_unassigned = dummy_section(&io_mem_unassigned);
Avi Kivity50c1e142012-02-08 21:36:02 +02003560}
3561
3562static void core_commit(MemoryListener *listener)
3563{
Avi Kivity117712c2012-02-12 21:23:17 +02003564 CPUState *env;
3565
3566 /* since each CPU stores ram addresses in its TLB cache, we must
3567 reset the modified entries */
3568 /* XXX: slow ! */
3569 for(env = first_cpu; env != NULL; env = env->next_cpu) {
3570 tlb_flush(env, 1);
3571 }
Avi Kivity50c1e142012-02-08 21:36:02 +02003572}
3573
Avi Kivity93632742012-02-08 16:54:16 +02003574static void core_region_add(MemoryListener *listener,
3575 MemoryRegionSection *section)
3576{
Avi Kivity4855d412012-02-08 21:16:05 +02003577 cpu_register_physical_memory_log(section, section->readonly);
Avi Kivity93632742012-02-08 16:54:16 +02003578}
3579
3580static void core_region_del(MemoryListener *listener,
3581 MemoryRegionSection *section)
3582{
Avi Kivity93632742012-02-08 16:54:16 +02003583}
3584
Avi Kivity50c1e142012-02-08 21:36:02 +02003585static void core_region_nop(MemoryListener *listener,
3586 MemoryRegionSection *section)
3587{
Avi Kivity54688b12012-02-09 17:34:32 +02003588 cpu_register_physical_memory_log(section, section->readonly);
Avi Kivity50c1e142012-02-08 21:36:02 +02003589}
3590
Avi Kivity93632742012-02-08 16:54:16 +02003591static void core_log_start(MemoryListener *listener,
3592 MemoryRegionSection *section)
3593{
3594}
3595
3596static void core_log_stop(MemoryListener *listener,
3597 MemoryRegionSection *section)
3598{
3599}
3600
3601static void core_log_sync(MemoryListener *listener,
3602 MemoryRegionSection *section)
3603{
3604}
3605
3606static void core_log_global_start(MemoryListener *listener)
3607{
3608 cpu_physical_memory_set_dirty_tracking(1);
3609}
3610
3611static void core_log_global_stop(MemoryListener *listener)
3612{
3613 cpu_physical_memory_set_dirty_tracking(0);
3614}
3615
3616static void core_eventfd_add(MemoryListener *listener,
3617 MemoryRegionSection *section,
3618 bool match_data, uint64_t data, int fd)
3619{
3620}
3621
3622static void core_eventfd_del(MemoryListener *listener,
3623 MemoryRegionSection *section,
3624 bool match_data, uint64_t data, int fd)
3625{
3626}
3627
Avi Kivity50c1e142012-02-08 21:36:02 +02003628static void io_begin(MemoryListener *listener)
3629{
3630}
3631
3632static void io_commit(MemoryListener *listener)
3633{
3634}
3635
Avi Kivity4855d412012-02-08 21:16:05 +02003636static void io_region_add(MemoryListener *listener,
3637 MemoryRegionSection *section)
3638{
3639 iorange_init(&section->mr->iorange, &memory_region_iorange_ops,
3640 section->offset_within_address_space, section->size);
3641 ioport_register(&section->mr->iorange);
3642}
3643
3644static void io_region_del(MemoryListener *listener,
3645 MemoryRegionSection *section)
3646{
3647 isa_unassign_ioport(section->offset_within_address_space, section->size);
3648}
3649
Avi Kivity50c1e142012-02-08 21:36:02 +02003650static void io_region_nop(MemoryListener *listener,
3651 MemoryRegionSection *section)
3652{
3653}
3654
Avi Kivity4855d412012-02-08 21:16:05 +02003655static void io_log_start(MemoryListener *listener,
3656 MemoryRegionSection *section)
3657{
3658}
3659
3660static void io_log_stop(MemoryListener *listener,
3661 MemoryRegionSection *section)
3662{
3663}
3664
3665static void io_log_sync(MemoryListener *listener,
3666 MemoryRegionSection *section)
3667{
3668}
3669
3670static void io_log_global_start(MemoryListener *listener)
3671{
3672}
3673
3674static void io_log_global_stop(MemoryListener *listener)
3675{
3676}
3677
3678static void io_eventfd_add(MemoryListener *listener,
3679 MemoryRegionSection *section,
3680 bool match_data, uint64_t data, int fd)
3681{
3682}
3683
3684static void io_eventfd_del(MemoryListener *listener,
3685 MemoryRegionSection *section,
3686 bool match_data, uint64_t data, int fd)
3687{
3688}
3689
Avi Kivity93632742012-02-08 16:54:16 +02003690static MemoryListener core_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02003691 .begin = core_begin,
3692 .commit = core_commit,
Avi Kivity93632742012-02-08 16:54:16 +02003693 .region_add = core_region_add,
3694 .region_del = core_region_del,
Avi Kivity50c1e142012-02-08 21:36:02 +02003695 .region_nop = core_region_nop,
Avi Kivity93632742012-02-08 16:54:16 +02003696 .log_start = core_log_start,
3697 .log_stop = core_log_stop,
3698 .log_sync = core_log_sync,
3699 .log_global_start = core_log_global_start,
3700 .log_global_stop = core_log_global_stop,
3701 .eventfd_add = core_eventfd_add,
3702 .eventfd_del = core_eventfd_del,
3703 .priority = 0,
3704};
3705
Avi Kivity4855d412012-02-08 21:16:05 +02003706static MemoryListener io_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02003707 .begin = io_begin,
3708 .commit = io_commit,
Avi Kivity4855d412012-02-08 21:16:05 +02003709 .region_add = io_region_add,
3710 .region_del = io_region_del,
Avi Kivity50c1e142012-02-08 21:36:02 +02003711 .region_nop = io_region_nop,
Avi Kivity4855d412012-02-08 21:16:05 +02003712 .log_start = io_log_start,
3713 .log_stop = io_log_stop,
3714 .log_sync = io_log_sync,
3715 .log_global_start = io_log_global_start,
3716 .log_global_stop = io_log_global_stop,
3717 .eventfd_add = io_eventfd_add,
3718 .eventfd_del = io_eventfd_del,
3719 .priority = 0,
3720};
3721
Avi Kivity62152b82011-07-26 14:26:14 +03003722static void memory_map_init(void)
3723{
Anthony Liguori7267c092011-08-20 22:09:37 -05003724 system_memory = g_malloc(sizeof(*system_memory));
Avi Kivity8417ceb2011-08-03 11:56:14 +03003725 memory_region_init(system_memory, "system", INT64_MAX);
Avi Kivity62152b82011-07-26 14:26:14 +03003726 set_system_memory_map(system_memory);
Avi Kivity309cb472011-08-08 16:09:03 +03003727
Anthony Liguori7267c092011-08-20 22:09:37 -05003728 system_io = g_malloc(sizeof(*system_io));
Avi Kivity309cb472011-08-08 16:09:03 +03003729 memory_region_init(system_io, "io", 65536);
3730 set_system_io_map(system_io);
Avi Kivity93632742012-02-08 16:54:16 +02003731
Avi Kivity4855d412012-02-08 21:16:05 +02003732 memory_listener_register(&core_memory_listener, system_memory);
3733 memory_listener_register(&io_memory_listener, system_io);
Avi Kivity62152b82011-07-26 14:26:14 +03003734}
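/* A board model then hangs its RAM off the system memory tree, and the
   listeners registered above receive region_add callbacks and rebuild
   the physical page map. A minimal sketch (machine_ram stands for an
   already initialized RAM MemoryRegion and is hypothetical here): */
#if 0
    memory_region_add_subregion(get_system_memory(), 0 /* base */,
                                machine_ram);
#endif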
3735
3736MemoryRegion *get_system_memory(void)
3737{
3738 return system_memory;
3739}
3740
Avi Kivity309cb472011-08-08 16:09:03 +03003741MemoryRegion *get_system_io(void)
3742{
3743 return system_io;
3744}
3745
pbrooke2eef172008-06-08 01:09:01 +00003746#endif /* !defined(CONFIG_USER_ONLY) */
3747
bellard13eb76e2004-01-24 15:23:36 +00003748/* physical memory access (slow version, mainly for debug) */
3749#if defined(CONFIG_USER_ONLY)
Paul Brooka68fe892010-03-01 00:08:59 +00003750int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3751 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003752{
3753 int l, flags;
3754 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00003755 void * p;
bellard13eb76e2004-01-24 15:23:36 +00003756
3757 while (len > 0) {
3758 page = addr & TARGET_PAGE_MASK;
3759 l = (page + TARGET_PAGE_SIZE) - addr;
3760 if (l > len)
3761 l = len;
3762 flags = page_get_flags(page);
3763 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00003764 return -1;
bellard13eb76e2004-01-24 15:23:36 +00003765 if (is_write) {
3766 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00003767 return -1;
bellard579a97f2007-11-11 14:26:47 +00003768 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003769 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00003770 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003771 memcpy(p, buf, l);
3772 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00003773 } else {
3774 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00003775 return -1;
bellard579a97f2007-11-11 14:26:47 +00003776 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003777 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00003778 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003779 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00003780 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00003781 }
3782 len -= l;
3783 buf += l;
3784 addr += l;
3785 }
Paul Brooka68fe892010-03-01 00:08:59 +00003786 return 0;
bellard13eb76e2004-01-24 15:23:36 +00003787}
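/* Typical caller sketch: the gdb stub reads guest memory this way
   (mem_buf, s and the "E14" reply are illustrative gdbstub-style
   details, not taken from this file): */
#if 0
uint8_t mem_buf[64];
if (cpu_memory_rw_debug(env, addr, mem_buf, sizeof(mem_buf), 0) != 0) {
    put_packet(s, "E14"); /* report the access error back to gdb */
}
#endif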
bellard8df1cd02005-01-28 22:37:22 +00003788
bellard13eb76e2004-01-24 15:23:36 +00003789#else
Anthony Liguoric227f092009-10-01 16:12:16 -05003790void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
bellard13eb76e2004-01-24 15:23:36 +00003791 int len, int is_write)
3792{
3793 int l, io_index;
3794 uint8_t *ptr;
3795 uint32_t val;
Anthony Liguoric227f092009-10-01 16:12:16 -05003796 target_phys_addr_t page;
Avi Kivity06ef3522012-02-13 16:11:22 +02003797 MemoryRegionSection section;
ths3b46e622007-09-17 08:09:54 +00003798
bellard13eb76e2004-01-24 15:23:36 +00003799 while (len > 0) {
3800 page = addr & TARGET_PAGE_MASK;
3801 l = (page + TARGET_PAGE_SIZE) - addr;
3802 if (l > len)
3803 l = len;
Avi Kivity06ef3522012-02-13 16:11:22 +02003804 section = phys_page_find(page >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003805
bellard13eb76e2004-01-24 15:23:36 +00003806 if (is_write) {
Avi Kivity06ef3522012-02-13 16:11:22 +02003807 if (!memory_region_is_ram(section.mr)) {
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003808 target_phys_addr_t addr1;
Avi Kivity06ef3522012-02-13 16:11:22 +02003809 io_index = memory_region_get_ram_addr(section.mr)
3810 & (IO_MEM_NB_ENTRIES - 1);
3811 addr1 = (addr & ~TARGET_PAGE_MASK)
3812 + section.offset_within_region;
bellard6a00d602005-11-21 23:25:50 +00003813 /* XXX: could force cpu_single_env to NULL to avoid
3814 potential bugs */
aurel326c2934d2009-02-18 21:37:17 +00003815 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003816 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003817 val = ldl_p(buf);
Avi Kivityacbbec52011-11-21 12:27:03 +02003818 io_mem_write(io_index, addr1, val, 4);
bellard13eb76e2004-01-24 15:23:36 +00003819 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003820 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003821 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003822 val = lduw_p(buf);
Avi Kivityacbbec52011-11-21 12:27:03 +02003823 io_mem_write(io_index, addr1, val, 2);
bellard13eb76e2004-01-24 15:23:36 +00003824 l = 2;
3825 } else {
bellard1c213d12005-09-03 10:49:04 +00003826 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003827 val = ldub_p(buf);
Avi Kivityacbbec52011-11-21 12:27:03 +02003828 io_mem_write(io_index, addr1, val, 1);
bellard13eb76e2004-01-24 15:23:36 +00003829 l = 1;
3830 }
Avi Kivity06ef3522012-02-13 16:11:22 +02003831 } else if (!section.readonly) {
Anthony PERARD8ca56922011-07-15 04:32:53 +00003832 ram_addr_t addr1;
Avi Kivity06ef3522012-02-13 16:11:22 +02003833 addr1 = (memory_region_get_ram_addr(section.mr)
3834 + section.offset_within_region)
3835 | (addr & ~TARGET_PAGE_MASK);
bellard13eb76e2004-01-24 15:23:36 +00003836 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003837 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00003838 memcpy(ptr, buf, l);
bellard3a7d9292005-08-21 09:26:42 +00003839 if (!cpu_physical_memory_is_dirty(addr1)) {
3840 /* invalidate code */
3841 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3842 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003843 cpu_physical_memory_set_dirty_flags(
3844 addr1, (0xff & ~CODE_DIRTY_FLAG));
bellard3a7d9292005-08-21 09:26:42 +00003845 }
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003846 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00003847 }
3848 } else {
Avi Kivity06ef3522012-02-13 16:11:22 +02003849 if (!is_ram_rom_romd(&section)) {
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003850 target_phys_addr_t addr1;
bellard13eb76e2004-01-24 15:23:36 +00003851 /* I/O case */
Avi Kivity06ef3522012-02-13 16:11:22 +02003852 io_index = memory_region_get_ram_addr(section.mr)
3853 & (IO_MEM_NB_ENTRIES - 1);
3854 addr1 = (addr & ~TARGET_PAGE_MASK)
3855 + section.offset_within_region;
aurel326c2934d2009-02-18 21:37:17 +00003856 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003857 /* 32 bit read access */
Avi Kivityacbbec52011-11-21 12:27:03 +02003858 val = io_mem_read(io_index, addr1, 4);
bellardc27004e2005-01-03 23:35:10 +00003859 stl_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003860 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003861 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003862 /* 16 bit read access */
Avi Kivityacbbec52011-11-21 12:27:03 +02003863 val = io_mem_read(io_index, addr1, 2);
bellardc27004e2005-01-03 23:35:10 +00003864 stw_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003865 l = 2;
3866 } else {
bellard1c213d12005-09-03 10:49:04 +00003867 /* 8 bit read access */
Avi Kivityacbbec52011-11-21 12:27:03 +02003868 val = io_mem_read(io_index, addr1, 1);
bellardc27004e2005-01-03 23:35:10 +00003869 stb_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003870 l = 1;
3871 }
3872 } else {
3873 /* RAM case */
Avi Kivity06ef3522012-02-13 16:11:22 +02003874 ptr = qemu_get_ram_ptr(section.mr->ram_addr
3875 + section.offset_within_region);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003876 memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
3877 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00003878 }
3879 }
3880 len -= l;
3881 buf += l;
3882 addr += l;
3883 }
3884}
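
/* A minimal usage sketch (hypothetical helper, not wired up anywhere):
   this is how a device model typically drives the dispatcher above,
   pulling a small guest-physical structure into host memory, patching
   it, and pushing it back.  The descriptor layout is made up. */
static void example_update_descriptor(target_phys_addr_t desc_addr)
{
    struct {
        uint32_t buf_addr;
        uint32_t buf_len;
    } desc;

    /* is_write == 0: guest memory -> host buffer */
    cpu_physical_memory_rw(desc_addr, (uint8_t *)&desc, sizeof(desc), 0);
    desc.buf_len = 0;   /* mark the descriptor as consumed */
    /* is_write == 1: host buffer -> guest memory */
    cpu_physical_memory_rw(desc_addr, (uint8_t *)&desc, sizeof(desc), 1);
}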
bellard8df1cd02005-01-28 22:37:22 +00003885
bellardd0ecd2a2006-04-23 17:14:48 +00003886/* used for ROM loading: can write in RAM and ROM */
Anthony Liguoric227f092009-10-01 16:12:16 -05003887void cpu_physical_memory_write_rom(target_phys_addr_t addr,
bellardd0ecd2a2006-04-23 17:14:48 +00003888 const uint8_t *buf, int len)
3889{
3890 int l;
3891 uint8_t *ptr;
Anthony Liguoric227f092009-10-01 16:12:16 -05003892 target_phys_addr_t page;
Avi Kivity06ef3522012-02-13 16:11:22 +02003893 MemoryRegionSection section;
ths3b46e622007-09-17 08:09:54 +00003894
bellardd0ecd2a2006-04-23 17:14:48 +00003895 while (len > 0) {
3896 page = addr & TARGET_PAGE_MASK;
3897 l = (page + TARGET_PAGE_SIZE) - addr;
3898 if (l > len)
3899 l = len;
Avi Kivity06ef3522012-02-13 16:11:22 +02003900 section = phys_page_find(page >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003901
Avi Kivity06ef3522012-02-13 16:11:22 +02003902 if (!is_ram_rom_romd(&section)) {
bellardd0ecd2a2006-04-23 17:14:48 +00003903 /* do nothing */
3904 } else {
3905 unsigned long addr1;
Avi Kivity06ef3522012-02-13 16:11:22 +02003906 addr1 = (memory_region_get_ram_addr(section.mr)
3907 + section.offset_within_region)
3908 + (addr & ~TARGET_PAGE_MASK);
bellardd0ecd2a2006-04-23 17:14:48 +00003909 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003910 ptr = qemu_get_ram_ptr(addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00003911 memcpy(ptr, buf, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003912 qemu_put_ram_ptr(ptr);
bellardd0ecd2a2006-04-23 17:14:48 +00003913 }
3914 len -= l;
3915 buf += l;
3916 addr += l;
3917 }
3918}
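
/* Usage sketch (hypothetical loader with a made-up load address):
   firmware loading is the expected caller here.  A plain write through
   cpu_physical_memory_rw() to a read-only section is silently dropped
   by the !section.readonly test above, so ROM contents must be
   installed with cpu_physical_memory_write_rom(). */
static void example_install_firmware(const uint8_t *blob, int size)
{
    const target_phys_addr_t example_fw_base = 0xfffc0000;

    cpu_physical_memory_write_rom(example_fw_base, blob, size);
}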
3919
aliguori6d16c2f2009-01-22 16:59:11 +00003920typedef struct {
3921 void *buffer;
Anthony Liguoric227f092009-10-01 16:12:16 -05003922 target_phys_addr_t addr;
3923 target_phys_addr_t len;
aliguori6d16c2f2009-01-22 16:59:11 +00003924} BounceBuffer;
3925
3926static BounceBuffer bounce;
3927
aliguoriba223c22009-01-22 16:59:16 +00003928typedef struct MapClient {
3929 void *opaque;
3930 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00003931 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00003932} MapClient;
3933
Blue Swirl72cf2d42009-09-12 07:36:22 +00003934static QLIST_HEAD(map_client_list, MapClient) map_client_list
3935 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003936
3937void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3938{
Anthony Liguori7267c092011-08-20 22:09:37 -05003939 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00003940
3941 client->opaque = opaque;
3942 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003943 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00003944 return client;
3945}
3946
3947void cpu_unregister_map_client(void *_client)
3948{
3949 MapClient *client = (MapClient *)_client;
3950
Blue Swirl72cf2d42009-09-12 07:36:22 +00003951 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05003952 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00003953}
3954
3955static void cpu_notify_map_clients(void)
3956{
3957 MapClient *client;
3958
Blue Swirl72cf2d42009-09-12 07:36:22 +00003959 while (!QLIST_EMPTY(&map_client_list)) {
3960 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003961 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09003962 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00003963 }
3964}
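
/* Protocol sketch (hypothetical types and helpers): a caller whose
   cpu_physical_memory_map() attempt failed registers itself here;
   when the bounce buffer is released, cpu_notify_map_clients() invokes
   and unregisters every callback, and each client simply retries its
   mapping. */
struct example_dma_state {
    int map_deferred;   /* a transfer is waiting for the bounce buffer */
};

static void example_restart_transfer(void *opaque)
{
    struct example_dma_state *s = opaque;

    /* cpu_notify_map_clients() has already unregistered us here */
    s->map_deferred = 0;
    /* ... retry cpu_physical_memory_map() and resume the transfer ... */
}

static void example_defer_transfer(struct example_dma_state *s)
{
    s->map_deferred = 1;
    cpu_register_map_client(s, example_restart_transfer);
}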
3965
aliguori6d16c2f2009-01-22 16:59:11 +00003966/* Map a physical memory region into a host virtual address.
3967 * May map a subset of the requested range, given by and returned in *plen.
3968 * May return NULL if resources needed to perform the mapping are exhausted.
3969 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00003970 * Use cpu_register_map_client() to know when retrying the map operation is
3971 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00003972 */
Anthony Liguoric227f092009-10-01 16:12:16 -05003973void *cpu_physical_memory_map(target_phys_addr_t addr,
3974 target_phys_addr_t *plen,
aliguori6d16c2f2009-01-22 16:59:11 +00003975 int is_write)
3976{
Anthony Liguoric227f092009-10-01 16:12:16 -05003977 target_phys_addr_t len = *plen;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003978 target_phys_addr_t todo = 0;
aliguori6d16c2f2009-01-22 16:59:11 +00003979 int l;
Anthony Liguoric227f092009-10-01 16:12:16 -05003980 target_phys_addr_t page;
Avi Kivity06ef3522012-02-13 16:11:22 +02003981 MemoryRegionSection section;
Anthony PERARDf15fbc42011-07-20 08:17:42 +00003982 ram_addr_t raddr = RAM_ADDR_MAX;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003983 ram_addr_t rlen;
3984 void *ret;
aliguori6d16c2f2009-01-22 16:59:11 +00003985
3986 while (len > 0) {
3987 page = addr & TARGET_PAGE_MASK;
3988 l = (page + TARGET_PAGE_SIZE) - addr;
3989 if (l > len)
3990 l = len;
Avi Kivity06ef3522012-02-13 16:11:22 +02003991 section = phys_page_find(page >> TARGET_PAGE_BITS);
aliguori6d16c2f2009-01-22 16:59:11 +00003992
Avi Kivity06ef3522012-02-13 16:11:22 +02003993 if (!(memory_region_is_ram(section.mr) && !section.readonly)) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003994 if (todo || bounce.buffer) {
aliguori6d16c2f2009-01-22 16:59:11 +00003995 break;
3996 }
3997 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3998 bounce.addr = addr;
3999 bounce.len = l;
4000 if (!is_write) {
Stefan Weil54f7b4a2011-04-10 18:23:39 +02004001 cpu_physical_memory_read(addr, bounce.buffer, l);
aliguori6d16c2f2009-01-22 16:59:11 +00004002 }
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01004003
4004 *plen = l;
4005 return bounce.buffer;
aliguori6d16c2f2009-01-22 16:59:11 +00004006 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01004007 if (!todo) {
Avi Kivity06ef3522012-02-13 16:11:22 +02004008 raddr = memory_region_get_ram_addr(section.mr)
4009 + section.offset_within_region
4010 + (addr & ~TARGET_PAGE_MASK);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01004011 }
aliguori6d16c2f2009-01-22 16:59:11 +00004012
4013 len -= l;
4014 addr += l;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01004015 todo += l;
aliguori6d16c2f2009-01-22 16:59:11 +00004016 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01004017 rlen = todo;
4018 ret = qemu_ram_ptr_length(raddr, &rlen);
4019 *plen = rlen;
4020 return ret;
aliguori6d16c2f2009-01-22 16:59:11 +00004021}
4022
4023/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
4024 * Will also mark the memory as dirty if is_write == 1. access_len gives
4025 * the amount of memory that was actually read or written by the caller.
4026 */
Anthony Liguoric227f092009-10-01 16:12:16 -05004027void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
4028 int is_write, target_phys_addr_t access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00004029{
4030 if (buffer != bounce.buffer) {
4031 if (is_write) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03004032 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00004033 while (access_len) {
4034 unsigned l;
4035 l = TARGET_PAGE_SIZE;
4036 if (l > access_len)
4037 l = access_len;
4038 if (!cpu_physical_memory_is_dirty(addr1)) {
4039 /* invalidate code */
4040 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
4041 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09004042 cpu_physical_memory_set_dirty_flags(
4043 addr1, (0xff & ~CODE_DIRTY_FLAG));
aliguori6d16c2f2009-01-22 16:59:11 +00004044 }
4045 addr1 += l;
4046 access_len -= l;
4047 }
4048 }
Jan Kiszka868bb332011-06-21 22:59:09 +02004049 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02004050 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01004051 }
aliguori6d16c2f2009-01-22 16:59:11 +00004052 return;
4053 }
4054 if (is_write) {
4055 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
4056 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00004057 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00004058 bounce.buffer = NULL;
aliguoriba223c22009-01-22 16:59:16 +00004059 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00004060}
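
/* Usage sketch (hypothetical helper): the canonical zero-copy pattern
   built from the map/unmap pair above, with a bounce-copy fallback
   through cpu_physical_memory_rw() when the mapping comes back short
   or fails. */
static void example_dma_write(target_phys_addr_t addr,
                              const uint8_t *buf, int len)
{
    target_phys_addr_t plen = len;
    void *host = cpu_physical_memory_map(addr, &plen, 1);

    if (host) {
        memcpy(host, buf, plen);
        /* access_len == plen: every mapped byte was written, so unmap
           marks exactly the right pages dirty */
        cpu_physical_memory_unmap(host, plen, 1, plen);
    }
    if (plen < (target_phys_addr_t)len) {
        /* unmapped tail (or failed mapping, plen == 0) */
        cpu_physical_memory_rw(addr + plen, (uint8_t *)buf + plen,
                               len - (int)plen, 1);
    }
}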
bellardd0ecd2a2006-04-23 17:14:48 +00004061
bellard8df1cd02005-01-28 22:37:22 +00004062/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004063static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
4064 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00004065{
4066 int io_index;
4067 uint8_t *ptr;
4068 uint32_t val;
Avi Kivity06ef3522012-02-13 16:11:22 +02004069 MemoryRegionSection section;
bellard8df1cd02005-01-28 22:37:22 +00004070
Avi Kivity06ef3522012-02-13 16:11:22 +02004071 section = phys_page_find(addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00004072
Avi Kivity06ef3522012-02-13 16:11:22 +02004073 if (!is_ram_rom_romd(&section)) {
bellard8df1cd02005-01-28 22:37:22 +00004074 /* I/O case */
Avi Kivity06ef3522012-02-13 16:11:22 +02004075 io_index = memory_region_get_ram_addr(section.mr)
4076 & (IO_MEM_NB_ENTRIES - 1);
4077 addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
Avi Kivityacbbec52011-11-21 12:27:03 +02004078 val = io_mem_read(io_index, addr, 4);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004079#if defined(TARGET_WORDS_BIGENDIAN)
4080 if (endian == DEVICE_LITTLE_ENDIAN) {
4081 val = bswap32(val);
4082 }
4083#else
4084 if (endian == DEVICE_BIG_ENDIAN) {
4085 val = bswap32(val);
4086 }
4087#endif
bellard8df1cd02005-01-28 22:37:22 +00004088 } else {
4089 /* RAM case */
Avi Kivity06ef3522012-02-13 16:11:22 +02004090 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section.mr)
4091 & TARGET_PAGE_MASK)
4092 + section.offset_within_region) +
bellard8df1cd02005-01-28 22:37:22 +00004093 (addr & ~TARGET_PAGE_MASK);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004094 switch (endian) {
4095 case DEVICE_LITTLE_ENDIAN:
4096 val = ldl_le_p(ptr);
4097 break;
4098 case DEVICE_BIG_ENDIAN:
4099 val = ldl_be_p(ptr);
4100 break;
4101 default:
4102 val = ldl_p(ptr);
4103 break;
4104 }
bellard8df1cd02005-01-28 22:37:22 +00004105 }
4106 return val;
4107}
4108
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004109uint32_t ldl_phys(target_phys_addr_t addr)
4110{
4111 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4112}
4113
4114uint32_t ldl_le_phys(target_phys_addr_t addr)
4115{
4116 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4117}
4118
4119uint32_t ldl_be_phys(target_phys_addr_t addr)
4120{
4121 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
4122}
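
/* Usage sketch (hypothetical register file): the point of the _le/_be
   variants above is that a device model can state its register
   endianness once instead of ifdef-ing on TARGET_WORDS_BIGENDIAN; a
   little-endian register is read identically on every target. */
static uint32_t example_read_le_reg(target_phys_addr_t reg_base,
                                    unsigned reg_no)
{
    return ldl_le_phys(reg_base + reg_no * 4);
}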
4123
bellard84b7b8e2005-11-28 21:19:04 +00004124/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004125static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
4126 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00004127{
4128 int io_index;
4129 uint8_t *ptr;
4130 uint64_t val;
Avi Kivity06ef3522012-02-13 16:11:22 +02004131 MemoryRegionSection section;
bellard84b7b8e2005-11-28 21:19:04 +00004132
Avi Kivity06ef3522012-02-13 16:11:22 +02004133 section = phys_page_find(addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00004134
Avi Kivity06ef3522012-02-13 16:11:22 +02004135 if (!is_ram_rom_romd(&section)) {
bellard84b7b8e2005-11-28 21:19:04 +00004136 /* I/O case */
Avi Kivity06ef3522012-02-13 16:11:22 +02004137 io_index = memory_region_get_ram_addr(section.mr)
4138 & (IO_MEM_NB_ENTRIES - 1);
4139 addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004140
4141     /* XXX: This is broken when device endian != cpu endian.
4142 Fix and add "endian" variable check */
bellard84b7b8e2005-11-28 21:19:04 +00004143#ifdef TARGET_WORDS_BIGENDIAN
Avi Kivityacbbec52011-11-21 12:27:03 +02004144 val = io_mem_read(io_index, addr, 4) << 32;
4145 val |= io_mem_read(io_index, addr + 4, 4);
bellard84b7b8e2005-11-28 21:19:04 +00004146#else
Avi Kivityacbbec52011-11-21 12:27:03 +02004147 val = io_mem_read(io_index, addr, 4);
4148 val |= io_mem_read(io_index, addr + 4, 4) << 32;
bellard84b7b8e2005-11-28 21:19:04 +00004149#endif
4150 } else {
4151 /* RAM case */
Avi Kivity06ef3522012-02-13 16:11:22 +02004152 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section.mr)
4153 & TARGET_PAGE_MASK)
4154 + section.offset_within_region)
4155 + (addr & ~TARGET_PAGE_MASK);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004156 switch (endian) {
4157 case DEVICE_LITTLE_ENDIAN:
4158 val = ldq_le_p(ptr);
4159 break;
4160 case DEVICE_BIG_ENDIAN:
4161 val = ldq_be_p(ptr);
4162 break;
4163 default:
4164 val = ldq_p(ptr);
4165 break;
4166 }
bellard84b7b8e2005-11-28 21:19:04 +00004167 }
4168 return val;
4169}
4170
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004171uint64_t ldq_phys(target_phys_addr_t addr)
4172{
4173 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4174}
4175
4176uint64_t ldq_le_phys(target_phys_addr_t addr)
4177{
4178 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4179}
4180
4181uint64_t ldq_be_phys(target_phys_addr_t addr)
4182{
4183 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
4184}
4185
bellardaab33092005-10-30 20:48:42 +00004186/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05004187uint32_t ldub_phys(target_phys_addr_t addr)
bellardaab33092005-10-30 20:48:42 +00004188{
4189 uint8_t val;
4190 cpu_physical_memory_read(addr, &val, 1);
4191 return val;
4192}
4193
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004194/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004195static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
4196 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00004197{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004198 int io_index;
4199 uint8_t *ptr;
4200 uint64_t val;
Avi Kivity06ef3522012-02-13 16:11:22 +02004201 MemoryRegionSection section;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004202
Avi Kivity06ef3522012-02-13 16:11:22 +02004203 section = phys_page_find(addr >> TARGET_PAGE_BITS);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004204
Avi Kivity06ef3522012-02-13 16:11:22 +02004205 if (!is_ram_rom_romd(&section)) {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004206 /* I/O case */
Avi Kivity06ef3522012-02-13 16:11:22 +02004207 io_index = memory_region_get_ram_addr(section.mr)
4208 & (IO_MEM_NB_ENTRIES - 1);
4209 addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
Avi Kivityacbbec52011-11-21 12:27:03 +02004210 val = io_mem_read(io_index, addr, 2);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004211#if defined(TARGET_WORDS_BIGENDIAN)
4212 if (endian == DEVICE_LITTLE_ENDIAN) {
4213 val = bswap16(val);
4214 }
4215#else
4216 if (endian == DEVICE_BIG_ENDIAN) {
4217 val = bswap16(val);
4218 }
4219#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004220 } else {
4221 /* RAM case */
Avi Kivity06ef3522012-02-13 16:11:22 +02004222 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section.mr)
4223 & TARGET_PAGE_MASK)
4224 + section.offset_within_region)
4225 + (addr & ~TARGET_PAGE_MASK);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004226 switch (endian) {
4227 case DEVICE_LITTLE_ENDIAN:
4228 val = lduw_le_p(ptr);
4229 break;
4230 case DEVICE_BIG_ENDIAN:
4231 val = lduw_be_p(ptr);
4232 break;
4233 default:
4234 val = lduw_p(ptr);
4235 break;
4236 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004237 }
4238 return val;
bellardaab33092005-10-30 20:48:42 +00004239}
4240
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004241uint32_t lduw_phys(target_phys_addr_t addr)
4242{
4243 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4244}
4245
4246uint32_t lduw_le_phys(target_phys_addr_t addr)
4247{
4248 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4249}
4250
4251uint32_t lduw_be_phys(target_phys_addr_t addr)
4252{
4253 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
4254}
4255
bellard8df1cd02005-01-28 22:37:22 +00004256/* warning: addr must be aligned. The RAM page is not marked as dirty
4257   and the code inside is not invalidated. This is useful when the dirty
4258   bits are used to track modified PTEs */
Anthony Liguoric227f092009-10-01 16:12:16 -05004259void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
bellard8df1cd02005-01-28 22:37:22 +00004260{
4261 int io_index;
4262 uint8_t *ptr;
Avi Kivity06ef3522012-02-13 16:11:22 +02004263 MemoryRegionSection section;
bellard8df1cd02005-01-28 22:37:22 +00004264
Avi Kivity06ef3522012-02-13 16:11:22 +02004265 section = phys_page_find(addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00004266
Avi Kivity06ef3522012-02-13 16:11:22 +02004267 if (!memory_region_is_ram(section.mr) || section.readonly) {
4268 if (memory_region_is_ram(section.mr)) {
4269 io_index = io_mem_rom.ram_addr;
4270 } else {
4271            io_index = memory_region_get_ram_addr(section.mr) & (IO_MEM_NB_ENTRIES - 1);
4272 }
4273 addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
Avi Kivityacbbec52011-11-21 12:27:03 +02004274 io_mem_write(io_index, addr, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00004275 } else {
Avi Kivity06ef3522012-02-13 16:11:22 +02004276 unsigned long addr1 = (memory_region_get_ram_addr(section.mr)
4277 & TARGET_PAGE_MASK)
4278 + section.offset_within_region
4279 + (addr & ~TARGET_PAGE_MASK);
pbrook5579c7f2009-04-11 14:47:08 +00004280 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00004281 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00004282
4283 if (unlikely(in_migration)) {
4284 if (!cpu_physical_memory_is_dirty(addr1)) {
4285 /* invalidate code */
4286 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4287 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09004288 cpu_physical_memory_set_dirty_flags(
4289 addr1, (0xff & ~CODE_DIRTY_FLAG));
aliguori74576192008-10-06 14:02:03 +00004290 }
4291 }
bellard8df1cd02005-01-28 22:37:22 +00004292 }
4293}
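
/* Usage sketch (hypothetical PTE layout): the intended caller of
   stl_phys_notdirty() is a target MMU helper that sets accessed/dirty
   bits in a guest PTE; the update must not itself trip the dirty
   tracking that is watching for guest PTE modification. */
#define EXAMPLE_PTE_ACCESSED 0x20

static void example_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & EXAMPLE_PTE_ACCESSED)) {
        stl_phys_notdirty(pte_addr, pte | EXAMPLE_PTE_ACCESSED);
    }
}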
4294
Anthony Liguoric227f092009-10-01 16:12:16 -05004295void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
j_mayerbc98a7e2007-04-04 07:55:12 +00004296{
4297 int io_index;
4298 uint8_t *ptr;
Avi Kivity06ef3522012-02-13 16:11:22 +02004299 MemoryRegionSection section;
j_mayerbc98a7e2007-04-04 07:55:12 +00004300
Avi Kivity06ef3522012-02-13 16:11:22 +02004301 section = phys_page_find(addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00004302
Avi Kivity06ef3522012-02-13 16:11:22 +02004303 if (!memory_region_is_ram(section.mr) || section.readonly) {
4304 if (memory_region_is_ram(section.mr)) {
4305 io_index = io_mem_rom.ram_addr;
4306 } else {
4307 io_index = memory_region_get_ram_addr(section.mr)
4308 & (IO_MEM_NB_ENTRIES - 1);
4309 }
4310 addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
j_mayerbc98a7e2007-04-04 07:55:12 +00004311#ifdef TARGET_WORDS_BIGENDIAN
Avi Kivityacbbec52011-11-21 12:27:03 +02004312 io_mem_write(io_index, addr, val >> 32, 4);
4313 io_mem_write(io_index, addr + 4, (uint32_t)val, 4);
j_mayerbc98a7e2007-04-04 07:55:12 +00004314#else
Avi Kivityacbbec52011-11-21 12:27:03 +02004315 io_mem_write(io_index, addr, (uint32_t)val, 4);
4316 io_mem_write(io_index, addr + 4, val >> 32, 4);
j_mayerbc98a7e2007-04-04 07:55:12 +00004317#endif
4318 } else {
Avi Kivity06ef3522012-02-13 16:11:22 +02004319 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section.mr)
4320 & TARGET_PAGE_MASK)
4321 + section.offset_within_region)
4322 + (addr & ~TARGET_PAGE_MASK);
j_mayerbc98a7e2007-04-04 07:55:12 +00004323 stq_p(ptr, val);
4324 }
4325}
4326
bellard8df1cd02005-01-28 22:37:22 +00004327/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004328static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
4329 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00004330{
4331 int io_index;
4332 uint8_t *ptr;
Avi Kivity06ef3522012-02-13 16:11:22 +02004333 MemoryRegionSection section;
bellard8df1cd02005-01-28 22:37:22 +00004334
Avi Kivity06ef3522012-02-13 16:11:22 +02004335 section = phys_page_find(addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00004336
Avi Kivity06ef3522012-02-13 16:11:22 +02004337 if (!memory_region_is_ram(section.mr) || section.readonly) {
4338 if (memory_region_is_ram(section.mr)) {
4339 io_index = io_mem_rom.ram_addr;
4340 } else {
4341 io_index = memory_region_get_ram_addr(section.mr)
4342 & (IO_MEM_NB_ENTRIES - 1);
4343 }
4344 addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004345#if defined(TARGET_WORDS_BIGENDIAN)
4346 if (endian == DEVICE_LITTLE_ENDIAN) {
4347 val = bswap32(val);
4348 }
4349#else
4350 if (endian == DEVICE_BIG_ENDIAN) {
4351 val = bswap32(val);
4352 }
4353#endif
Avi Kivityacbbec52011-11-21 12:27:03 +02004354 io_mem_write(io_index, addr, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00004355 } else {
4356 unsigned long addr1;
Avi Kivity06ef3522012-02-13 16:11:22 +02004357 addr1 = (memory_region_get_ram_addr(section.mr) & TARGET_PAGE_MASK)
4358 + section.offset_within_region
4359 + (addr & ~TARGET_PAGE_MASK);
bellard8df1cd02005-01-28 22:37:22 +00004360 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00004361 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004362 switch (endian) {
4363 case DEVICE_LITTLE_ENDIAN:
4364 stl_le_p(ptr, val);
4365 break;
4366 case DEVICE_BIG_ENDIAN:
4367 stl_be_p(ptr, val);
4368 break;
4369 default:
4370 stl_p(ptr, val);
4371 break;
4372 }
bellard3a7d9292005-08-21 09:26:42 +00004373 if (!cpu_physical_memory_is_dirty(addr1)) {
4374 /* invalidate code */
4375 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4376 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09004377 cpu_physical_memory_set_dirty_flags(addr1,
4378 (0xff & ~CODE_DIRTY_FLAG));
bellard3a7d9292005-08-21 09:26:42 +00004379 }
bellard8df1cd02005-01-28 22:37:22 +00004380 }
4381}
4382
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004383void stl_phys(target_phys_addr_t addr, uint32_t val)
4384{
4385 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4386}
4387
4388void stl_le_phys(target_phys_addr_t addr, uint32_t val)
4389{
4390 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4391}
4392
4393void stl_be_phys(target_phys_addr_t addr, uint32_t val)
4394{
4395 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4396}
4397
bellardaab33092005-10-30 20:48:42 +00004398/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05004399void stb_phys(target_phys_addr_t addr, uint32_t val)
bellardaab33092005-10-30 20:48:42 +00004400{
4401 uint8_t v = val;
4402 cpu_physical_memory_write(addr, &v, 1);
4403}
4404
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004405/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004406static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
4407 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00004408{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004409 int io_index;
4410 uint8_t *ptr;
Avi Kivity06ef3522012-02-13 16:11:22 +02004411 MemoryRegionSection section;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004412
Avi Kivity06ef3522012-02-13 16:11:22 +02004413 section = phys_page_find(addr >> TARGET_PAGE_BITS);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004414
Avi Kivity06ef3522012-02-13 16:11:22 +02004415 if (!memory_region_is_ram(section.mr) || section.readonly) {
4416 if (memory_region_is_ram(section.mr)) {
4417 io_index = io_mem_rom.ram_addr;
4418 } else {
4419 io_index = memory_region_get_ram_addr(section.mr)
4420 & (IO_MEM_NB_ENTRIES - 1);
4421 }
4422 addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004423#if defined(TARGET_WORDS_BIGENDIAN)
4424 if (endian == DEVICE_LITTLE_ENDIAN) {
4425 val = bswap16(val);
4426 }
4427#else
4428 if (endian == DEVICE_BIG_ENDIAN) {
4429 val = bswap16(val);
4430 }
4431#endif
Avi Kivityacbbec52011-11-21 12:27:03 +02004432 io_mem_write(io_index, addr, val, 2);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004433 } else {
4434 unsigned long addr1;
Avi Kivity06ef3522012-02-13 16:11:22 +02004435 addr1 = (memory_region_get_ram_addr(section.mr) & TARGET_PAGE_MASK)
4436 + section.offset_within_region + (addr & ~TARGET_PAGE_MASK);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004437 /* RAM case */
4438 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004439 switch (endian) {
4440 case DEVICE_LITTLE_ENDIAN:
4441 stw_le_p(ptr, val);
4442 break;
4443 case DEVICE_BIG_ENDIAN:
4444 stw_be_p(ptr, val);
4445 break;
4446 default:
4447 stw_p(ptr, val);
4448 break;
4449 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004450 if (!cpu_physical_memory_is_dirty(addr1)) {
4451 /* invalidate code */
4452 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4453 /* set dirty bit */
4454 cpu_physical_memory_set_dirty_flags(addr1,
4455 (0xff & ~CODE_DIRTY_FLAG));
4456 }
4457 }
bellardaab33092005-10-30 20:48:42 +00004458}
4459
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004460void stw_phys(target_phys_addr_t addr, uint32_t val)
4461{
4462 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4463}
4464
4465void stw_le_phys(target_phys_addr_t addr, uint32_t val)
4466{
4467 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4468}
4469
4470void stw_be_phys(target_phys_addr_t addr, uint32_t val)
4471{
4472 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4473}
4474
bellardaab33092005-10-30 20:48:42 +00004475/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05004476void stq_phys(target_phys_addr_t addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00004477{
4478 val = tswap64(val);
Stefan Weil71d2b722011-03-26 21:06:56 +01004479 cpu_physical_memory_write(addr, &val, 8);
bellardaab33092005-10-30 20:48:42 +00004480}
4481
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004482void stq_le_phys(target_phys_addr_t addr, uint64_t val)
4483{
4484 val = cpu_to_le64(val);
4485 cpu_physical_memory_write(addr, &val, 8);
4486}
4487
4488void stq_be_phys(target_phys_addr_t addr, uint64_t val)
4489{
4490 val = cpu_to_be64(val);
4491 cpu_physical_memory_write(addr, &val, 8);
4492}
4493
aliguori5e2972f2009-03-28 17:51:36 +00004494/* virtual memory access for debug (includes writing to ROM) */
ths5fafdf22007-09-16 21:08:06 +00004495int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00004496 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00004497{
4498 int l;
Anthony Liguoric227f092009-10-01 16:12:16 -05004499 target_phys_addr_t phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00004500 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00004501
4502 while (len > 0) {
4503 page = addr & TARGET_PAGE_MASK;
4504 phys_addr = cpu_get_phys_page_debug(env, page);
4505 /* if no physical page mapped, return an error */
4506 if (phys_addr == -1)
4507 return -1;
4508 l = (page + TARGET_PAGE_SIZE) - addr;
4509 if (l > len)
4510 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00004511 phys_addr += (addr & ~TARGET_PAGE_MASK);
aliguori5e2972f2009-03-28 17:51:36 +00004512 if (is_write)
4513 cpu_physical_memory_write_rom(phys_addr, buf, l);
4514 else
aliguori5e2972f2009-03-28 17:51:36 +00004515 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
bellard13eb76e2004-01-24 15:23:36 +00004516 len -= l;
4517 buf += l;
4518 addr += l;
4519 }
4520 return 0;
4521}
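
/* Usage sketch (illustrative opcode only): debug stubs such as the
   gdbstub call cpu_memory_rw_debug(); routing a breakpoint write
   through it means the byte lands even when the page is ROM. */
static int example_plant_breakpoint(CPUState *env, target_ulong pc)
{
    uint8_t bkpt = 0xcc;    /* x86 int3, purely an example */

    return cpu_memory_rw_debug(env, pc, &bkpt, 1, 1);
}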
Paul Brooka68fe892010-03-01 00:08:59 +00004522#endif
bellard13eb76e2004-01-24 15:23:36 +00004523
pbrook2e70f6e2008-06-29 01:03:05 +00004524/* in deterministic execution mode, instructions doing device I/O
4525 must be at the end of the TB */
4526void cpu_io_recompile(CPUState *env, void *retaddr)
4527{
4528 TranslationBlock *tb;
4529 uint32_t n, cflags;
4530 target_ulong pc, cs_base;
4531 uint64_t flags;
4532
4533 tb = tb_find_pc((unsigned long)retaddr);
4534 if (!tb) {
4535 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
4536 retaddr);
4537 }
4538 n = env->icount_decr.u16.low + tb->icount;
Stefan Weil618ba8e2011-04-18 06:39:53 +00004539 cpu_restore_state(tb, env, (unsigned long)retaddr);
pbrook2e70f6e2008-06-29 01:03:05 +00004540 /* Calculate how many instructions had been executed before the fault
thsbf20dc02008-06-30 17:22:19 +00004541 occurred. */
pbrook2e70f6e2008-06-29 01:03:05 +00004542 n = n - env->icount_decr.u16.low;
4543 /* Generate a new TB ending on the I/O insn. */
4544 n++;
4545 /* On MIPS and SH, delay slot instructions can only be restarted if
4546 they were already the first instruction in the TB. If this is not
thsbf20dc02008-06-30 17:22:19 +00004547 the first instruction in a TB then re-execute the preceding
pbrook2e70f6e2008-06-29 01:03:05 +00004548 branch. */
4549#if defined(TARGET_MIPS)
4550 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4551 env->active_tc.PC -= 4;
4552 env->icount_decr.u16.low++;
4553 env->hflags &= ~MIPS_HFLAG_BMASK;
4554 }
4555#elif defined(TARGET_SH4)
4556 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4557 && n > 1) {
4558 env->pc -= 2;
4559 env->icount_decr.u16.low++;
4560 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4561 }
4562#endif
4563 /* This should never happen. */
4564 if (n > CF_COUNT_MASK)
4565 cpu_abort(env, "TB too big during recompile");
4566
4567 cflags = n | CF_LAST_IO;
4568 pc = tb->pc;
4569 cs_base = tb->cs_base;
4570 flags = tb->flags;
4571 tb_phys_invalidate(tb, -1);
4572 /* FIXME: In theory this could raise an exception. In practice
4573 we have already translated the block once so it's probably ok. */
4574 tb_gen_code(env, pc, cs_base, flags, cflags);
thsbf20dc02008-06-30 17:22:19 +00004575 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
pbrook2e70f6e2008-06-29 01:03:05 +00004576 the first in the TB) then we end up generating a whole new TB and
4577 repeating the fault, which is horribly inefficient.
4578 Better would be to execute just this insn uncached, or generate a
4579 second new TB. */
4580 cpu_resume_from_signal(env, NULL);
4581}
4582
Paul Brookb3755a92010-03-12 16:54:58 +00004583#if !defined(CONFIG_USER_ONLY)
4584
Stefan Weil055403b2010-10-22 23:03:32 +02004585void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
bellarde3db7222005-01-26 22:00:47 +00004586{
4587 int i, target_code_size, max_target_code_size;
4588 int direct_jmp_count, direct_jmp2_count, cross_page;
4589 TranslationBlock *tb;
ths3b46e622007-09-17 08:09:54 +00004590
bellarde3db7222005-01-26 22:00:47 +00004591 target_code_size = 0;
4592 max_target_code_size = 0;
4593 cross_page = 0;
4594 direct_jmp_count = 0;
4595 direct_jmp2_count = 0;
4596 for(i = 0; i < nb_tbs; i++) {
4597 tb = &tbs[i];
4598 target_code_size += tb->size;
4599 if (tb->size > max_target_code_size)
4600 max_target_code_size = tb->size;
4601 if (tb->page_addr[1] != -1)
4602 cross_page++;
4603 if (tb->tb_next_offset[0] != 0xffff) {
4604 direct_jmp_count++;
4605 if (tb->tb_next_offset[1] != 0xffff) {
4606 direct_jmp2_count++;
4607 }
4608 }
4609 }
4610    /* XXX: avoid using doubles? */
bellard57fec1f2008-02-01 10:50:11 +00004611 cpu_fprintf(f, "Translation buffer state:\n");
Stefan Weil055403b2010-10-22 23:03:32 +02004612 cpu_fprintf(f, "gen code size %td/%ld\n",
bellard26a5f132008-05-28 12:30:31 +00004613 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4614 cpu_fprintf(f, "TB count %d/%d\n",
4615 nb_tbs, code_gen_max_blocks);
ths5fafdf22007-09-16 21:08:06 +00004616 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
bellarde3db7222005-01-26 22:00:47 +00004617 nb_tbs ? target_code_size / nb_tbs : 0,
4618 max_target_code_size);
Stefan Weil055403b2010-10-22 23:03:32 +02004619 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
bellarde3db7222005-01-26 22:00:47 +00004620 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4621 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
ths5fafdf22007-09-16 21:08:06 +00004622 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4623 cross_page,
bellarde3db7222005-01-26 22:00:47 +00004624 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4625 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
ths5fafdf22007-09-16 21:08:06 +00004626 direct_jmp_count,
bellarde3db7222005-01-26 22:00:47 +00004627 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4628 direct_jmp2_count,
4629 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
bellard57fec1f2008-02-01 10:50:11 +00004630 cpu_fprintf(f, "\nStatistics:\n");
bellarde3db7222005-01-26 22:00:47 +00004631 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4632 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4633 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
bellardb67d9a52008-05-23 09:57:34 +00004634 tcg_dump_info(f, cpu_fprintf);
bellarde3db7222005-01-26 22:00:47 +00004635}
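
/* Usage sketch: dump_exec_info() accepts any fprintf-like callback,
   so the monitor can pass its own printer while ad-hoc debugging can
   simply use stderr. */
static void example_dump_exec_info(void)
{
    dump_exec_info(stderr, fprintf);
}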
4636
Avi Kivityd39e8222012-01-01 23:35:10 +02004637/* NOTE: this function can trigger an exception */
4638/* NOTE2: the returned address is not exactly the physical address: it
4639   is a ram_addr_t offset into guest RAM, not a guest physical address */
4640tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong addr)
4641{
4642 int mmu_idx, page_index, pd;
4643 void *p;
4644
4645 page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
4646 mmu_idx = cpu_mmu_index(env1);
4647 if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
4648 (addr & TARGET_PAGE_MASK))) {
4649 ldub_code(addr);
4650 }
4651 pd = env1->tlb_table[mmu_idx][page_index].addr_code & ~TARGET_PAGE_MASK;
Avi Kivity0e0df1e2012-01-02 00:32:15 +02004652 if (pd != io_mem_ram.ram_addr && pd != io_mem_rom.ram_addr
Avi Kivity06ef3522012-02-13 16:11:22 +02004653 && !io_mem_region[pd]->rom_device) {
Avi Kivityd39e8222012-01-01 23:35:10 +02004654#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC)
4655 cpu_unassigned_access(env1, addr, 0, 1, 0, 4);
4656#else
4657 cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
4658#endif
4659 }
4660 p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
4661 return qemu_ram_addr_from_host_nofail(p);
4662}
4663
Benjamin Herrenschmidt82afa582012-01-10 01:35:11 +00004664/*
4665 * A helper function for the _utterly broken_ virtio device model to find out if
4666 * it's running on a big-endian machine. Don't do this at home, kids!
4667 */
4668bool virtio_is_big_endian(void);
4669bool virtio_is_big_endian(void)
4670{
4671#if defined(TARGET_WORDS_BIGENDIAN)
4672 return true;
4673#else
4674 return false;
4675#endif
4676}
4677
bellard61382a52003-10-27 21:22:23 +00004678#define MMUSUFFIX _cmmu
Blue Swirl39171492011-09-21 18:13:16 +00004679#undef GETPC
bellard61382a52003-10-27 21:22:23 +00004680#define GETPC() NULL
4681#define env cpu_single_env
bellardb769d8f2004-10-03 15:07:13 +00004682#define SOFTMMU_CODE_ACCESS
bellard61382a52003-10-27 21:22:23 +00004683
4684#define SHIFT 0
4685#include "softmmu_template.h"
4686
4687#define SHIFT 1
4688#include "softmmu_template.h"
4689
4690#define SHIFT 2
4691#include "softmmu_template.h"
4692
4693#define SHIFT 3
4694#include "softmmu_template.h"
4695
4696#undef env
4697
4698#endif