/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

#define P_L2_LEVELS \
    (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)

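/* Worked example (purely illustrative; the concrete values depend on the
   host and target configuration): suppose L1_MAP_ADDR_SPACE_BITS is 52 and
   TARGET_PAGE_BITS is 12. Then 40 bits of page index remain, 40 % 10 == 0
   so V_L1_BITS_REM is 0; since 0 < 4 the L1 table absorbs a whole extra
   level and V_L1_BITS becomes 10. That gives V_L1_SIZE == 1024,
   V_L1_SHIFT == 30, and a radix tree of four levels of 1024 entries each. */
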
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageEntry PhysPageEntry;

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;

struct PhysPageEntry {
    union {
        uint16_t leaf; /* index into phys_sections */
        uint16_t node; /* index into phys_map_nodes */
    } u;
};

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL ((uint16_t)~0)

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to MemoryRegionSections.  */
static PhysPageEntry phys_map = { .u.node = PHYS_MAP_NODE_NIL };

static void io_mem_init(void);
static void memory_map_init(void);

/* io memory support */
MemoryRegion *io_mem_region[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static MemoryRegion io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}
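
/* Illustrative note (not part of the original logic): on a host with 4 KiB
   pages emulating a target with 4 KiB pages, page_init() leaves
   qemu_real_host_page_size == qemu_host_page_size == 0x1000 and
   qemu_host_page_mask == ~0xfffUL. If the target used 8 KiB pages,
   qemu_host_page_size would be bumped to 0x2000, so mprotect()-based page
   protection below always operates on whole guest pages. */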

static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}
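
/* Illustrative walk, assuming the hypothetical constants from the example
   above (V_L1_SHIFT == 30, L2_BITS == 10): for page index 0x12345 the L1
   slot is (0x12345 >> 30) & 1023 == 0, the intermediate strides at bits 20
   and 10 select slots 0 and 0x48, and 0x12345 & 0x3ff == 0x345 picks the
   PageDesc inside the bottom-level block. With alloc == 0 the walk bails
   out with NULL at the first missing level instead of allocating it. */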

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].u.node = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}


static void phys_page_set_level(PhysPageEntry *lp, target_phys_addr_t index,
                                uint16_t leaf, int level)
{
    PhysPageEntry *p;
    int i;

    if (lp->u.node == PHYS_MAP_NODE_NIL) {
        lp->u.node = phys_map_node_alloc();
        p = phys_map_nodes[lp->u.node];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].u.leaf = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->u.node];
    }
    lp = &p[(index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    if (level == 0) {
        lp->u.leaf = leaf;
    } else {
        phys_page_set_level(lp, index, leaf, level - 1);
    }
}

static void phys_page_set(target_phys_addr_t index, uint16_t leaf)
{
    phys_map_node_reserve(P_L2_LEVELS);

    phys_page_set_level(&phys_map, index, leaf, P_L2_LEVELS - 1);
}

static MemoryRegionSection phys_page_find(target_phys_addr_t index)
{
    PhysPageEntry lp = phys_map;
    PhysPageEntry *p;
    int i;
    MemoryRegionSection section;
    target_phys_addr_t delta;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0; i--) {
        if (lp.u.node == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.u.node];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.u.leaf;
not_found:
    section = phys_sections[s_index];
    index <<= TARGET_PAGE_BITS;
    assert(section.offset_within_address_space <= index
           && index <= section.offset_within_address_space + section.size-1);
    delta = index - section.offset_within_address_space;
    section.offset_within_address_space += delta;
    section.offset_within_region += delta;
    section.size -= delta;
    return section;
}
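
/* Reading aid (describes the code above, adds no behaviour): the section is
   returned by value with both offsets advanced and the size shrunk so that
   offset_within_address_space lands exactly on the looked-up page. For
   example, a 1 MiB section starting at 0x100000, queried for the page at
   0x180000, comes back with delta == 0x80000 applied: size 0x80000 and both
   offsets advanced by 0x80000. Unmapped indexes fall through to the
   phys_section_unassigned entry via the not_found label. */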

static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Keep the buffer no bigger than 16MB to branch between blocks */
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
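
/* Sizing note (a sketch of the arithmetic above, not new policy): with the
   default 32 MB buffer, code_gen_buffer_max_size keeps
   TCG_MAX_OP_SIZE * OPC_BUF_SIZE bytes of headroom so the block currently
   being translated can never overrun the buffer, and code_gen_max_blocks is
   simply the buffer size divided by CODE_GEN_AVG_BLOCK_SIZE. Hitting either
   limit makes tb_alloc() fail, which in turn triggers tb_flush(). */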

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}
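
/* Usage note (an observation on the pair above): tb_alloc()/tb_free() form
   a bump allocator, not a general heap. tb_free() reclaims storage only
   when the TB being freed is the most recently allocated one, as with the
   single-use temporary TBs mentioned in its comment; freeing anything older
   is a silent no-op, and that space is recovered only at the next
   tb_flush(). */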

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
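
/* A note on the pointer tagging used above (derived from the code, for
   readers): entries in page_next[] and the jmp lists store a
   TranslationBlock pointer with its low two bits hijacked. Values 0 and 1
   name the page slot (or jump slot) within the pointed-to TB, and the tag
   value 2 marks the head of the circular jmp list, which is why jmp_first
   is reset to ((long)tb | 2) as a fail-safe terminator. */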

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
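
/* Worked example (illustrative only): set_bits(tab, 5, 7) must mark bits
   5..11. start and end fall in different bytes, so the else branch runs:
   the first byte is OR'ed with 0xff << 5 == 0xe0 (bits 5..7), no full 0xff
   bytes follow because end1 == 8 equals the rounded-up start, and the last
   byte gets ~(0xff << (12 & 7)) == 0x0f (bits 8..11). */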

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
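
/* Reading aid: the resulting bitmap has one bit per byte of the page
   (TARGET_PAGE_SIZE / 8 bytes in total), set wherever translated code was
   read from. Note the n != 0 case above: for a TB whose second half lives
   on this page, the covered range starts at offset 0 and ends at the TB's
   end offset within the page. */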

TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
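
/* Flow sketch for the retry above: the first tb_alloc() failure means
   either the TB array or the code buffer is exhausted; tb_flush() empties
   both completely, so the second tb_alloc() cannot fail. Setting
   tb_invalidated_flag tells the execution loop that any TB pointer it
   cached across this call is now stale and must be looked up again. */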

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
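
/* Fast-path rationale (a reading of the code above): once a page has taken
   SMC_BITMAP_USE_THRESHOLD write faults, the per-byte code bitmap is built;
   a write of len <= 8 bytes then only pays for a full
   tb_invalidate_phys_page_range() when one of the touched bytes actually
   overlaps translated code. For example, a guest store into a data-only
   part of a code page invalidates nothing. */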
1211
bellard9fa3e852004-01-04 18:06:42 +00001212#if !defined(CONFIG_SOFTMMU)
Paul Brook41c1b1c2010-03-12 16:54:58 +00001213static void tb_invalidate_phys_page(tb_page_addr_t addr,
bellardd720b932004-04-25 17:57:43 +00001214 unsigned long pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00001215{
aliguori6b917542008-11-18 19:46:41 +00001216 TranslationBlock *tb;
bellard9fa3e852004-01-04 18:06:42 +00001217 PageDesc *p;
aliguori6b917542008-11-18 19:46:41 +00001218 int n;
bellardd720b932004-04-25 17:57:43 +00001219#ifdef TARGET_HAS_PRECISE_SMC
aliguori6b917542008-11-18 19:46:41 +00001220 TranslationBlock *current_tb = NULL;
bellardd720b932004-04-25 17:57:43 +00001221 CPUState *env = cpu_single_env;
aliguori6b917542008-11-18 19:46:41 +00001222 int current_tb_modified = 0;
1223 target_ulong current_pc = 0;
1224 target_ulong current_cs_base = 0;
1225 int current_flags = 0;
bellardd720b932004-04-25 17:57:43 +00001226#endif
bellard9fa3e852004-01-04 18:06:42 +00001227
1228 addr &= TARGET_PAGE_MASK;
1229 p = page_find(addr >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001230 if (!p)
bellardfd6ce8f2003-05-14 19:00:11 +00001231 return;
1232 tb = p->first_tb;
bellardd720b932004-04-25 17:57:43 +00001233#ifdef TARGET_HAS_PRECISE_SMC
1234 if (tb && pc != 0) {
1235 current_tb = tb_find_pc(pc);
1236 }
1237#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001238 while (tb != NULL) {
bellard9fa3e852004-01-04 18:06:42 +00001239 n = (long)tb & 3;
1240 tb = (TranslationBlock *)((long)tb & ~3);
bellardd720b932004-04-25 17:57:43 +00001241#ifdef TARGET_HAS_PRECISE_SMC
1242 if (current_tb == tb &&
pbrook2e70f6e2008-06-29 01:03:05 +00001243 (current_tb->cflags & CF_COUNT_MASK) != 1) {
bellardd720b932004-04-25 17:57:43 +00001244 /* If we are modifying the current TB, we must stop
1245 its execution. We could be more precise by checking
1246 that the modification is after the current PC, but it
1247 would require a specialized function to partially
1248 restore the CPU state */
ths3b46e622007-09-17 08:09:54 +00001249
bellardd720b932004-04-25 17:57:43 +00001250 current_tb_modified = 1;
Stefan Weil618ba8e2011-04-18 06:39:53 +00001251 cpu_restore_state(current_tb, env, pc);
aliguori6b917542008-11-18 19:46:41 +00001252 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1253 &current_flags);
bellardd720b932004-04-25 17:57:43 +00001254 }
1255#endif /* TARGET_HAS_PRECISE_SMC */
bellard9fa3e852004-01-04 18:06:42 +00001256 tb_phys_invalidate(tb, addr);
1257 tb = tb->page_next[n];
bellardfd6ce8f2003-05-14 19:00:11 +00001258 }
1259 p->first_tb = NULL;
bellardd720b932004-04-25 17:57:43 +00001260#ifdef TARGET_HAS_PRECISE_SMC
1261 if (current_tb_modified) {
1262 /* we generate a block containing just the instruction
1263 modifying the memory. It will ensure that it cannot modify
1264 itself */
bellardea1c1802004-06-14 18:56:36 +00001265 env->current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001266 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
bellardd720b932004-04-25 17:57:43 +00001267 cpu_resume_from_signal(env, puc);
1268 }
1269#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001270}
bellard9fa3e852004-01-04 18:06:42 +00001271#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001272
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non-writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done. */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

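/* Illustrative sketch (not part of the build): the typical caller maps a
 * host PC taken in a signal handler back to the TB that contains it, then
 * rolls the guest state back to that instruction:
 *
 *     TranslationBlock *tb = tb_find_pc((unsigned long)host_pc);
 *     if (tb) {
 *         cpu_restore_state(tb, env, (unsigned long)host_pc);
 *     }
 *
 * host_pc here is hypothetical; cpu_restore_state() is called this same
 * way by the SMC handling earlier in this file.
 */
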
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    ram_addr_t ram_addr;
    MemoryRegionSection section;

    addr = cpu_get_phys_page_debug(env, pc);
    section = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!(memory_region_is_ram(section.mr)
          || (section.mr->rom_device && section.mr->readable))) {
        return;
    }
    ram_addr = (memory_region_get_ram_addr(section.mr)
                + section.offset_within_region) & TARGET_PAGE_MASK;
    ram_addr |= (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint. */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

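/* Illustrative sketch (not part of the build): the gdbstub would set a
 * 4-byte write watchpoint roughly like this. BP_MEM_WRITE is assumed to
 * be the matching access-type flag, alongside the BP_MEM_READ that is
 * tested in tlb_set_page() below:
 *
 *     CPUWatchpoint *wp;
 *     if (cpu_watchpoint_insert(env, addr, 4, BP_GDB | BP_MEM_WRITE,
 *                               &wp) < 0) {
 *         // len was not a power of two (1/2/4/8) or addr was unaligned
 *         return -EINVAL;
 *     }
 */
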
/* Remove a specific watchpoint. */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference. */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints. */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint. */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

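/* Illustrative sketch (not part of the build): inserting and removing a
 * GDB breakpoint is symmetric; the same flags must be passed back to
 * cpu_breakpoint_remove() for the lookup to match:
 *
 *     cpu_breakpoint_insert(env, pc, BP_GDB, NULL);
 *     ...
 *     cpu_breakpoint_remove(env, pc, BP_GDB);
 */
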
/* Remove a specific breakpoint. */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference. */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

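/* Illustrative sketch (not part of the build): a debugger stub steps one
 * guest instruction by toggling this flag around the CPU loop:
 *
 *     cpu_single_step(env, 1);   // next instruction exits with EXCP_DEBUG
 *     ... resume the CPU and wait for EXCP_DEBUG ...
 *     cpu_single_step(env, 0);
 *
 * Note that on the TCG path every toggle pays a full tb_flush().
 */
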
/* enable or disable low-level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif defined(_WIN32)
        /* Win32 doesn't support line-buffering, so use unbuffered output. */
        setvbuf(logfile, NULL, _IONBF, 0);
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
       problem and hope the cpu will stop of its own accord. For userspace
       emulation this often isn't actually as bad as it sounds. Often
       signals are used primarily to interrupt blocking syscalls. */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}

#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
        return;
    }

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu_unlink_tb(env);
    }
}

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *env, int mask)
{
    env->interrupt_request |= mask;
    cpu_unlink_tb(env);
}
#endif /* CONFIG_USER_ONLY */

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}

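/* Illustrative sketch (not part of the build): a device model raises a
 * level-triggered interrupt line and clears it once the guest acks it;
 * CPU_INTERRUPT_HARD is the generic external-IRQ bit used for this:
 *
 *     cpu_interrupt(env, CPU_INTERRUPT_HARD);
 *     ...
 *     cpu_reset_interrupt(env, CPU_INTERRUPT_HARD);
 */
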
const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o port accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

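/* Illustrative sketch (not part of the build): this is how a frontend
 * would wire up a "-d"-style logging option:
 *
 *     int mask = cpu_str_to_log_mask("in_asm,op,int");
 *     if (!mask) {
 *         ... unknown item: list cpu_log_items[] and bail out ...
 *     }
 *     cpu_set_log(mask);
 */
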
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page. */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

static CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read  = -1,
    .addr_write = -1,
    .addr_code  = -1,
    .addend     = -1,
};

/* NOTE:
 * If flush_global is true (the usual case), flush all tlb entries.
 * If flush_global is false, flush (at least) all tlb entries not
 * marked global.
 *
 * Since QEMU doesn't currently implement a global/not-global flag
 * for tlb entries, at the moment tlb_flush() will also flush all
 * tlb entries in the flush_global == false case. This is OK because
 * CPU architectures generally permit an implementation to drop
 * entries from the TLB at any time, so flushing more entries than
 * required is only an efficiency issue, not a correctness issue.
 */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
        }
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
    tlb_flush_count++;
}

bellard61382a52003-10-27 21:22:23 +00001967{
ths5fafdf22007-09-16 21:08:06 +00001968 if (addr == (tlb_entry->addr_read &
bellard84b7b8e2005-11-28 21:19:04 +00001969 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00001970 addr == (tlb_entry->addr_write &
bellard84b7b8e2005-11-28 21:19:04 +00001971 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00001972 addr == (tlb_entry->addr_code &
bellard84b7b8e2005-11-28 21:19:04 +00001973 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
Igor Kovalenko08738982009-07-12 02:15:40 +04001974 *tlb_entry = s_cputlb_empty_entry;
bellard84b7b8e2005-11-28 21:19:04 +00001975 }
bellard61382a52003-10-27 21:22:23 +00001976}
1977
bellard2e126692004-04-25 21:28:44 +00001978void tlb_flush_page(CPUState *env, target_ulong addr)
bellard33417e72003-08-10 21:47:01 +00001979{
bellard8a40a182005-11-20 10:35:40 +00001980 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001981 int mmu_idx;
bellard01243112004-01-04 15:48:17 +00001982
bellard9fa3e852004-01-04 18:06:42 +00001983#if defined(DEBUG_TLB)
bellard108c49b2005-07-24 12:55:09 +00001984 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
bellard9fa3e852004-01-04 18:06:42 +00001985#endif
Paul Brookd4c430a2010-03-17 02:14:28 +00001986 /* Check if we need to flush due to large pages. */
1987 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
1988#if defined(DEBUG_TLB)
1989 printf("tlb_flush_page: forced full flush ("
1990 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
1991 env->tlb_flush_addr, env->tlb_flush_mask);
1992#endif
1993 tlb_flush(env, 1);
1994 return;
1995 }
bellard01243112004-01-04 15:48:17 +00001996 /* must reset current TB so that interrupts cannot modify the
1997 links while we are modifying them */
1998 env->current_tb = NULL;
bellard33417e72003-08-10 21:47:01 +00001999
bellard61382a52003-10-27 21:22:23 +00002000 addr &= TARGET_PAGE_MASK;
bellard33417e72003-08-10 21:47:01 +00002001 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002002 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2003 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
bellard01243112004-01-04 15:48:17 +00002004
edgar_igl5c751e92008-05-06 08:44:21 +00002005 tlb_flush_jmp_cache(env, addr);
bellard9fa3e852004-01-04 18:06:42 +00002006}
2007
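/* Illustrative note: target MMU emulation is expected to call
 * tlb_flush_page(env, vaddr) when a single guest PTE changes, and the
 * heavier tlb_flush(env, 1) when the page-table base or address space
 * changes, since that invalidates every cached translation at once.
 */
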
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}

/* Note: start and end must be within the same ram block. */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below. */
    if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
        != (end - 1) - start) {
        abort();
    }

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for(i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
        }
    }
}

int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;
    void *p;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
            + tlb_entry->addend);
        ram_addr = qemu_ram_addr_from_host_nofail(p);
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    int mmu_idx;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
    }
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated. */
static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB. */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}

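/* Worked example (illustrative, 4KB target pages): inserting a 2MB page
 * at vaddr 0x40000000 records tlb_flush_addr = 0x40000000 and
 * tlb_flush_mask = ~0x1fffff. A second 2MB page at 0x40400000 then
 * widens the mask:
 *
 *     0x40000000 ^ 0x40400000 = 0x00400000, so the loop shifts the mask
 *     from ~0x1fffff to ~0x3fffff to ~0x7fffff before it exits,
 *
 * and any later tlb_flush_page() in 0x40000000..0x407fffff degrades to
 * a full flush.
 */
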
static bool is_ram_rom(MemoryRegionSection *s)
{
    return memory_region_is_ram(s->mr);
}

static bool is_romd(MemoryRegionSection *s)
{
    MemoryRegion *mr = s->mr;

    return mr->rom_device && mr->readable;
}

static bool is_ram_rom_romd(MemoryRegionSection *s)
{
    return is_ram_rom(s) || is_romd(s);
}

/* Add a new TLB entry. At most one entry for a given virtual address
   is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
   supplied size is only used by tlb_flush_page. */
void tlb_set_page(CPUState *env, target_ulong vaddr,
                  target_phys_addr_t paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    MemoryRegionSection section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    unsigned long addend;
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    target_phys_addr_t iotlb;

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }
    section = phys_page_find(paddr >> TARGET_PAGE_BITS);
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
           " prot=%x idx=%d\n",
           vaddr, paddr, prot, mmu_idx);
#endif

    address = vaddr;
    if (!is_ram_rom_romd(&section)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    if (is_ram_rom_romd(&section)) {
        addend = (unsigned long)(memory_region_get_ram_ptr(section.mr)
                                 + section.offset_within_region);
    } else {
        addend = 0;
    }
    if (is_ram_rom(&section)) {
        /* Normal RAM. */
        iotlb = (memory_region_get_ram_addr(section.mr)
                 + section.offset_within_region) & TARGET_PAGE_MASK;
        if (!section.readonly)
            iotlb |= io_mem_notdirty.ram_addr;
        else
            iotlb |= io_mem_rom.ram_addr;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region. This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address. */
        iotlb = memory_region_get_ram_addr(section.mr) & ~TARGET_PAGE_MASK;
        iotlb += section.offset_within_region;
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines. */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = io_mem_watch.ram_addr + paddr;
                address |= TLB_MMIO;
                break;
            }
        }
    }

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section.mr) && section.readonly)
            || is_romd(&section)) {
            /* Write access calls the I/O callback. */
            te->addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section.mr)
                   && !cpu_physical_memory_is_dirty(
                           section.mr->ram_addr
                           + section.offset_within_region)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}

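/* Illustrative sketch (not part of the build): a target's tlb_fill()
 * handler typically finishes by installing the translation it resolved:
 *
 *     tlb_set_page(env, vaddr & TARGET_PAGE_MASK,
 *                  paddr & TARGET_PAGE_MASK, prot, mmu_idx,
 *                  TARGET_PAGE_SIZE);
 *
 * passing a larger size only when the guest mapping really is a large
 * page, so that tlb_add_large_page() can track it.
 */
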
#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */

struct walk_memory_regions_data
{
    walk_memory_regions_fn fn;
    void *priv;
    unsigned long start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   abi_ulong end, int new_prot)
{
    if (data->start != -1ul) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1ul);
    data->prot = new_prot;

    return 0;
}

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    unsigned long i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}

static int dump_region(void *priv, abi_ulong start,
    abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
        " "TARGET_ABI_FMT_lx" %c%c%c\n",
        start, end, end - start,
        ((prot & PAGE_READ) ? 'r' : '-'),
        ((prot & PAGE_WRITE) ? 'w' : '-'),
        ((prot & PAGE_EXEC) ? 'x' : '-'));

    return (0);
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
                   "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}

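/* Sample output (illustrative, derived from the format strings above;
 * the addresses are made up):
 *
 *     start    end      size     prot
 *     00010000-00012000 00002000 r-x
 *     00012000-00013000 00001000 rw-
 */
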
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE. The mmap_lock should already be held. */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space. If this assert fires, it probably indicates
       a missing call to h2g_valid. */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside. */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space. If this assert fires, it probably indicates
       a missing call to h2g_valid. */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around. */
        return -1;
    }

    end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p)
            return -1;
        if (!(p->flags & PAGE_VALID))
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            return 0;
        }
    }
    return 0;
}

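/* Illustrative sketch (not part of the build): linux-user style syscall
 * emulation validates a guest buffer before touching it; TARGET_EFAULT
 * is the errno constant from that port:
 *
 *     if (page_check_range(guest_addr, size, PAGE_READ | PAGE_WRITE) < 0) {
 *         return -TARGET_EFAULT;
 *     }
 */
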
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler. However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok. */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}

bellard6a00d602005-11-21 23:25:50 +00002536static inline void tlb_set_dirty(CPUState *env,
2537 unsigned long addr, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002538{
2539}
bellard9fa3e852004-01-04 18:06:42 +00002540#endif /* defined(CONFIG_USER_ONLY) */
2541
pbrooke2eef172008-06-08 01:09:01 +00002542#if !defined(CONFIG_USER_ONLY)
pbrook8da3ff12008-12-01 18:59:50 +00002543
Paul Brookc04b2b72010-03-01 03:31:14 +00002544#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
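/* Dispatcher for a target page shared by several memory sections: iomem is
   the MemoryRegion entered into the physical page map, base is the physical
   address of the page, and sub_section maps each byte offset within the
   page to the index of the phys_sections entry that owns it. */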
2545typedef struct subpage_t {
Avi Kivity70c68e42012-01-02 12:32:48 +02002546 MemoryRegion iomem;
Paul Brookc04b2b72010-03-01 03:31:14 +00002547 target_phys_addr_t base;
Avi Kivity5312bd82012-02-12 18:32:55 +02002548 uint16_t sub_section[TARGET_PAGE_SIZE];
Paul Brookc04b2b72010-03-01 03:31:14 +00002549} subpage_t;
2550
Anthony Liguoric227f092009-10-01 16:12:16 -05002551static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002552 uint16_t section);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002553static subpage_t *subpage_init(target_phys_addr_t base);
Avi Kivity5312bd82012-02-12 18:32:55 +02002554static void destroy_page_desc(uint16_t section_index)
Avi Kivity54688b12012-02-09 17:34:32 +02002555{
Avi Kivity5312bd82012-02-12 18:32:55 +02002556 MemoryRegionSection *section = &phys_sections[section_index];
2557 MemoryRegion *mr = section->mr;
Avi Kivity54688b12012-02-09 17:34:32 +02002558
2559 if (mr->subpage) {
2560 subpage_t *subpage = container_of(mr, subpage_t, iomem);
2561 memory_region_destroy(&subpage->iomem);
2562 g_free(subpage);
2563 }
2564}
2565
Avi Kivity4346ae32012-02-10 17:00:01 +02002566static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
Avi Kivity54688b12012-02-09 17:34:32 +02002567{
2568 unsigned i;
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002569 PhysPageEntry *p;
Avi Kivity54688b12012-02-09 17:34:32 +02002570
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002571 if (lp->u.node == PHYS_MAP_NODE_NIL) {
Avi Kivity54688b12012-02-09 17:34:32 +02002572 return;
2573 }
2574
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002575 p = phys_map_nodes[lp->u.node];
Avi Kivity4346ae32012-02-10 17:00:01 +02002576 for (i = 0; i < L2_SIZE; ++i) {
2577 if (level > 0) {
Avi Kivity54688b12012-02-09 17:34:32 +02002578 destroy_l2_mapping(&p[i], level - 1);
Avi Kivity4346ae32012-02-10 17:00:01 +02002579 } else {
2580 destroy_page_desc(p[i].u.leaf);
Avi Kivity54688b12012-02-09 17:34:32 +02002581 }
Avi Kivity54688b12012-02-09 17:34:32 +02002582 }
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002583 lp->u.node = PHYS_MAP_NODE_NIL;
Avi Kivity54688b12012-02-09 17:34:32 +02002584}
2585
2586static void destroy_all_mappings(void)
2587{
Avi Kivity3eef53d2012-02-10 14:57:31 +02002588 destroy_l2_mapping(&phys_map, P_L2_LEVELS - 1);
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002589 phys_map_nodes_reset();
Avi Kivity54688b12012-02-09 17:34:32 +02002590}
2591
Avi Kivity5312bd82012-02-12 18:32:55 +02002592static uint16_t phys_section_add(MemoryRegionSection *section)
2593{
2594 if (phys_sections_nb == phys_sections_nb_alloc) {
2595 phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
2596 phys_sections = g_renew(MemoryRegionSection, phys_sections,
2597 phys_sections_nb_alloc);
2598 }
2599 phys_sections[phys_sections_nb] = *section;
2600 return phys_sections_nb++;
2601}
2602
2603static void phys_sections_clear(void)
2604{
2605 phys_sections_nb = 0;
2606}
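/* phys_sections is rebuilt from scratch on every topology change:
   core_begin() clears it and the registration callbacks repopulate it. */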
2607
/* Register physical memory.  A MemoryRegionSection whose start or size is
   not aligned to the target page size is routed through a subpage, which
   dispatches accesses on the offset within the page; sections covering
   whole pages are entered directly into the physical page map. */
Avi Kivity0f0cb162012-02-13 17:14:32 +02002616static void register_subpage(MemoryRegionSection *section)
2617{
2618 subpage_t *subpage;
2619 target_phys_addr_t base = section->offset_within_address_space
2620 & TARGET_PAGE_MASK;
2621 MemoryRegionSection existing = phys_page_find(base >> TARGET_PAGE_BITS);
2622 MemoryRegionSection subsection = {
2623 .offset_within_address_space = base,
2624 .size = TARGET_PAGE_SIZE,
2625 };
Avi Kivity0f0cb162012-02-13 17:14:32 +02002626 target_phys_addr_t start, end;
2627
2628 assert(existing.mr->subpage || existing.mr == &io_mem_unassigned);
2629
2630 if (!(existing.mr->subpage)) {
2631 subpage = subpage_init(base);
2632 subsection.mr = &subpage->iomem;
Avi Kivitya3918432012-02-13 17:19:30 +02002633 phys_page_set(base >> TARGET_PAGE_BITS, phys_section_add(&subsection));
Avi Kivity0f0cb162012-02-13 17:14:32 +02002634 } else {
2635 subpage = container_of(existing.mr, subpage_t, iomem);
2636 }
2637 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
2638 end = start + section->size;
2639 subpage_register(subpage, start, end, phys_section_add(section));
2640}
2641
2642
2643static void register_multipage(MemoryRegionSection *section)
bellard33417e72003-08-10 21:47:01 +00002644{
Avi Kivitydd811242012-01-02 12:17:03 +02002645 target_phys_addr_t start_addr = section->offset_within_address_space;
2646 ram_addr_t size = section->size;
Anthony Liguoric227f092009-10-01 16:12:16 -05002647 target_phys_addr_t addr, end_addr;
Avi Kivity5312bd82012-02-12 18:32:55 +02002648 uint16_t section_index = phys_section_add(section);
Avi Kivitydd811242012-01-02 12:17:03 +02002649
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002650 assert(size);
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002651
Anthony Liguoric227f092009-10-01 16:12:16 -05002652 end_addr = start_addr + (target_phys_addr_t)size;
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002653
2654 addr = start_addr;
2655 do {
Avi Kivitya3918432012-02-13 17:19:30 +02002656 phys_page_set(addr >> TARGET_PAGE_BITS, section_index);
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002657 addr += TARGET_PAGE_SIZE;
2658 } while (addr != end_addr);
bellard33417e72003-08-10 21:47:01 +00002659}
2660
Avi Kivity0f0cb162012-02-13 17:14:32 +02002661void cpu_register_physical_memory_log(MemoryRegionSection *section,
2662 bool readonly)
2663{
2664 MemoryRegionSection now = *section, remain = *section;
2665
2666 if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
2667 || (now.size < TARGET_PAGE_SIZE)) {
2668 now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
2669 - now.offset_within_address_space,
2670 now.size);
2671 register_subpage(&now);
2672 remain.size -= now.size;
2673 remain.offset_within_address_space += now.size;
2674 remain.offset_within_region += now.size;
2675 }
2676 now = remain;
2677 now.size &= TARGET_PAGE_MASK;
2678 if (now.size) {
2679 register_multipage(&now);
2680 remain.size -= now.size;
2681 remain.offset_within_address_space += now.size;
2682 remain.offset_within_region += now.size;
2683 }
2684 now = remain;
2685 if (now.size) {
2686 register_subpage(&now);
2687 }
2688}
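/* For example (assuming 4 KiB target pages): registering a section that
   starts at 0x10000800 with size 0x2000 is split three ways -- the head
   [0x10000800, 0x10001000) goes through a subpage, the aligned middle
   [0x10001000, 0x10002000) is entered page by page via register_multipage(),
   and the tail [0x10002000, 0x10002800) goes through a subpage again. */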
2689
2690
Anthony Liguoric227f092009-10-01 16:12:16 -05002691void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002692{
2693 if (kvm_enabled())
2694 kvm_coalesce_mmio_region(addr, size);
2695}
2696
Anthony Liguoric227f092009-10-01 16:12:16 -05002697void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002698{
2699 if (kvm_enabled())
2700 kvm_uncoalesce_mmio_region(addr, size);
2701}
2702
Sheng Yang62a27442010-01-26 19:21:16 +08002703void qemu_flush_coalesced_mmio_buffer(void)
2704{
2705 if (kvm_enabled())
2706 kvm_flush_coalesced_mmio_buffer();
2707}
2708
Marcelo Tosattic9027602010-03-01 20:25:08 -03002709#if defined(__linux__) && !defined(TARGET_S390X)
2710
2711#include <sys/vfs.h>
2712
2713#define HUGETLBFS_MAGIC 0x958458f6
2714
2715static long gethugepagesize(const char *path)
2716{
2717 struct statfs fs;
2718 int ret;
2719
2720 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002721 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002722 } while (ret != 0 && errno == EINTR);
2723
2724 if (ret != 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002725 perror(path);
2726 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002727 }
2728
2729 if (fs.f_type != HUGETLBFS_MAGIC)
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002730 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002731
2732 return fs.f_bsize;
2733}
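/* On hugetlbfs mounts, statfs() reports the huge page size in f_bsize
   (typically 2 MiB or 1 GiB on x86-64). */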
2734
Alex Williamson04b16652010-07-02 11:13:17 -06002735static void *file_ram_alloc(RAMBlock *block,
2736 ram_addr_t memory,
2737 const char *path)
Marcelo Tosattic9027602010-03-01 20:25:08 -03002738{
2739 char *filename;
2740 void *area;
2741 int fd;
2742#ifdef MAP_POPULATE
2743 int flags;
2744#endif
2745 unsigned long hpagesize;
2746
2747 hpagesize = gethugepagesize(path);
2748 if (!hpagesize) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002749 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002750 }
2751
2752 if (memory < hpagesize) {
2753 return NULL;
2754 }
2755
2756 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2757 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2758 return NULL;
2759 }
2760
2761 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002762 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002763 }
2764
2765 fd = mkstemp(filename);
2766 if (fd < 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002767 perror("unable to create backing store for hugepages");
2768 free(filename);
2769 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002770 }
2771 unlink(filename);
2772 free(filename);
2773
2774 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2775
2776 /*
2777 * ftruncate is not supported by hugetlbfs in older
2778 * hosts, so don't bother bailing out on errors.
2779 * If anything goes wrong with it under other filesystems,
2780 * mmap will fail.
2781 */
2782 if (ftruncate(fd, memory))
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002783 perror("ftruncate");
Marcelo Tosattic9027602010-03-01 20:25:08 -03002784
2785#ifdef MAP_POPULATE
2786 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2787 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2788 * to sidestep this quirk.
2789 */
2790 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2791 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2792#else
2793 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2794#endif
2795 if (area == MAP_FAILED) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002796 perror("file_ram_alloc: can't mmap RAM pages");
2797 close(fd);
2798 return (NULL);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002799 }
Alex Williamson04b16652010-07-02 11:13:17 -06002800 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002801 return area;
2802}
2803#endif
2804
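/* Best-fit allocator for the ram_addr_t space: for each block, measure the
   gap up to the next-higher block and pick the smallest gap that still
   fits, keeping the address space densely packed. */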
Alex Williamsond17b5282010-06-25 11:08:38 -06002805static ram_addr_t find_ram_offset(ram_addr_t size)
2806{
Alex Williamson04b16652010-07-02 11:13:17 -06002807 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06002808 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002809
2810 if (QLIST_EMPTY(&ram_list.blocks))
2811 return 0;
2812
2813 QLIST_FOREACH(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002814 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002815
2816 end = block->offset + block->length;
2817
2818 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2819 if (next_block->offset >= end) {
2820 next = MIN(next, next_block->offset);
2821 }
2822 }
2823 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06002824 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06002825 mingap = next - end;
2826 }
2827 }
Alex Williamson3e837b22011-10-31 08:54:09 -06002828
2829 if (offset == RAM_ADDR_MAX) {
2830 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2831 (uint64_t)size);
2832 abort();
2833 }
2834
Alex Williamson04b16652010-07-02 11:13:17 -06002835 return offset;
2836}
2837
2838static ram_addr_t last_ram_offset(void)
2839{
Alex Williamsond17b5282010-06-25 11:08:38 -06002840 RAMBlock *block;
2841 ram_addr_t last = 0;
2842
2843 QLIST_FOREACH(block, &ram_list.blocks, next)
2844 last = MAX(last, block->offset + block->length);
2845
2846 return last;
2847}
2848
Avi Kivityc5705a72011-12-20 15:59:12 +02002849void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
Cam Macdonell84b89d72010-07-26 18:10:57 -06002850{
2851 RAMBlock *new_block, *block;
2852
Avi Kivityc5705a72011-12-20 15:59:12 +02002853 new_block = NULL;
2854 QLIST_FOREACH(block, &ram_list.blocks, next) {
2855 if (block->offset == addr) {
2856 new_block = block;
2857 break;
2858 }
2859 }
2860 assert(new_block);
2861 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002862
2863 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2864 char *id = dev->parent_bus->info->get_dev_path(dev);
2865 if (id) {
2866 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05002867 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002868 }
2869 }
2870 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2871
2872 QLIST_FOREACH(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02002873 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06002874 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2875 new_block->idstr);
2876 abort();
2877 }
2878 }
Avi Kivityc5705a72011-12-20 15:59:12 +02002879}
2880
2881ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2882 MemoryRegion *mr)
2883{
2884 RAMBlock *new_block;
2885
2886 size = TARGET_PAGE_ALIGN(size);
2887 new_block = g_malloc0(sizeof(*new_block));
Cam Macdonell84b89d72010-07-26 18:10:57 -06002888
Avi Kivity7c637362011-12-21 13:09:49 +02002889 new_block->mr = mr;
Jun Nakajima432d2682010-08-31 16:41:25 +01002890 new_block->offset = find_ram_offset(size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002891 if (host) {
2892 new_block->host = host;
Huang Yingcd19cfa2011-03-02 08:56:19 +01002893 new_block->flags |= RAM_PREALLOC_MASK;
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002894 } else {
2895 if (mem_path) {
2896#if defined (__linux__) && !defined(TARGET_S390X)
2897 new_block->host = file_ram_alloc(new_block, size, mem_path);
2898 if (!new_block->host) {
2899 new_block->host = qemu_vmalloc(size);
Andreas Färbere78815a2010-09-25 11:26:05 +00002900 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002901 }
2902#else
2903 fprintf(stderr, "-mem-path option unsupported\n");
2904 exit(1);
2905#endif
2906 } else {
2907#if defined(TARGET_S390X) && defined(CONFIG_KVM)
Christian Borntraegerff836782011-05-10 14:49:10 +02002908 /* S390 KVM requires the topmost vma of the RAM to be smaller than
               a system-defined value, which is at least 256GB. Larger systems
2910 have larger values. We put the guest between the end of data
2911 segment (system break) and this value. We use 32GB as a base to
2912 have enough room for the system break to grow. */
2913 new_block->host = mmap((void*)0x800000000, size,
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002914 PROT_EXEC|PROT_READ|PROT_WRITE,
Christian Borntraegerff836782011-05-10 14:49:10 +02002915 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
Alexander Graffb8b2732011-05-20 17:33:28 +02002916 if (new_block->host == MAP_FAILED) {
2917 fprintf(stderr, "Allocating RAM failed\n");
2918 abort();
2919 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002920#else
Jan Kiszka868bb332011-06-21 22:59:09 +02002921 if (xen_enabled()) {
Avi Kivityfce537d2011-12-18 15:48:55 +02002922 xen_ram_alloc(new_block->offset, size, mr);
Jun Nakajima432d2682010-08-31 16:41:25 +01002923 } else {
2924 new_block->host = qemu_vmalloc(size);
2925 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002926#endif
Andreas Färbere78815a2010-09-25 11:26:05 +00002927 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002928 }
2929 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06002930 new_block->length = size;
2931
2932 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2933
Anthony Liguori7267c092011-08-20 22:09:37 -05002934 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
Cam Macdonell84b89d72010-07-26 18:10:57 -06002935 last_ram_offset() >> TARGET_PAGE_BITS);
2936 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2937 0xff, size >> TARGET_PAGE_BITS);
2938
2939 if (kvm_enabled())
2940 kvm_setup_guest_memory(new_block->host, size);
2941
2942 return new_block->offset;
2943}
2944
Avi Kivityc5705a72011-12-20 15:59:12 +02002945ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
pbrook94a6b542009-04-11 17:15:54 +00002946{
Avi Kivityc5705a72011-12-20 15:59:12 +02002947 return qemu_ram_alloc_from_ptr(size, NULL, mr);
pbrook94a6b542009-04-11 17:15:54 +00002948}
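/* Illustrative only: device models normally reach this through the
 * MemoryRegion API (memory_region_init_ram() ends up here) rather than
 * calling it directly. */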
bellarde9a1ab12007-02-08 23:08:38 +00002949
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002950void qemu_ram_free_from_ptr(ram_addr_t addr)
2951{
2952 RAMBlock *block;
2953
2954 QLIST_FOREACH(block, &ram_list.blocks, next) {
2955 if (addr == block->offset) {
2956 QLIST_REMOVE(block, next);
Anthony Liguori7267c092011-08-20 22:09:37 -05002957 g_free(block);
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002958 return;
2959 }
2960 }
2961}
2962
Anthony Liguoric227f092009-10-01 16:12:16 -05002963void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00002964{
Alex Williamson04b16652010-07-02 11:13:17 -06002965 RAMBlock *block;
2966
2967 QLIST_FOREACH(block, &ram_list.blocks, next) {
2968 if (addr == block->offset) {
2969 QLIST_REMOVE(block, next);
Huang Yingcd19cfa2011-03-02 08:56:19 +01002970 if (block->flags & RAM_PREALLOC_MASK) {
2971 ;
2972 } else if (mem_path) {
Alex Williamson04b16652010-07-02 11:13:17 -06002973#if defined (__linux__) && !defined(TARGET_S390X)
2974 if (block->fd) {
2975 munmap(block->host, block->length);
2976 close(block->fd);
2977 } else {
2978 qemu_vfree(block->host);
2979 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01002980#else
2981 abort();
Alex Williamson04b16652010-07-02 11:13:17 -06002982#endif
2983 } else {
2984#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2985 munmap(block->host, block->length);
2986#else
Jan Kiszka868bb332011-06-21 22:59:09 +02002987 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002988 xen_invalidate_map_cache_entry(block->host);
Jun Nakajima432d2682010-08-31 16:41:25 +01002989 } else {
2990 qemu_vfree(block->host);
2991 }
Alex Williamson04b16652010-07-02 11:13:17 -06002992#endif
2993 }
Anthony Liguori7267c092011-08-20 22:09:37 -05002994 g_free(block);
Alex Williamson04b16652010-07-02 11:13:17 -06002995 return;
2996 }
2997 }
2998
bellarde9a1ab12007-02-08 23:08:38 +00002999}
3000
Huang Yingcd19cfa2011-03-02 08:56:19 +01003001#ifndef _WIN32
3002void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
3003{
3004 RAMBlock *block;
3005 ram_addr_t offset;
3006 int flags;
3007 void *area, *vaddr;
3008
3009 QLIST_FOREACH(block, &ram_list.blocks, next) {
3010 offset = addr - block->offset;
3011 if (offset < block->length) {
3012 vaddr = block->host + offset;
3013 if (block->flags & RAM_PREALLOC_MASK) {
3014 ;
3015 } else {
3016 flags = MAP_FIXED;
3017 munmap(vaddr, length);
3018 if (mem_path) {
3019#if defined(__linux__) && !defined(TARGET_S390X)
3020 if (block->fd) {
3021#ifdef MAP_POPULATE
3022 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
3023 MAP_PRIVATE;
3024#else
3025 flags |= MAP_PRIVATE;
3026#endif
3027 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3028 flags, block->fd, offset);
3029 } else {
3030 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3031 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3032 flags, -1, 0);
3033 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01003034#else
3035 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01003036#endif
3037 } else {
3038#if defined(TARGET_S390X) && defined(CONFIG_KVM)
3039 flags |= MAP_SHARED | MAP_ANONYMOUS;
3040 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
3041 flags, -1, 0);
3042#else
3043 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3044 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3045 flags, -1, 0);
3046#endif
3047 }
3048 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00003049 fprintf(stderr, "Could not remap addr: "
3050 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01003051 length, addr);
3052 exit(1);
3053 }
3054 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
3055 }
3056 return;
3057 }
3058 }
3059}
3060#endif /* !_WIN32 */
3061
pbrookdc828ca2009-04-09 22:21:07 +00003062/* Return a host pointer to ram allocated with qemu_ram_alloc.
pbrook5579c7f2009-04-11 14:47:08 +00003063 With the exception of the softmmu code in this file, this should
3064 only be used for local memory (e.g. video ram) that the device owns,
3065 and knows it isn't going to access beyond the end of the block.
3066
3067 It should not be used for general purpose DMA.
3068 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
3069 */
Anthony Liguoric227f092009-10-01 16:12:16 -05003070void *qemu_get_ram_ptr(ram_addr_t addr)
pbrookdc828ca2009-04-09 22:21:07 +00003071{
pbrook94a6b542009-04-11 17:15:54 +00003072 RAMBlock *block;
3073
Alex Williamsonf471a172010-06-11 11:11:42 -06003074 QLIST_FOREACH(block, &ram_list.blocks, next) {
3075 if (addr - block->offset < block->length) {
            /* Move this entry to the start of the list. */
3077 if (block != QLIST_FIRST(&ram_list.blocks)) {
3078 QLIST_REMOVE(block, next);
3079 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
3080 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003081 if (xen_enabled()) {
            /* Check whether the requested address is in RAM, because
             * under Xen we do not map the whole of guest memory into
             * QEMU; in that case, map just up to the end of the page
             * containing the address.
             */
3086 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003087 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01003088 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003089 block->host =
3090 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01003091 }
3092 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003093 return block->host + (addr - block->offset);
3094 }
pbrook94a6b542009-04-11 17:15:54 +00003095 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003096
3097 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3098 abort();
3099
3100 return NULL;
pbrookdc828ca2009-04-09 22:21:07 +00003101}
3102
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02003103/* Return a host pointer to ram allocated with qemu_ram_alloc.
3104 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3105 */
3106void *qemu_safe_ram_ptr(ram_addr_t addr)
3107{
3108 RAMBlock *block;
3109
3110 QLIST_FOREACH(block, &ram_list.blocks, next) {
3111 if (addr - block->offset < block->length) {
Jan Kiszka868bb332011-06-21 22:59:09 +02003112 if (xen_enabled()) {
            /* Check whether the requested address is in RAM, because
             * under Xen we do not map the whole of guest memory into
             * QEMU; in that case, map just up to the end of the page
             * containing the address.
             */
3117 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003118 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01003119 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003120 block->host =
3121 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01003122 }
3123 }
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02003124 return block->host + (addr - block->offset);
3125 }
3126 }
3127
3128 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3129 abort();
3130
3131 return NULL;
3132}
3133
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003134/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
3135 * but takes a size argument */
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003136void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003137{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003138 if (*size == 0) {
3139 return NULL;
3140 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003141 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003142 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02003143 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003144 RAMBlock *block;
3145
3146 QLIST_FOREACH(block, &ram_list.blocks, next) {
3147 if (addr - block->offset < block->length) {
3148 if (addr - block->offset + *size > block->length)
3149 *size = block->length - addr + block->offset;
3150 return block->host + (addr - block->offset);
3151 }
3152 }
3153
3154 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3155 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003156 }
3157}
3158
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003159void qemu_put_ram_ptr(void *addr)
3160{
3161 trace_qemu_put_ram_ptr(addr);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003162}
3163
Marcelo Tosattie8902612010-10-11 15:31:19 -03003164int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00003165{
pbrook94a6b542009-04-11 17:15:54 +00003166 RAMBlock *block;
3167 uint8_t *host = ptr;
3168
Jan Kiszka868bb332011-06-21 22:59:09 +02003169 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003170 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003171 return 0;
3172 }
3173
Alex Williamsonf471a172010-06-11 11:11:42 -06003174 QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
3176 if (block->host == NULL) {
3177 continue;
3178 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003179 if (host - block->host < block->length) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03003180 *ram_addr = block->offset + (host - block->host);
3181 return 0;
Alex Williamsonf471a172010-06-11 11:11:42 -06003182 }
pbrook94a6b542009-04-11 17:15:54 +00003183 }
Jun Nakajima432d2682010-08-31 16:41:25 +01003184
Marcelo Tosattie8902612010-10-11 15:31:19 -03003185 return -1;
3186}
Alex Williamsonf471a172010-06-11 11:11:42 -06003187
Marcelo Tosattie8902612010-10-11 15:31:19 -03003188/* Some of the softmmu routines need to translate from a host pointer
3189 (typically a TLB entry) back to a ram offset. */
3190ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3191{
3192 ram_addr_t ram_addr;
Alex Williamsonf471a172010-06-11 11:11:42 -06003193
Marcelo Tosattie8902612010-10-11 15:31:19 -03003194 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3195 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3196 abort();
3197 }
3198 return ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00003199}
3200
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003201static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
3202 unsigned size)
bellard33417e72003-08-10 21:47:01 +00003203{
pbrook67d3b952006-12-18 05:03:52 +00003204#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00003205 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
pbrook67d3b952006-12-18 05:03:52 +00003206#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003207#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003208 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00003209#endif
3210 return 0;
3211}
3212
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003213static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
3214 uint64_t val, unsigned size)
blueswir1e18231a2008-10-06 18:46:28 +00003215{
3216#ifdef DEBUG_UNASSIGNED
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003217 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
blueswir1e18231a2008-10-06 18:46:28 +00003218#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003219#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003220 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00003221#endif
3222}
3223
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003224static const MemoryRegionOps unassigned_mem_ops = {
3225 .read = unassigned_mem_read,
3226 .write = unassigned_mem_write,
3227 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00003228};
3229
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003230static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
3231 unsigned size)
3232{
3233 abort();
3234}
3235
3236static void error_mem_write(void *opaque, target_phys_addr_t addr,
3237 uint64_t value, unsigned size)
3238{
3239 abort();
3240}
3241
3242static const MemoryRegionOps error_mem_ops = {
3243 .read = error_mem_read,
3244 .write = error_mem_write,
3245 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00003246};
3247
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003248static const MemoryRegionOps rom_mem_ops = {
3249 .read = error_mem_read,
3250 .write = unassigned_mem_write,
3251 .endianness = DEVICE_NATIVE_ENDIAN,
3252};
3253
3254static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
3255 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00003256{
bellard3a7d9292005-08-21 09:26:42 +00003257 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003258 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003259 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3260#if !defined(CONFIG_USER_ONLY)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003261 tb_invalidate_phys_page_fast(ram_addr, size);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003262 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003263#endif
3264 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003265 switch (size) {
3266 case 1:
3267 stb_p(qemu_get_ram_ptr(ram_addr), val);
3268 break;
3269 case 2:
3270 stw_p(qemu_get_ram_ptr(ram_addr), val);
3271 break;
3272 case 4:
3273 stl_p(qemu_get_ram_ptr(ram_addr), val);
3274 break;
3275 default:
3276 abort();
3277 }
bellardf23db162005-08-21 19:12:28 +00003278 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003279 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00003280 /* we remove the notdirty callback only if the code has been
3281 flushed */
3282 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00003283 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00003284}
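/* Writes to pages containing translated code are routed here instead of
   straight to RAM: the affected TBs are invalidated first, the write is
   then performed on the host buffer, and the page's dirty bits are set.
   Once all dirty bits are set (0xff) the slow path is no longer needed
   and the TLB entry is flipped back to plain RAM via tlb_set_dirty(). */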
3285
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003286static const MemoryRegionOps notdirty_mem_ops = {
3287 .read = error_mem_read,
3288 .write = notdirty_mem_write,
3289 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00003290};
3291
pbrook0f459d12008-06-09 00:20:13 +00003292/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00003293static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00003294{
3295 CPUState *env = cpu_single_env;
aliguori06d55cc2008-11-18 20:24:06 +00003296 target_ulong pc, cs_base;
3297 TranslationBlock *tb;
pbrook0f459d12008-06-09 00:20:13 +00003298 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00003299 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00003300 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00003301
aliguori06d55cc2008-11-18 20:24:06 +00003302 if (env->watchpoint_hit) {
3303 /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
3305 * current instruction. */
3306 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3307 return;
3308 }
pbrook2e70f6e2008-06-29 01:03:05 +00003309 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003310 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00003311 if ((vaddr == (wp->vaddr & len_mask) ||
3312 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00003313 wp->flags |= BP_WATCHPOINT_HIT;
3314 if (!env->watchpoint_hit) {
3315 env->watchpoint_hit = wp;
3316 tb = tb_find_pc(env->mem_io_pc);
3317 if (!tb) {
3318 cpu_abort(env, "check_watchpoint: could not find TB for "
3319 "pc=%p", (void *)env->mem_io_pc);
3320 }
Stefan Weil618ba8e2011-04-18 06:39:53 +00003321 cpu_restore_state(tb, env, env->mem_io_pc);
aliguori6e140f22008-11-18 20:37:55 +00003322 tb_phys_invalidate(tb, -1);
3323 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3324 env->exception_index = EXCP_DEBUG;
3325 } else {
3326 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3327 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3328 }
3329 cpu_resume_from_signal(env, NULL);
aliguori06d55cc2008-11-18 20:24:06 +00003330 }
aliguori6e140f22008-11-18 20:37:55 +00003331 } else {
3332 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00003333 }
3334 }
3335}
3336
pbrook6658ffb2007-03-16 23:58:11 +00003337/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3338 so these check for a hit then pass through to the normal out-of-line
3339 phys routines. */
Avi Kivity1ec9b902012-01-02 12:47:48 +02003340static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
3341 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00003342{
Avi Kivity1ec9b902012-01-02 12:47:48 +02003343 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
3344 switch (size) {
3345 case 1: return ldub_phys(addr);
3346 case 2: return lduw_phys(addr);
3347 case 4: return ldl_phys(addr);
3348 default: abort();
3349 }
pbrook6658ffb2007-03-16 23:58:11 +00003350}
3351
Avi Kivity1ec9b902012-01-02 12:47:48 +02003352static void watch_mem_write(void *opaque, target_phys_addr_t addr,
3353 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00003354{
Avi Kivity1ec9b902012-01-02 12:47:48 +02003355 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
3356 switch (size) {
    case 1: stb_phys(addr, val); break;
    case 2: stw_phys(addr, val); break;
    case 4: stl_phys(addr, val); break;
3360 default: abort();
3361 }
pbrook6658ffb2007-03-16 23:58:11 +00003362}
3363
Avi Kivity1ec9b902012-01-02 12:47:48 +02003364static const MemoryRegionOps watch_mem_ops = {
3365 .read = watch_mem_read,
3366 .write = watch_mem_write,
3367 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00003368};
pbrook6658ffb2007-03-16 23:58:11 +00003369
Avi Kivity70c68e42012-01-02 12:32:48 +02003370static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
3371 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003372{
Avi Kivity70c68e42012-01-02 12:32:48 +02003373 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003374 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02003375 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00003376#if defined(DEBUG_SUBPAGE)
3377 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3378 mmio, len, addr, idx);
3379#endif
blueswir1db7b5422007-05-26 17:36:03 +00003380
Avi Kivity5312bd82012-02-12 18:32:55 +02003381 section = &phys_sections[mmio->sub_section[idx]];
3382 addr += mmio->base;
3383 addr -= section->offset_within_address_space;
3384 addr += section->offset_within_region;
3385 return io_mem_read(section->mr->ram_addr, addr, len);
blueswir1db7b5422007-05-26 17:36:03 +00003386}
3387
Avi Kivity70c68e42012-01-02 12:32:48 +02003388static void subpage_write(void *opaque, target_phys_addr_t addr,
3389 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003390{
Avi Kivity70c68e42012-01-02 12:32:48 +02003391 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003392 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02003393 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00003394#if defined(DEBUG_SUBPAGE)
Avi Kivity70c68e42012-01-02 12:32:48 +02003395 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
3396 " idx %d value %"PRIx64"\n",
Richard Hendersonf6405242010-04-22 16:47:31 -07003397 __func__, mmio, len, addr, idx, value);
blueswir1db7b5422007-05-26 17:36:03 +00003398#endif
Richard Hendersonf6405242010-04-22 16:47:31 -07003399
Avi Kivity5312bd82012-02-12 18:32:55 +02003400 section = &phys_sections[mmio->sub_section[idx]];
3401 addr += mmio->base;
3402 addr -= section->offset_within_address_space;
3403 addr += section->offset_within_region;
3404 io_mem_write(section->mr->ram_addr, addr, value, len);
blueswir1db7b5422007-05-26 17:36:03 +00003405}
3406
Avi Kivity70c68e42012-01-02 12:32:48 +02003407static const MemoryRegionOps subpage_ops = {
3408 .read = subpage_read,
3409 .write = subpage_write,
3410 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00003411};
3412
Avi Kivityde712f92012-01-02 12:41:07 +02003413static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
3414 unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01003415{
3416 ram_addr_t raddr = addr;
3417 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02003418 switch (size) {
3419 case 1: return ldub_p(ptr);
3420 case 2: return lduw_p(ptr);
3421 case 4: return ldl_p(ptr);
3422 default: abort();
3423 }
Andreas Färber56384e82011-11-30 16:26:21 +01003424}
3425
Avi Kivityde712f92012-01-02 12:41:07 +02003426static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
3427 uint64_t value, unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01003428{
3429 ram_addr_t raddr = addr;
3430 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02003431 switch (size) {
3432 case 1: return stb_p(ptr, value);
3433 case 2: return stw_p(ptr, value);
3434 case 4: return stl_p(ptr, value);
3435 default: abort();
3436 }
Andreas Färber56384e82011-11-30 16:26:21 +01003437}
3438
Avi Kivityde712f92012-01-02 12:41:07 +02003439static const MemoryRegionOps subpage_ram_ops = {
3440 .read = subpage_ram_read,
3441 .write = subpage_ram_write,
3442 .endianness = DEVICE_NATIVE_ENDIAN,
Andreas Färber56384e82011-11-30 16:26:21 +01003443};
3444
Anthony Liguoric227f092009-10-01 16:12:16 -05003445static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02003446 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00003447{
3448 int idx, eidx;
3449
3450 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3451 return -1;
3452 idx = SUBPAGE_IDX(start);
3453 eidx = SUBPAGE_IDX(end);
3454#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
3457#endif
Avi Kivity5312bd82012-02-12 18:32:55 +02003458 if (memory_region_is_ram(phys_sections[section].mr)) {
3459 MemoryRegionSection new_section = phys_sections[section];
3460 new_section.mr = &io_mem_subpage_ram;
3461 section = phys_section_add(&new_section);
Andreas Färber56384e82011-11-30 16:26:21 +01003462 }
blueswir1db7b5422007-05-26 17:36:03 +00003463 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02003464 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00003465 }
3466
3467 return 0;
3468}
3469
Avi Kivity0f0cb162012-02-13 17:14:32 +02003470static subpage_t *subpage_init(target_phys_addr_t base)
blueswir1db7b5422007-05-26 17:36:03 +00003471{
Anthony Liguoric227f092009-10-01 16:12:16 -05003472 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00003473
Anthony Liguori7267c092011-08-20 22:09:37 -05003474 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00003475
3476 mmio->base = base;
Avi Kivity70c68e42012-01-02 12:32:48 +02003477 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
3478 "subpage", TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02003479 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00003480#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00003483#endif
Avi Kivity0f0cb162012-02-13 17:14:32 +02003484 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
blueswir1db7b5422007-05-26 17:36:03 +00003485
3486 return mmio;
3487}
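/* A fresh subpage maps every byte offset to phys_section_unassigned;
 * callers then overlay their own ranges, e.g. (indices hypothetical):
 *
 *     subpage_t *sp = subpage_init(0x10000000);
 *     subpage_register(sp, 0x000, 0x7ff, some_section_index);
 */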
3488
aliguori88715652009-02-11 15:20:58 +00003489static int get_free_io_mem_idx(void)
3490{
3491 int i;
3492
3493 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3494 if (!io_mem_used[i]) {
3495 io_mem_used[i] = 1;
3496 return i;
3497 }
    fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
aliguori88715652009-02-11 15:20:58 +00003499 return -1;
3500}
3501
/* Register a MemoryRegion for I/O dispatch.  If io_index is zero, a new
   slot is allocated; otherwise the given slot is reused.  The returned
   index can be stored in the physical page map and is passed back to
   io_mem_read()/io_mem_write().  (-1) is returned on error. */
Avi Kivitya621f382012-01-02 13:12:08 +02003509static int cpu_register_io_memory_fixed(int io_index, MemoryRegion *mr)
bellard33417e72003-08-10 21:47:01 +00003510{
bellard33417e72003-08-10 21:47:01 +00003511 if (io_index <= 0) {
aliguori88715652009-02-11 15:20:58 +00003512 io_index = get_free_io_mem_idx();
3513 if (io_index == -1)
3514 return io_index;
bellard33417e72003-08-10 21:47:01 +00003515 } else {
3516 if (io_index >= IO_MEM_NB_ENTRIES)
3517 return -1;
3518 }
bellardb5ff1b32005-11-26 10:38:39 +00003519
Avi Kivitya621f382012-01-02 13:12:08 +02003520 io_mem_region[io_index] = mr;
Richard Hendersonf6405242010-04-22 16:47:31 -07003521
Avi Kivity11c7ef02012-01-02 17:21:07 +02003522 return io_index;
bellard33417e72003-08-10 21:47:01 +00003523}
bellard61382a52003-10-27 21:22:23 +00003524
Avi Kivitya621f382012-01-02 13:12:08 +02003525int cpu_register_io_memory(MemoryRegion *mr)
Avi Kivity1eed09c2009-06-14 11:38:51 +03003526{
Avi Kivitya621f382012-01-02 13:12:08 +02003527 return cpu_register_io_memory_fixed(0, mr);
Avi Kivity1eed09c2009-06-14 11:38:51 +03003528}
3529
Avi Kivity11c7ef02012-01-02 17:21:07 +02003530void cpu_unregister_io_memory(int io_index)
aliguori88715652009-02-11 15:20:58 +00003531{
Avi Kivitya621f382012-01-02 13:12:08 +02003532 io_mem_region[io_index] = NULL;
aliguori88715652009-02-11 15:20:58 +00003533 io_mem_used[io_index] = 0;
3534}
3535
Avi Kivity5312bd82012-02-12 18:32:55 +02003536static uint16_t dummy_section(MemoryRegion *mr)
3537{
3538 MemoryRegionSection section = {
3539 .mr = mr,
3540 .offset_within_address_space = 0,
3541 .offset_within_region = 0,
3542 .size = UINT64_MAX,
3543 };
3544
3545 return phys_section_add(&section);
3546}
3547
Avi Kivitye9179ce2009-06-14 11:38:52 +03003548static void io_mem_init(void)
3549{
3550 int i;
3551
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003552 /* Must be first: */
3553 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
3554 assert(io_mem_ram.ram_addr == 0);
3555 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
3556 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
3557 "unassigned", UINT64_MAX);
3558 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
3559 "notdirty", UINT64_MAX);
Avi Kivityde712f92012-01-02 12:41:07 +02003560 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
3561 "subpage-ram", UINT64_MAX);
    /* the five fixed regions registered above occupy io_mem slots 0..4 */
    for (i = 0; i < 5; i++)
        io_mem_used[i] = 1;
3564
Avi Kivity1ec9b902012-01-02 12:47:48 +02003565 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
3566 "watch", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003567}
3568
Avi Kivity50c1e142012-02-08 21:36:02 +02003569static void core_begin(MemoryListener *listener)
3570{
Avi Kivity54688b12012-02-09 17:34:32 +02003571 destroy_all_mappings();
Avi Kivity5312bd82012-02-12 18:32:55 +02003572 phys_sections_clear();
Avi Kivityd6f2ea22012-02-12 20:12:49 +02003573 phys_map.u.node = PHYS_MAP_NODE_NIL;
Avi Kivity5312bd82012-02-12 18:32:55 +02003574 phys_section_unassigned = dummy_section(&io_mem_unassigned);
Avi Kivity50c1e142012-02-08 21:36:02 +02003575}
3576
3577static void core_commit(MemoryListener *listener)
3578{
Avi Kivity117712c2012-02-12 21:23:17 +02003579 CPUState *env;
3580
3581 /* since each CPU stores ram addresses in its TLB cache, we must
3582 reset the modified entries */
3583 /* XXX: slow ! */
3584 for(env = first_cpu; env != NULL; env = env->next_cpu) {
3585 tlb_flush(env, 1);
3586 }
Avi Kivity50c1e142012-02-08 21:36:02 +02003587}
3588
Avi Kivity93632742012-02-08 16:54:16 +02003589static void core_region_add(MemoryListener *listener,
3590 MemoryRegionSection *section)
3591{
Avi Kivity4855d412012-02-08 21:16:05 +02003592 cpu_register_physical_memory_log(section, section->readonly);
Avi Kivity93632742012-02-08 16:54:16 +02003593}
3594
3595static void core_region_del(MemoryListener *listener,
3596 MemoryRegionSection *section)
3597{
Avi Kivity93632742012-02-08 16:54:16 +02003598}
3599
Avi Kivity50c1e142012-02-08 21:36:02 +02003600static void core_region_nop(MemoryListener *listener,
3601 MemoryRegionSection *section)
3602{
Avi Kivity54688b12012-02-09 17:34:32 +02003603 cpu_register_physical_memory_log(section, section->readonly);
Avi Kivity50c1e142012-02-08 21:36:02 +02003604}
3605
Avi Kivity93632742012-02-08 16:54:16 +02003606static void core_log_start(MemoryListener *listener,
3607 MemoryRegionSection *section)
3608{
3609}
3610
3611static void core_log_stop(MemoryListener *listener,
3612 MemoryRegionSection *section)
3613{
3614}
3615
3616static void core_log_sync(MemoryListener *listener,
3617 MemoryRegionSection *section)
3618{
3619}
3620
3621static void core_log_global_start(MemoryListener *listener)
3622{
3623 cpu_physical_memory_set_dirty_tracking(1);
3624}
3625
3626static void core_log_global_stop(MemoryListener *listener)
3627{
3628 cpu_physical_memory_set_dirty_tracking(0);
3629}
3630
3631static void core_eventfd_add(MemoryListener *listener,
3632 MemoryRegionSection *section,
3633 bool match_data, uint64_t data, int fd)
3634{
3635}
3636
3637static void core_eventfd_del(MemoryListener *listener,
3638 MemoryRegionSection *section,
3639 bool match_data, uint64_t data, int fd)
3640{
3641}
3642
Avi Kivity50c1e142012-02-08 21:36:02 +02003643static void io_begin(MemoryListener *listener)
3644{
3645}
3646
3647static void io_commit(MemoryListener *listener)
3648{
3649}
3650
Avi Kivity4855d412012-02-08 21:16:05 +02003651static void io_region_add(MemoryListener *listener,
3652 MemoryRegionSection *section)
3653{
3654 iorange_init(&section->mr->iorange, &memory_region_iorange_ops,
3655 section->offset_within_address_space, section->size);
3656 ioport_register(&section->mr->iorange);
3657}
3658
3659static void io_region_del(MemoryListener *listener,
3660 MemoryRegionSection *section)
3661{
3662 isa_unassign_ioport(section->offset_within_address_space, section->size);
3663}
3664
Avi Kivity50c1e142012-02-08 21:36:02 +02003665static void io_region_nop(MemoryListener *listener,
3666 MemoryRegionSection *section)
3667{
3668}
3669
Avi Kivity4855d412012-02-08 21:16:05 +02003670static void io_log_start(MemoryListener *listener,
3671 MemoryRegionSection *section)
3672{
3673}
3674
3675static void io_log_stop(MemoryListener *listener,
3676 MemoryRegionSection *section)
3677{
3678}
3679
3680static void io_log_sync(MemoryListener *listener,
3681 MemoryRegionSection *section)
3682{
3683}
3684
3685static void io_log_global_start(MemoryListener *listener)
3686{
3687}
3688
3689static void io_log_global_stop(MemoryListener *listener)
3690{
3691}
3692
3693static void io_eventfd_add(MemoryListener *listener,
3694 MemoryRegionSection *section,
3695 bool match_data, uint64_t data, int fd)
3696{
3697}
3698
3699static void io_eventfd_del(MemoryListener *listener,
3700 MemoryRegionSection *section,
3701 bool match_data, uint64_t data, int fd)
3702{
3703}
3704
Avi Kivity93632742012-02-08 16:54:16 +02003705static MemoryListener core_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02003706 .begin = core_begin,
3707 .commit = core_commit,
Avi Kivity93632742012-02-08 16:54:16 +02003708 .region_add = core_region_add,
3709 .region_del = core_region_del,
Avi Kivity50c1e142012-02-08 21:36:02 +02003710 .region_nop = core_region_nop,
Avi Kivity93632742012-02-08 16:54:16 +02003711 .log_start = core_log_start,
3712 .log_stop = core_log_stop,
3713 .log_sync = core_log_sync,
3714 .log_global_start = core_log_global_start,
3715 .log_global_stop = core_log_global_stop,
3716 .eventfd_add = core_eventfd_add,
3717 .eventfd_del = core_eventfd_del,
3718 .priority = 0,
3719};
3720
Avi Kivity4855d412012-02-08 21:16:05 +02003721static MemoryListener io_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02003722 .begin = io_begin,
3723 .commit = io_commit,
Avi Kivity4855d412012-02-08 21:16:05 +02003724 .region_add = io_region_add,
3725 .region_del = io_region_del,
Avi Kivity50c1e142012-02-08 21:36:02 +02003726 .region_nop = io_region_nop,
Avi Kivity4855d412012-02-08 21:16:05 +02003727 .log_start = io_log_start,
3728 .log_stop = io_log_stop,
3729 .log_sync = io_log_sync,
3730 .log_global_start = io_log_global_start,
3731 .log_global_stop = io_log_global_stop,
3732 .eventfd_add = io_eventfd_add,
3733 .eventfd_del = io_eventfd_del,
3734 .priority = 0,
3735};
3736
Avi Kivity62152b82011-07-26 14:26:14 +03003737static void memory_map_init(void)
3738{
Anthony Liguori7267c092011-08-20 22:09:37 -05003739 system_memory = g_malloc(sizeof(*system_memory));
Avi Kivity8417ceb2011-08-03 11:56:14 +03003740 memory_region_init(system_memory, "system", INT64_MAX);
Avi Kivity62152b82011-07-26 14:26:14 +03003741 set_system_memory_map(system_memory);
Avi Kivity309cb472011-08-08 16:09:03 +03003742
Anthony Liguori7267c092011-08-20 22:09:37 -05003743 system_io = g_malloc(sizeof(*system_io));
Avi Kivity309cb472011-08-08 16:09:03 +03003744 memory_region_init(system_io, "io", 65536);
3745 set_system_io_map(system_io);
Avi Kivity93632742012-02-08 16:54:16 +02003746
Avi Kivity4855d412012-02-08 21:16:05 +02003747 memory_listener_register(&core_memory_listener, system_memory);
3748 memory_listener_register(&io_memory_listener, system_io);
Avi Kivity62152b82011-07-26 14:26:14 +03003749}
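/* The core listener rebuilds the physical page map (phys_map) whenever the
   memory topology changes; the io listener mirrors the I/O address space
   into the legacy ioport tables.  Both run at priority 0. */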
3750
3751MemoryRegion *get_system_memory(void)
3752{
3753 return system_memory;
3754}
3755
Avi Kivity309cb472011-08-08 16:09:03 +03003756MemoryRegion *get_system_io(void)
3757{
3758 return system_io;
3759}
3760
pbrooke2eef172008-06-08 01:09:01 +00003761#endif /* !defined(CONFIG_USER_ONLY) */
3762
bellard13eb76e2004-01-24 15:23:36 +00003763/* physical memory access (slow version, mainly for debug) */
3764#if defined(CONFIG_USER_ONLY)
Paul Brooka68fe892010-03-01 00:08:59 +00003765int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3766 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003767{
3768 int l, flags;
3769 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00003770 void * p;
bellard13eb76e2004-01-24 15:23:36 +00003771
3772 while (len > 0) {
3773 page = addr & TARGET_PAGE_MASK;
3774 l = (page + TARGET_PAGE_SIZE) - addr;
3775 if (l > len)
3776 l = len;
3777 flags = page_get_flags(page);
3778 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00003779 return -1;
bellard13eb76e2004-01-24 15:23:36 +00003780 if (is_write) {
3781 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00003782 return -1;
bellard579a97f2007-11-11 14:26:47 +00003783 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003784 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00003785 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003786 memcpy(p, buf, l);
3787 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00003788 } else {
3789 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00003790 return -1;
bellard579a97f2007-11-11 14:26:47 +00003791 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003792 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00003793 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003794 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00003795 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00003796 }
3797 len -= l;
3798 buf += l;
3799 addr += l;
3800 }
Paul Brooka68fe892010-03-01 00:08:59 +00003801 return 0;
bellard13eb76e2004-01-24 15:23:36 +00003802}
bellard8df1cd02005-01-28 22:37:22 +00003803
bellard13eb76e2004-01-24 15:23:36 +00003804#else
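/* Softmmu version: the transfer is split at target-page boundaries; each
   page is looked up in the physical page map and either dispatched to the
   io_mem_read()/io_mem_write() callbacks (MMIO, at most 4 bytes at a time)
   or copied directly to/from host RAM, updating dirty bits on writes. */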
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    MemoryRegionSection section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        if (is_write) {
            if (!memory_region_is_ram(section.mr)) {
                target_phys_addr_t addr1;
                io_index = memory_region_get_ram_addr(section.mr)
                    & (IO_MEM_NB_ENTRIES - 1);
                addr1 = (addr & ~TARGET_PAGE_MASK)
                    + section.offset_within_region;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write(io_index, addr1, val, 4);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write(io_index, addr1, val, 2);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write(io_index, addr1, val, 1);
                    l = 1;
                }
            } else if (!section.readonly) {
                ram_addr_t addr1;
                addr1 = (memory_region_get_ram_addr(section.mr)
                         + section.offset_within_region)
                    | (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                qemu_put_ram_ptr(ptr);
            }
        } else {
            if (!is_ram_rom_romd(&section)) {
                target_phys_addr_t addr1;
                /* I/O case */
                io_index = memory_region_get_ram_addr(section.mr)
                    & (IO_MEM_NB_ENTRIES - 1);
                addr1 = (addr & ~TARGET_PAGE_MASK)
                    + section.offset_within_region;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read(io_index, addr1, 4);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read(io_index, addr1, 2);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read(io_index, addr1, 1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(section.mr->ram_addr
                                       + section.offset_within_region);
                memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
                qemu_put_ram_ptr(ptr);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

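/* Illustrative sketch, not part of the original exec.c: a typical device
   model DMA transaction built on cpu_physical_memory_rw().  The descriptor
   layout and the 0x1000 guest address are hypothetical. */
#if 0
static void example_dma_transaction(void)
{
    uint32_t desc[2];               /* hypothetical {buf_addr, buf_len} pair */
    uint32_t status = 1;

    /* is_write == 0: fetch the descriptor from guest physical memory;
       the loop above splits the access per page and per access size */
    cpu_physical_memory_rw(0x1000, (uint8_t *)desc, sizeof(desc), 0);
    /* is_write == 1: post a completion status behind the descriptor */
    cpu_physical_memory_rw(0x1000 + sizeof(desc), (uint8_t *)&status, 4, 1);
}
#endif
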
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    MemoryRegionSection section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        if (!is_ram_rom_romd(&section)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (memory_region_get_ram_addr(section.mr)
                     + section.offset_within_region)
                + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            qemu_put_ram_ptr(ptr);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

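/* Illustrative sketch, not part of the original exec.c: firmware loading is
   the typical caller of cpu_physical_memory_write_rom(), because it still
   stores into regions that cpu_physical_memory_rw() would treat as
   read-only.  The 0xfffc0000 base address is hypothetical. */
#if 0
static void example_load_firmware(const uint8_t *blob, int size)
{
    cpu_physical_memory_write_rom(0xfffc0000, blob, size);
}
#endif
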
typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}

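/* Illustrative sketch, not part of the original exec.c: a device that saw
   cpu_physical_memory_map() fail (bounce buffer busy) can register a map
   client; cpu_notify_map_clients() unregisters the client before invoking
   the callback, so the callback can simply retry the whole transfer.
   example_start_dma() is a hypothetical driver entry point. */
#if 0
static void example_start_dma(void *opaque);

static void example_map_retry_cb(void *opaque)
{
    example_start_dma(opaque);
}

static void example_defer_dma(void *opaque)
{
    cpu_register_map_client(opaque, example_map_retry_cb);
}
#endif
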
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t todo = 0;
    int l;
    target_phys_addr_t page;
    MemoryRegionSection section;
    ram_addr_t raddr = RAM_ADDR_MAX;
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        if (!(memory_region_is_ram(section.mr) && !section.readonly)) {
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_read(addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            raddr = memory_region_get_ram_addr(section.mr)
                + section.offset_within_region
                + (addr & ~TARGET_PAGE_MASK);
        }

        len -= l;
        addr += l;
        todo += l;
    }
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}

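/* Illustrative sketch, not part of the original exec.c: the canonical
   map/modify/unmap pattern.  The mapped length may come back shorter than
   requested (e.g. when the range crosses out of a RAM region), and a NULL
   return means the bounce buffer is busy.  example_dma_write() is
   hypothetical. */
#if 0
static int example_dma_write(target_phys_addr_t guest_addr,
                             const uint8_t *data, target_phys_addr_t size)
{
    target_phys_addr_t plen = size;
    void *host = cpu_physical_memory_map(guest_addr, &plen, 1 /* is_write */);

    if (!host) {
        return -1;            /* retry later via cpu_register_map_client() */
    }
    memcpy(host, data, plen); /* plen may be smaller than size */
    cpu_physical_memory_unmap(host, plen, 1, plen);
    return plen == size ? 0 : -1;
}
#endif
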
/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    MemoryRegionSection section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!is_ram_rom_romd(&section)) {
        /* I/O case */
        io_index = memory_region_get_ram_addr(section.mr)
            & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
        val = io_mem_read(io_index, addr, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section.mr)
                                & TARGET_PAGE_MASK)
                               + section.offset_within_region) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

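/* Illustrative sketch, not part of the original exec.c: a device model with
   a little-endian register file reads guest memory through the _le_
   accessor, which yields the little-endian interpretation on every target,
   so the same code works for big- and little-endian guests alike.
   example_read_le_reg() is hypothetical. */
#if 0
static uint32_t example_read_le_reg(target_phys_addr_t reg_base, int idx)
{
    return ldl_le_phys(reg_base + idx * 4);
}
#endif
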
/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!is_ram_rom_romd(&section)) {
        /* I/O case */
        io_index = memory_region_get_ram_addr(section.mr)
            & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;

        /* XXX This is broken when device endian != cpu endian.
           Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = io_mem_read(io_index, addr, 4) << 32;
        val |= io_mem_read(io_index, addr + 4, 4);
#else
        val = io_mem_read(io_index, addr, 4);
        val |= io_mem_read(io_index, addr + 4, 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section.mr)
                                & TARGET_PAGE_MASK)
                               + section.offset_within_region)
            + (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
                                          enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!is_ram_rom_romd(&section)) {
        /* I/O case */
        io_index = memory_region_get_ram_addr(section.mr)
            & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
        val = io_mem_read(io_index, addr, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section.mr)
                                & TARGET_PAGE_MASK)
                               + section.offset_within_region)
            + (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    MemoryRegionSection section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section.mr) || section.readonly) {
        if (memory_region_is_ram(section.mr)) {
            io_index = io_mem_rom.ram_addr;
        } else {
            io_index = memory_region_get_ram_addr(section.mr);
        }
        addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
        io_mem_write(io_index, addr, val, 4);
    } else {
        unsigned long addr1 = (memory_region_get_ram_addr(section.mr)
                               & TARGET_PAGE_MASK)
            + section.offset_within_region
            + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}

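/* Illustrative sketch, not part of the original exec.c: a target MMU
   helper that sets an accessed bit in a guest PTE would use the _notdirty
   store, so the hardware-style page table update does not itself flag the
   RAM page dirty or invalidate translated code.  The 0x20 accessed bit and
   the helper name are hypothetical. */
#if 0
static void example_pte_set_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    stl_phys_notdirty(pte_addr, pte | 0x20);
}
#endif
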
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    MemoryRegionSection section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section.mr) || section.readonly) {
        if (memory_region_is_ram(section.mr)) {
            io_index = io_mem_rom.ram_addr;
        } else {
            io_index = memory_region_get_ram_addr(section.mr)
                & (IO_MEM_NB_ENTRIES - 1);
        }
        addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write(io_index, addr, val >> 32, 4);
        io_mem_write(io_index, addr + 4, (uint32_t)val, 4);
#else
        io_mem_write(io_index, addr, (uint32_t)val, 4);
        io_mem_write(io_index, addr + 4, val >> 32, 4);
#endif
    } else {
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section.mr)
                                & TARGET_PAGE_MASK)
                               + section.offset_within_region)
            + (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    MemoryRegionSection section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section.mr) || section.readonly) {
        if (memory_region_is_ram(section.mr)) {
            io_index = io_mem_rom.ram_addr;
        } else {
            io_index = memory_region_get_ram_addr(section.mr)
                & (IO_MEM_NB_ENTRIES - 1);
        }
        addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(io_index, addr, val, 4);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section.mr) & TARGET_PAGE_MASK)
            + section.offset_within_region
            + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    MemoryRegionSection section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section.mr) || section.readonly) {
        if (memory_region_is_ram(section.mr)) {
            io_index = io_mem_rom.ram_addr;
        } else {
            io_index = memory_region_get_ram_addr(section.mr)
                & (IO_MEM_NB_ENTRIES - 1);
        }
        addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(io_index, addr, val, 2);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section.mr) & TARGET_PAGE_MASK)
            + section.offset_within_region + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

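/* Illustrative sketch, not part of the original exec.c: publishing a
   64-bit field to guest memory in a fixed byte order, virtio-style.
   stq_le_phys() stores little-endian bytes regardless of host byte order.
   The ring_addr parameter is hypothetical. */
#if 0
static void example_publish_ring_entry(target_phys_addr_t ring_addr,
                                       uint64_t entry)
{
    stq_le_phys(ring_addr, entry);
}
#endif
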
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif

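/* Illustrative sketch, not part of the original exec.c: the gdb stub is
   the classic caller of cpu_memory_rw_debug().  It operates on virtual
   addresses and, because writes go through cpu_physical_memory_write_rom(),
   software breakpoints can be planted even in ROM.  The int3 opcode is a
   hypothetical x86-only choice. */
#if 0
static int example_plant_sw_breakpoint(CPUState *env, target_ulong pc,
                                       uint8_t *saved_insn)
{
    uint8_t bp_insn = 0xcc;                      /* hypothetical: x86 int3 */

    if (cpu_memory_rw_debug(env, pc, saved_insn, 1, 0) < 0) {
        return -1;                               /* no page mapped at pc */
    }
    return cpu_memory_rw_debug(env, pc, &bp_insn, 1, 1);
}
#endif
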
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB. */
    cpu_resume_from_signal(env, NULL);
}

#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
   is a ram_addr_t offset within guest RAM (historically, the offset
   relative to phys_ram_base) */
tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    void *p;

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1);
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
        ldub_code(addr);
    }
    pd = env1->tlb_table[mmu_idx][page_index].addr_code & ~TARGET_PAGE_MASK;
    if (pd != io_mem_ram.ram_addr && pd != io_mem_rom.ram_addr
        && !io_mem_region[pd]->rom_device) {
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC)
        cpu_unassigned_access(env1, addr, 0, 1, 0, 4);
#else
        cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
#endif
    }
    p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif