/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

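/* Heuristic for self-modifying-code handling: once
   tb_invalidate_phys_page_range() has been entered this many times for
   write accesses to a page, a bitmap of the bytes actually covered by
   TBs is built (see build_page_bitmap() below), so later writes that
   miss all TBs can skip the invalidation work. */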
#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

#define P_L2_LEVELS \
    (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)

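/* Worked example (illustrative values only): with 4KB target pages
   (TARGET_PAGE_BITS = 12) and L1_MAP_ADDR_SPACE_BITS = 64, there are
   52 bits left to map.  52 % 10 = 2 is below 4, so V_L1_BITS = 12,
   giving a 4096-entry L1 table with V_L1_SHIFT = 40, i.e. four 10-bit
   levels below it. */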
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageEntry PhysPageEntry;

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;

struct PhysPageEntry {
    union {
        uint16_t leaf; /* index into phys_sections */
        uint16_t node; /* index into phys_map_nodes */
    } u;
};

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL ((uint16_t)~0)

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to MemoryRegionSections.  */
static PhysPageEntry phys_map = { .u.node = PHYS_MAP_NODE_NIL };

static void io_mem_init(void);
static void memory_map_init(void);

/* io memory support */
MemoryRegion *io_mem_region[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static MemoryRegion io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

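/* Look up the PageDesc for page number 'index' in the multi-level map
   rooted at l1_map, consuming V_L1_BITS at the top level and L2_BITS
   at each level below.  When 'alloc' is set, missing intermediate
   tables (and the final PageDesc array) are allocated on the way down;
   otherwise the walk returns NULL at the first hole. */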
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)

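/* Make sure at least 'nodes' more PhysPageEntry nodes can be handed out
   without reallocation, growing the pool geometrically (doubling, with
   a floor of 16) so that repeated reservations stay amortized O(1). */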
static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].u.node = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}

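/* Recursive helper for phys_page_set(): at each level the node is
   allocated on demand (a fresh bottom-level node is initialized to the
   "unassigned" section), then 'leaf' is stored into every slot at this
   level covered by [*index, *index + *nb), recursing for non-leaf
   levels.  'index' and 'nb' are advanced as pages are consumed. */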
static void phys_page_set_level(PhysPageEntry *lp, target_phys_addr_t *index,
                                target_phys_addr_t *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;

    if (lp->u.node == PHYS_MAP_NODE_NIL) {
        lp->u.node = phys_map_node_alloc();
        p = phys_map_nodes[lp->u.node];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].u.leaf = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->u.node];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if (level == 0) {
            lp->u.leaf = leaf;
            ++*index;
            --*nb;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(target_phys_addr_t index, target_phys_addr_t nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve((nb + L2_SIZE - 1) / L2_SIZE * P_L2_LEVELS);

    phys_page_set_level(&phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

static MemoryRegionSection phys_page_find(target_phys_addr_t index)
{
    PhysPageEntry lp = phys_map;
    PhysPageEntry *p;
    int i;
    MemoryRegionSection section;
    target_phys_addr_t delta;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0; i--) {
        if (lp.u.node == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.u.node];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.u.leaf;
not_found:
    section = phys_sections[s_index];
    index <<= TARGET_PAGE_BITS;
    assert(section.offset_within_address_space <= index
           && index <= section.offset_within_address_space + section.size - 1);
    delta = index - section.offset_within_address_space;
    section.offset_within_address_space += delta;
    section.offset_within_region += delta;
    section.size -= delta;
    return section;
}

static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

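/* Allocate the buffer that will hold the generated host code.  On most
   hosts the buffer must sit within direct-branch range of itself and
   of the prologue, hence the per-architecture placement constraints
   and size caps below. */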
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Keep the buffer no bigger than 16MB to branch between blocks */
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now. */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

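/* Set bits [start, start + len) in the bitmap 'tab', handling the
   unaligned head and tail bytes separately from the 0xff-filled
   middle. */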
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

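/* Build the per-page bitmap flagging every byte of the page that is
   covered by at least one TB, so that tb_invalidate_phys_page_fast()
   can dismiss writes that cannot touch translated code. */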
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

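/* Translate one block starting at 'pc'.  If the TB array or the code
   buffer is exhausted, everything is flushed and the allocation is
   retried, which cannot fail a second time.  A block may straddle two
   pages; both physical pages are recorded so a write to either one can
   invalidate it. */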
pbrook2e70f6e2008-06-29 01:03:05 +00001045TranslationBlock *tb_gen_code(CPUState *env,
1046 target_ulong pc, target_ulong cs_base,
1047 int flags, int cflags)
bellardd720b932004-04-25 17:57:43 +00001048{
1049 TranslationBlock *tb;
1050 uint8_t *tc_ptr;
Paul Brook41c1b1c2010-03-12 16:54:58 +00001051 tb_page_addr_t phys_pc, phys_page2;
1052 target_ulong virt_page2;
bellardd720b932004-04-25 17:57:43 +00001053 int code_gen_size;
1054
Paul Brook41c1b1c2010-03-12 16:54:58 +00001055 phys_pc = get_page_addr_code(env, pc);
bellardc27004e2005-01-03 23:35:10 +00001056 tb = tb_alloc(pc);
bellardd720b932004-04-25 17:57:43 +00001057 if (!tb) {
1058 /* flush must be done */
1059 tb_flush(env);
1060 /* cannot fail at this point */
bellardc27004e2005-01-03 23:35:10 +00001061 tb = tb_alloc(pc);
pbrook2e70f6e2008-06-29 01:03:05 +00001062 /* Don't forget to invalidate previous TB info. */
1063 tb_invalidated_flag = 1;
bellardd720b932004-04-25 17:57:43 +00001064 }
1065 tc_ptr = code_gen_ptr;
1066 tb->tc_ptr = tc_ptr;
1067 tb->cs_base = cs_base;
1068 tb->flags = flags;
1069 tb->cflags = cflags;
blueswir1d07bde82007-12-11 19:35:45 +00001070 cpu_gen_code(env, tb, &code_gen_size);
bellardd720b932004-04-25 17:57:43 +00001071 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
ths3b46e622007-09-17 08:09:54 +00001072
bellardd720b932004-04-25 17:57:43 +00001073 /* check next page if needed */
bellardc27004e2005-01-03 23:35:10 +00001074 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
bellardd720b932004-04-25 17:57:43 +00001075 phys_page2 = -1;
bellardc27004e2005-01-03 23:35:10 +00001076 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
Paul Brook41c1b1c2010-03-12 16:54:58 +00001077 phys_page2 = get_page_addr_code(env, virt_page2);
bellardd720b932004-04-25 17:57:43 +00001078 }
Paul Brook41c1b1c2010-03-12 16:54:58 +00001079 tb_link_page(tb, phys_pc, phys_page2);
pbrook2e70f6e2008-06-29 01:03:05 +00001080 return tb;
bellardd720b932004-04-25 17:57:43 +00001081}
ths3b46e622007-09-17 08:09:54 +00001082
bellard9fa3e852004-01-04 18:06:42 +00001083/* invalidate all TBs which intersect with the target physical page
1084 starting in range [start;end[. NOTE: start and end must refer to
bellardd720b932004-04-25 17:57:43 +00001085 the same physical page. 'is_cpu_write_access' should be true if called
1086 from a real cpu write access: the virtual CPU will exit the current
1087 TB if code is modified inside this TB. */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001088void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
bellardd720b932004-04-25 17:57:43 +00001089 int is_cpu_write_access)
bellard9fa3e852004-01-04 18:06:42 +00001090{
aliguori6b917542008-11-18 19:46:41 +00001091 TranslationBlock *tb, *tb_next, *saved_tb;
bellardd720b932004-04-25 17:57:43 +00001092 CPUState *env = cpu_single_env;
Paul Brook41c1b1c2010-03-12 16:54:58 +00001093 tb_page_addr_t tb_start, tb_end;
aliguori6b917542008-11-18 19:46:41 +00001094 PageDesc *p;
1095 int n;
1096#ifdef TARGET_HAS_PRECISE_SMC
1097 int current_tb_not_found = is_cpu_write_access;
1098 TranslationBlock *current_tb = NULL;
1099 int current_tb_modified = 0;
1100 target_ulong current_pc = 0;
1101 target_ulong current_cs_base = 0;
1102 int current_flags = 0;
1103#endif /* TARGET_HAS_PRECISE_SMC */
bellard9fa3e852004-01-04 18:06:42 +00001104
1105 p = page_find(start >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001106 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00001107 return;
ths5fafdf22007-09-16 21:08:06 +00001108 if (!p->code_bitmap &&
bellardd720b932004-04-25 17:57:43 +00001109 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1110 is_cpu_write_access) {
bellard9fa3e852004-01-04 18:06:42 +00001111 /* build code bitmap */
1112 build_page_bitmap(p);
1113 }
1114
1115 /* we remove all the TBs in the range [start, end[ */
1116 /* XXX: see if in some cases it could be faster to invalidate all the code */
1117 tb = p->first_tb;
1118 while (tb != NULL) {
1119 n = (long)tb & 3;
1120 tb = (TranslationBlock *)((long)tb & ~3);
1121 tb_next = tb->page_next[n];
1122 /* NOTE: this is subtle as a TB may span two physical pages */
1123 if (n == 0) {
1124 /* NOTE: tb_end may be after the end of the page, but
1125 it is not a problem */
1126 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1127 tb_end = tb_start + tb->size;
1128 } else {
1129 tb_start = tb->page_addr[1];
1130 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1131 }
1132 if (!(tb_end <= start || tb_start >= end)) {
bellardd720b932004-04-25 17:57:43 +00001133#ifdef TARGET_HAS_PRECISE_SMC
1134 if (current_tb_not_found) {
1135 current_tb_not_found = 0;
1136 current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001137 if (env->mem_io_pc) {
bellardd720b932004-04-25 17:57:43 +00001138 /* now we have a real cpu fault */
pbrook2e70f6e2008-06-29 01:03:05 +00001139 current_tb = tb_find_pc(env->mem_io_pc);
bellardd720b932004-04-25 17:57:43 +00001140 }
1141 }
1142 if (current_tb == tb &&
pbrook2e70f6e2008-06-29 01:03:05 +00001143 (current_tb->cflags & CF_COUNT_MASK) != 1) {
bellardd720b932004-04-25 17:57:43 +00001144 /* If we are modifying the current TB, we must stop
1145 its execution. We could be more precise by checking
1146 that the modification is after the current PC, but it
1147 would require a specialized function to partially
1148 restore the CPU state */
ths3b46e622007-09-17 08:09:54 +00001149
bellardd720b932004-04-25 17:57:43 +00001150 current_tb_modified = 1;
Stefan Weil618ba8e2011-04-18 06:39:53 +00001151 cpu_restore_state(current_tb, env, env->mem_io_pc);
aliguori6b917542008-11-18 19:46:41 +00001152 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1153 &current_flags);
bellardd720b932004-04-25 17:57:43 +00001154 }
1155#endif /* TARGET_HAS_PRECISE_SMC */
bellard6f5a9f72005-11-26 20:12:28 +00001156 /* we need to do that to handle the case where a signal
1157 occurs while doing tb_phys_invalidate() */
1158 saved_tb = NULL;
1159 if (env) {
1160 saved_tb = env->current_tb;
1161 env->current_tb = NULL;
1162 }
bellard9fa3e852004-01-04 18:06:42 +00001163 tb_phys_invalidate(tb, -1);
bellard6f5a9f72005-11-26 20:12:28 +00001164 if (env) {
1165 env->current_tb = saved_tb;
1166 if (env->interrupt_request && env->current_tb)
1167 cpu_interrupt(env, env->interrupt_request);
1168 }
bellard9fa3e852004-01-04 18:06:42 +00001169 }
1170 tb = tb_next;
1171 }
1172#if !defined(CONFIG_USER_ONLY)
1173 /* if no code remaining, no need to continue to use slow writes */
1174 if (!p->first_tb) {
1175 invalidate_page_bitmap(p);
bellardd720b932004-04-25 17:57:43 +00001176 if (is_cpu_write_access) {
pbrook2e70f6e2008-06-29 01:03:05 +00001177 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
bellardd720b932004-04-25 17:57:43 +00001178 }
1179 }
1180#endif
1181#ifdef TARGET_HAS_PRECISE_SMC
1182 if (current_tb_modified) {
1183 /* we generate a block containing just the instruction
1184 modifying the memory. It will ensure that it cannot modify
1185 itself */
bellardea1c1802004-06-14 18:56:36 +00001186 env->current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001187 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
bellardd720b932004-04-25 17:57:43 +00001188 cpu_resume_from_signal(env, NULL);
bellard9fa3e852004-01-04 18:06:42 +00001189 }
1190#endif
1191}
1192
1193/* len must be <= 8 and start must be a multiple of len */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001194static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
bellard9fa3e852004-01-04 18:06:42 +00001195{
1196 PageDesc *p;
1197 int offset, b;
bellard59817cc2004-02-16 22:01:13 +00001198#if 0
bellarda4193c82004-06-03 14:01:43 +00001199 if (1) {
aliguori93fcfe32009-01-15 22:34:14 +00001200 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1201 cpu_single_env->mem_io_vaddr, len,
1202 cpu_single_env->eip,
1203 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
bellard59817cc2004-02-16 22:01:13 +00001204 }
1205#endif
bellard9fa3e852004-01-04 18:06:42 +00001206 p = page_find(start >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001207 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00001208 return;
1209 if (p->code_bitmap) {
1210 offset = start & ~TARGET_PAGE_MASK;
1211 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1212 if (b & ((1 << len) - 1))
1213 goto do_invalidate;
1214 } else {
1215 do_invalidate:
bellardd720b932004-04-25 17:57:43 +00001216 tb_invalidate_phys_page_range(start, start + len, 1);
bellard9fa3e852004-01-04 18:06:42 +00001217 }
1218}
1219
bellard9fa3e852004-01-04 18:06:42 +00001220#if !defined(CONFIG_SOFTMMU)
Paul Brook41c1b1c2010-03-12 16:54:58 +00001221static void tb_invalidate_phys_page(tb_page_addr_t addr,
bellardd720b932004-04-25 17:57:43 +00001222 unsigned long pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00001223{
aliguori6b917542008-11-18 19:46:41 +00001224 TranslationBlock *tb;
bellard9fa3e852004-01-04 18:06:42 +00001225 PageDesc *p;
aliguori6b917542008-11-18 19:46:41 +00001226 int n;
bellardd720b932004-04-25 17:57:43 +00001227#ifdef TARGET_HAS_PRECISE_SMC
aliguori6b917542008-11-18 19:46:41 +00001228 TranslationBlock *current_tb = NULL;
bellardd720b932004-04-25 17:57:43 +00001229 CPUState *env = cpu_single_env;
aliguori6b917542008-11-18 19:46:41 +00001230 int current_tb_modified = 0;
1231 target_ulong current_pc = 0;
1232 target_ulong current_cs_base = 0;
1233 int current_flags = 0;
bellardd720b932004-04-25 17:57:43 +00001234#endif
bellard9fa3e852004-01-04 18:06:42 +00001235
1236 addr &= TARGET_PAGE_MASK;
1237 p = page_find(addr >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001238 if (!p)
bellardfd6ce8f2003-05-14 19:00:11 +00001239 return;
1240 tb = p->first_tb;
bellardd720b932004-04-25 17:57:43 +00001241#ifdef TARGET_HAS_PRECISE_SMC
1242 if (tb && pc != 0) {
1243 current_tb = tb_find_pc(pc);
1244 }
1245#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001246 while (tb != NULL) {
bellard9fa3e852004-01-04 18:06:42 +00001247 n = (long)tb & 3;
1248 tb = (TranslationBlock *)((long)tb & ~3);
bellardd720b932004-04-25 17:57:43 +00001249#ifdef TARGET_HAS_PRECISE_SMC
1250 if (current_tb == tb &&
pbrook2e70f6e2008-06-29 01:03:05 +00001251 (current_tb->cflags & CF_COUNT_MASK) != 1) {
bellardd720b932004-04-25 17:57:43 +00001252 /* If we are modifying the current TB, we must stop
1253 its execution. We could be more precise by checking
1254 that the modification is after the current PC, but it
1255 would require a specialized function to partially
1256 restore the CPU state */
ths3b46e622007-09-17 08:09:54 +00001257
bellardd720b932004-04-25 17:57:43 +00001258 current_tb_modified = 1;
Stefan Weil618ba8e2011-04-18 06:39:53 +00001259 cpu_restore_state(current_tb, env, pc);
aliguori6b917542008-11-18 19:46:41 +00001260 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1261 &current_flags);
bellardd720b932004-04-25 17:57:43 +00001262 }
1263#endif /* TARGET_HAS_PRECISE_SMC */
bellard9fa3e852004-01-04 18:06:42 +00001264 tb_phys_invalidate(tb, addr);
1265 tb = tb->page_next[n];
bellardfd6ce8f2003-05-14 19:00:11 +00001266 }
1267 p->first_tb = NULL;
bellardd720b932004-04-25 17:57:43 +00001268#ifdef TARGET_HAS_PRECISE_SMC
1269 if (current_tb_modified) {
1270 /* we generate a block containing just the instruction
1271 modifying the memory. It will ensure that it cannot modify
1272 itself */
bellardea1c1802004-06-14 18:56:36 +00001273 env->current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001274 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
bellardd720b932004-04-25 17:57:43 +00001275 cpu_resume_from_signal(env, puc);
1276 }
1277#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001278}
bellard9fa3e852004-01-04 18:06:42 +00001279#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001280
1281/* add the tb in the target page and protect it if necessary */
ths5fafdf22007-09-16 21:08:06 +00001282static inline void tb_alloc_page(TranslationBlock *tb,
Paul Brook41c1b1c2010-03-12 16:54:58 +00001283 unsigned int n, tb_page_addr_t page_addr)
bellardfd6ce8f2003-05-14 19:00:11 +00001284{
1285 PageDesc *p;
Juan Quintela4429ab42011-06-02 01:53:44 +00001286#ifndef CONFIG_USER_ONLY
1287 bool page_already_protected;
1288#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001289
bellard9fa3e852004-01-04 18:06:42 +00001290 tb->page_addr[n] = page_addr;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001291 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
bellard9fa3e852004-01-04 18:06:42 +00001292 tb->page_next[n] = p->first_tb;
Juan Quintela4429ab42011-06-02 01:53:44 +00001293#ifndef CONFIG_USER_ONLY
1294 page_already_protected = p->first_tb != NULL;
1295#endif
bellard9fa3e852004-01-04 18:06:42 +00001296 p->first_tb = (TranslationBlock *)((long)tb | n);
1297 invalidate_page_bitmap(p);
1298
bellard107db442004-06-22 18:48:46 +00001299#if defined(TARGET_HAS_SMC) || 1
bellardd720b932004-04-25 17:57:43 +00001300
bellard9fa3e852004-01-04 18:06:42 +00001301#if defined(CONFIG_USER_ONLY)
bellardfd6ce8f2003-05-14 19:00:11 +00001302 if (p->flags & PAGE_WRITE) {
pbrook53a59602006-03-25 19:31:22 +00001303 target_ulong addr;
1304 PageDesc *p2;
bellard9fa3e852004-01-04 18:06:42 +00001305 int prot;
1306
bellardfd6ce8f2003-05-14 19:00:11 +00001307 /* force the host page to be non-writable (writes will have a
1308 page fault + mprotect overhead) */
pbrook53a59602006-03-25 19:31:22 +00001309 page_addr &= qemu_host_page_mask;
bellardfd6ce8f2003-05-14 19:00:11 +00001310 prot = 0;
pbrook53a59602006-03-25 19:31:22 +00001311 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1312 addr += TARGET_PAGE_SIZE) {
1313
1314 p2 = page_find (addr >> TARGET_PAGE_BITS);
1315 if (!p2)
1316 continue;
1317 prot |= p2->flags;
1318 p2->flags &= ~PAGE_WRITE;
pbrook53a59602006-03-25 19:31:22 +00001319 }
ths5fafdf22007-09-16 21:08:06 +00001320 mprotect(g2h(page_addr), qemu_host_page_size,
bellardfd6ce8f2003-05-14 19:00:11 +00001321 (prot & PAGE_BITS) & ~PAGE_WRITE);
1322#ifdef DEBUG_TB_INVALIDATE
blueswir1ab3d1722007-11-04 07:31:40 +00001323 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
pbrook53a59602006-03-25 19:31:22 +00001324 page_addr);
bellardfd6ce8f2003-05-14 19:00:11 +00001325#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001326 }
bellard9fa3e852004-01-04 18:06:42 +00001327#else
1328 /* if some code is already present, then the pages are already
1329 protected. So we handle the case where only the first TB is
1330 allocated in a physical page */
Juan Quintela4429ab42011-06-02 01:53:44 +00001331 if (!page_already_protected) {
bellard6a00d602005-11-21 23:25:50 +00001332 tlb_protect_code(page_addr);
bellard9fa3e852004-01-04 18:06:42 +00001333 }
1334#endif
bellardd720b932004-04-25 17:57:43 +00001335
1336#endif /* TARGET_HAS_SMC */
bellardfd6ce8f2003-05-14 19:00:11 +00001337}
1338
bellard9fa3e852004-01-04 18:06:42 +00001339/* add a new TB and link it to the physical page tables. phys_page2 is
1340 (-1) to indicate that only one page contains the TB. */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001341void tb_link_page(TranslationBlock *tb,
1342 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
bellardd4e81642003-05-25 16:46:15 +00001343{
bellard9fa3e852004-01-04 18:06:42 +00001344 unsigned int h;
1345 TranslationBlock **ptb;
1346
pbrookc8a706f2008-06-02 16:16:42 +00001347 /* Grab the mmap lock to stop another thread invalidating this TB
1348 before we are done. */
1349 mmap_lock();
bellard9fa3e852004-01-04 18:06:42 +00001350 /* add in the physical hash table */
1351 h = tb_phys_hash_func(phys_pc);
1352 ptb = &tb_phys_hash[h];
1353 tb->phys_hash_next = *ptb;
1354 *ptb = tb;
bellardfd6ce8f2003-05-14 19:00:11 +00001355
1356 /* add in the page list */
bellard9fa3e852004-01-04 18:06:42 +00001357 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1358 if (phys_page2 != -1)
1359 tb_alloc_page(tb, 1, phys_page2);
1360 else
1361 tb->page_addr[1] = -1;
bellard9fa3e852004-01-04 18:06:42 +00001362
bellardd4e81642003-05-25 16:46:15 +00001363 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1364 tb->jmp_next[0] = NULL;
1365 tb->jmp_next[1] = NULL;
1366
1367 /* init original jump addresses */
1368 if (tb->tb_next_offset[0] != 0xffff)
1369 tb_reset_jump(tb, 0);
1370 if (tb->tb_next_offset[1] != 0xffff)
1371 tb_reset_jump(tb, 1);
bellard8a40a182005-11-20 10:35:40 +00001372
1373#ifdef DEBUG_TB_CHECK
1374 tb_page_check();
1375#endif
pbrookc8a706f2008-06-02 16:16:42 +00001376 mmap_unlock();
bellardfd6ce8f2003-05-14 19:00:11 +00001377}
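/* Usage sketch (illustrative, not part of the build): a caller such as
   tb_gen_code() is expected to link a freshly translated TB roughly
   like this, with phys_pc taken from get_page_addr_code() and
   phys_page2 left at -1 unless the code spills onto a second page:

       virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
       phys_page2 = -1;
       if ((pc & TARGET_PAGE_MASK) != virt_page2)
           phys_page2 = get_page_addr_code(env, virt_page2);
       tb_link_page(tb, phys_pc, phys_page2);
*/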
1378
bellarda513fe12003-05-27 23:29:48 +00001379/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1380 tb[1].tc_ptr. Return NULL if not found */
1381TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1382{
1383 int m_min, m_max, m;
1384 unsigned long v;
1385 TranslationBlock *tb;
1386
1387 if (nb_tbs <= 0)
1388 return NULL;
1389 if (tc_ptr < (unsigned long)code_gen_buffer ||
1390 tc_ptr >= (unsigned long)code_gen_ptr)
1391 return NULL;
1392 /* binary search (cf Knuth) */
1393 m_min = 0;
1394 m_max = nb_tbs - 1;
1395 while (m_min <= m_max) {
1396 m = (m_min + m_max) >> 1;
1397 tb = &tbs[m];
1398 v = (unsigned long)tb->tc_ptr;
1399 if (v == tc_ptr)
1400 return tb;
1401 else if (tc_ptr < v) {
1402 m_max = m - 1;
1403 } else {
1404 m_min = m + 1;
1405 }
ths5fafdf22007-09-16 21:08:06 +00001406 }
bellarda513fe12003-05-27 23:29:48 +00001407 return &tbs[m_max];
1408}
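/* Usage sketch: cpu_restore_state() uses this to map a host PC, e.g.
   one taken from a SIGSEGV handler while executing generated code,
   back to the TB that contains it (host_pc is a stand-in name here):

       TranslationBlock *tb = tb_find_pc((unsigned long)host_pc);
       if (tb)
           cpu_restore_state(tb, env, host_pc);
*/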
bellard75012672003-06-21 13:11:07 +00001409
bellardea041c02003-06-25 16:16:50 +00001410static void tb_reset_jump_recursive(TranslationBlock *tb);
1411
1412static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1413{
1414 TranslationBlock *tb1, *tb_next, **ptb;
1415 unsigned int n1;
1416
1417 tb1 = tb->jmp_next[n];
1418 if (tb1 != NULL) {
1419 /* find head of list */
1420 for(;;) {
1421 n1 = (long)tb1 & 3;
1422 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1423 if (n1 == 2)
1424 break;
1425 tb1 = tb1->jmp_next[n1];
1426 }
1427 /* we are now sure that tb jumps to tb1 */
1428 tb_next = tb1;
1429
1430 /* remove tb from the jmp_first list */
1431 ptb = &tb_next->jmp_first;
1432 for(;;) {
1433 tb1 = *ptb;
1434 n1 = (long)tb1 & 3;
1435 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1436 if (n1 == n && tb1 == tb)
1437 break;
1438 ptb = &tb1->jmp_next[n1];
1439 }
1440 *ptb = tb->jmp_next[n];
1441 tb->jmp_next[n] = NULL;
ths3b46e622007-09-17 08:09:54 +00001442
bellardea041c02003-06-25 16:16:50 +00001443 /* suppress the jump to next tb in generated code */
1444 tb_reset_jump(tb, n);
1445
bellard01243112004-01-04 15:48:17 +00001446 /* recursively suppress jumps in the tb we could have jumped to */
bellardea041c02003-06-25 16:16:50 +00001447 tb_reset_jump_recursive(tb_next);
1448 }
1449}
1450
1451static void tb_reset_jump_recursive(TranslationBlock *tb)
1452{
1453 tb_reset_jump_recursive2(tb, 0);
1454 tb_reset_jump_recursive2(tb, 1);
1455}
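/* Note on the pointer encoding used above: the low 2 bits of the
   jmp_first/jmp_next pointers are a tag, not part of the address, so
   each traversal step decodes

       n1 = (long)tb1 & 3;
       tb1 = (TranslationBlock *)((long)tb1 & ~3);

   where n1 == 0 or 1 means tb1 was reached through the previous TB's
   jmp_next[n1], and n1 == 2 marks the list head (the TB itself, as
   set up in tb_link_page() above). */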
1456
bellard1fddef42005-04-17 19:16:13 +00001457#if defined(TARGET_HAS_ICE)
Paul Brook94df27f2010-02-28 23:47:45 +00001458#if defined(CONFIG_USER_ONLY)
1459static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1460{
1461 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1462}
1463#else
bellardd720b932004-04-25 17:57:43 +00001464static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1465{
Anthony Liguoric227f092009-10-01 16:12:16 -05001466 target_phys_addr_t addr;
Anthony Liguoric227f092009-10-01 16:12:16 -05001467 ram_addr_t ram_addr;
Avi Kivity06ef3522012-02-13 16:11:22 +02001468 MemoryRegionSection section;
bellardd720b932004-04-25 17:57:43 +00001469
pbrookc2f07f82006-04-08 17:14:56 +00001470 addr = cpu_get_phys_page_debug(env, pc);
Avi Kivity06ef3522012-02-13 16:11:22 +02001471 section = phys_page_find(addr >> TARGET_PAGE_BITS);
1472 if (!(memory_region_is_ram(section.mr)
1473 || (section.mr->rom_device && section.mr->readable))) {
1474 return;
1475 }
1476 ram_addr = (memory_region_get_ram_addr(section.mr)
1477 + section.offset_within_region) & TARGET_PAGE_MASK;
1478 ram_addr |= (pc & ~TARGET_PAGE_MASK);
pbrook706cd4b2006-04-08 17:36:21 +00001479 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
bellardd720b932004-04-25 17:57:43 +00001480}
bellardc27004e2005-01-03 23:35:10 +00001481#endif
Paul Brook94df27f2010-02-28 23:47:45 +00001482#endif /* TARGET_HAS_ICE */
bellardd720b932004-04-25 17:57:43 +00001483
Paul Brookc527ee82010-03-01 03:31:14 +00001484#if defined(CONFIG_USER_ONLY)
1485void cpu_watchpoint_remove_all(CPUState *env, int mask)
1486{
1488}
1489
1490int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1491 int flags, CPUWatchpoint **watchpoint)
1492{
1493 return -ENOSYS;
1494}
1495#else
pbrook6658ffb2007-03-16 23:58:11 +00001496/* Add a watchpoint. */
aliguoria1d1bb32008-11-18 20:07:32 +00001497int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1498 int flags, CPUWatchpoint **watchpoint)
pbrook6658ffb2007-03-16 23:58:11 +00001499{
aliguorib4051332008-11-18 20:14:20 +00001500 target_ulong len_mask = ~(len - 1);
aliguoric0ce9982008-11-25 22:13:57 +00001501 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001502
aliguorib4051332008-11-18 20:14:20 +00001503 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1504 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1505 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1506 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1507 return -EINVAL;
1508 }
Anthony Liguori7267c092011-08-20 22:09:37 -05001509 wp = g_malloc(sizeof(*wp));
pbrook6658ffb2007-03-16 23:58:11 +00001510
aliguoria1d1bb32008-11-18 20:07:32 +00001511 wp->vaddr = addr;
aliguorib4051332008-11-18 20:14:20 +00001512 wp->len_mask = len_mask;
aliguoria1d1bb32008-11-18 20:07:32 +00001513 wp->flags = flags;
1514
aliguori2dc9f412008-11-18 20:56:59 +00001515 /* keep all GDB-injected watchpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001516 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001517 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001518 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001519 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001520
pbrook6658ffb2007-03-16 23:58:11 +00001521 tlb_flush_page(env, addr);
aliguoria1d1bb32008-11-18 20:07:32 +00001522
1523 if (watchpoint)
1524 *watchpoint = wp;
1525 return 0;
pbrook6658ffb2007-03-16 23:58:11 +00001526}
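/* Usage sketch (illustrative): the gdbstub inserts a 4-byte write
   watchpoint along these lines; BP_GDB keeps it at the head of the
   list and BP_MEM_WRITE selects the access type:

       CPUWatchpoint *wp;
       if (cpu_watchpoint_insert(env, addr, 4,
                                 BP_GDB | BP_MEM_WRITE, &wp) < 0)
           return -EINVAL;

   The error path fires for non-power-of-2 lengths or an address that
   is not aligned to the length. */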
1527
aliguoria1d1bb32008-11-18 20:07:32 +00001528/* Remove a specific watchpoint. */
1529int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1530 int flags)
pbrook6658ffb2007-03-16 23:58:11 +00001531{
aliguorib4051332008-11-18 20:14:20 +00001532 target_ulong len_mask = ~(len - 1);
aliguoria1d1bb32008-11-18 20:07:32 +00001533 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001534
Blue Swirl72cf2d42009-09-12 07:36:22 +00001535 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001536 if (addr == wp->vaddr && len_mask == wp->len_mask
aliguori6e140f22008-11-18 20:37:55 +00001537 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
aliguoria1d1bb32008-11-18 20:07:32 +00001538 cpu_watchpoint_remove_by_ref(env, wp);
pbrook6658ffb2007-03-16 23:58:11 +00001539 return 0;
1540 }
1541 }
aliguoria1d1bb32008-11-18 20:07:32 +00001542 return -ENOENT;
pbrook6658ffb2007-03-16 23:58:11 +00001543}
1544
aliguoria1d1bb32008-11-18 20:07:32 +00001545/* Remove a specific watchpoint by reference. */
1546void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1547{
Blue Swirl72cf2d42009-09-12 07:36:22 +00001548 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
edgar_igl7d03f822008-05-17 18:58:29 +00001549
aliguoria1d1bb32008-11-18 20:07:32 +00001550 tlb_flush_page(env, watchpoint->vaddr);
1551
Anthony Liguori7267c092011-08-20 22:09:37 -05001552 g_free(watchpoint);
edgar_igl7d03f822008-05-17 18:58:29 +00001553}
1554
aliguoria1d1bb32008-11-18 20:07:32 +00001555/* Remove all matching watchpoints. */
1556void cpu_watchpoint_remove_all(CPUState *env, int mask)
1557{
aliguoric0ce9982008-11-25 22:13:57 +00001558 CPUWatchpoint *wp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001559
Blue Swirl72cf2d42009-09-12 07:36:22 +00001560 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001561 if (wp->flags & mask)
1562 cpu_watchpoint_remove_by_ref(env, wp);
aliguoric0ce9982008-11-25 22:13:57 +00001563 }
aliguoria1d1bb32008-11-18 20:07:32 +00001564}
Paul Brookc527ee82010-03-01 03:31:14 +00001565#endif
aliguoria1d1bb32008-11-18 20:07:32 +00001566
1567/* Add a breakpoint. */
1568int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1569 CPUBreakpoint **breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001570{
bellard1fddef42005-04-17 19:16:13 +00001571#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001572 CPUBreakpoint *bp;
ths3b46e622007-09-17 08:09:54 +00001573
Anthony Liguori7267c092011-08-20 22:09:37 -05001574 bp = g_malloc(sizeof(*bp));
aliguoria1d1bb32008-11-18 20:07:32 +00001575
1576 bp->pc = pc;
1577 bp->flags = flags;
1578
aliguori2dc9f412008-11-18 20:56:59 +00001579 /* keep all GDB-injected breakpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001580 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001581 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001582 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001583 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001584
1585 breakpoint_invalidate(env, pc);
1586
1587 if (breakpoint)
1588 *breakpoint = bp;
1589 return 0;
1590#else
1591 return -ENOSYS;
1592#endif
1593}
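/* Usage sketch: a debugger front end typically pairs insert/remove as

       CPUBreakpoint *bp;
       if (cpu_breakpoint_insert(env, pc, BP_GDB, &bp) < 0)
           return -1;
       ...
       cpu_breakpoint_remove_by_ref(env, bp);

   On targets built without TARGET_HAS_ICE the insert fails with
   -ENOSYS. */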
1594
1595/* Remove a specific breakpoint. */
1596int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1597{
1598#if defined(TARGET_HAS_ICE)
1599 CPUBreakpoint *bp;
1600
Blue Swirl72cf2d42009-09-12 07:36:22 +00001601 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00001602 if (bp->pc == pc && bp->flags == flags) {
1603 cpu_breakpoint_remove_by_ref(env, bp);
bellard4c3a88a2003-07-26 12:06:08 +00001604 return 0;
aliguoria1d1bb32008-11-18 20:07:32 +00001605 }
bellard4c3a88a2003-07-26 12:06:08 +00001606 }
aliguoria1d1bb32008-11-18 20:07:32 +00001607 return -ENOENT;
bellard4c3a88a2003-07-26 12:06:08 +00001608#else
aliguoria1d1bb32008-11-18 20:07:32 +00001609 return -ENOSYS;
bellard4c3a88a2003-07-26 12:06:08 +00001610#endif
1611}
1612
aliguoria1d1bb32008-11-18 20:07:32 +00001613/* Remove a specific breakpoint by reference. */
1614void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001615{
bellard1fddef42005-04-17 19:16:13 +00001616#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001617 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
bellardd720b932004-04-25 17:57:43 +00001618
aliguoria1d1bb32008-11-18 20:07:32 +00001619 breakpoint_invalidate(env, breakpoint->pc);
1620
Anthony Liguori7267c092011-08-20 22:09:37 -05001621 g_free(breakpoint);
aliguoria1d1bb32008-11-18 20:07:32 +00001622#endif
1623}
1624
1625/* Remove all matching breakpoints. */
1626void cpu_breakpoint_remove_all(CPUState *env, int mask)
1627{
1628#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001629 CPUBreakpoint *bp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001630
Blue Swirl72cf2d42009-09-12 07:36:22 +00001631 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001632 if (bp->flags & mask)
1633 cpu_breakpoint_remove_by_ref(env, bp);
aliguoric0ce9982008-11-25 22:13:57 +00001634 }
bellard4c3a88a2003-07-26 12:06:08 +00001635#endif
1636}
1637
bellardc33a3462003-07-29 20:50:33 +00001638/* enable or disable single step mode. EXCP_DEBUG is returned by the
1639 CPU loop after each instruction */
1640void cpu_single_step(CPUState *env, int enabled)
1641{
bellard1fddef42005-04-17 19:16:13 +00001642#if defined(TARGET_HAS_ICE)
bellardc33a3462003-07-29 20:50:33 +00001643 if (env->singlestep_enabled != enabled) {
1644 env->singlestep_enabled = enabled;
aliguorie22a25c2009-03-12 20:12:48 +00001645 if (kvm_enabled())
1646 kvm_update_guest_debug(env, 0);
1647 else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01001648 /* must flush all the translated code to avoid inconsistencies */
aliguorie22a25c2009-03-12 20:12:48 +00001649 /* XXX: only flush what is necessary */
1650 tb_flush(env);
1651 }
bellardc33a3462003-07-29 20:50:33 +00001652 }
1653#endif
1654}
1655
bellard34865132003-10-05 14:28:56 +00001656/* enable or disable low level logging */
1657void cpu_set_log(int log_flags)
1658{
1659 loglevel = log_flags;
1660 if (loglevel && !logfile) {
pbrook11fcfab2007-07-01 18:21:11 +00001661 logfile = fopen(logfilename, log_append ? "a" : "w");
bellard34865132003-10-05 14:28:56 +00001662 if (!logfile) {
1663 perror(logfilename);
1664 _exit(1);
1665 }
bellard9fa3e852004-01-04 18:06:42 +00001666#if !defined(CONFIG_SOFTMMU)
1667 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1668 {
blueswir1b55266b2008-09-20 08:07:15 +00001669 static char logfile_buf[4096];
bellard9fa3e852004-01-04 18:06:42 +00001670 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1671 }
Stefan Weildaf767b2011-12-03 22:32:37 +01001672#elif defined(_WIN32)
1673 /* Win32 doesn't support line-buffering, so use unbuffered output. */
1674 setvbuf(logfile, NULL, _IONBF, 0);
1675#else
bellard34865132003-10-05 14:28:56 +00001676 setvbuf(logfile, NULL, _IOLBF, 0);
bellard9fa3e852004-01-04 18:06:42 +00001677#endif
pbrooke735b912007-06-30 13:53:24 +00001678 log_append = 1;
1679 }
1680 if (!loglevel && logfile) {
1681 fclose(logfile);
1682 logfile = NULL;
bellard34865132003-10-05 14:28:56 +00001683 }
1684}
1685
1686void cpu_set_log_filename(const char *filename)
1687{
1688 logfilename = strdup(filename);
pbrooke735b912007-06-30 13:53:24 +00001689 if (logfile) {
1690 fclose(logfile);
1691 logfile = NULL;
1692 }
1693 cpu_set_log(loglevel);
bellard34865132003-10-05 14:28:56 +00001694}
bellardc33a3462003-07-29 20:50:33 +00001695
aurel323098dba2009-03-07 21:28:24 +00001696static void cpu_unlink_tb(CPUState *env)
bellardea041c02003-06-25 16:16:50 +00001697{
pbrookd5975362008-06-07 20:50:51 +00001698 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1699 problem and hope the cpu will stop of its own accord. For userspace
1700 emulation this often isn't actually as bad as it sounds. Often
1701 signals are used primarily to interrupt blocking syscalls. */
aurel323098dba2009-03-07 21:28:24 +00001702 TranslationBlock *tb;
Anthony Liguoric227f092009-10-01 16:12:16 -05001703 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
aurel323098dba2009-03-07 21:28:24 +00001704
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001705 spin_lock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001706 tb = env->current_tb;
1707 /* if the cpu is currently executing code, we must unlink it and
1708 all the potentially executing TB */
Riku Voipiof76cfe52009-12-04 15:16:30 +02001709 if (tb) {
aurel323098dba2009-03-07 21:28:24 +00001710 env->current_tb = NULL;
1711 tb_reset_jump_recursive(tb);
aurel323098dba2009-03-07 21:28:24 +00001712 }
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001713 spin_unlock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001714}
1715
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001716#ifndef CONFIG_USER_ONLY
aurel323098dba2009-03-07 21:28:24 +00001717/* mask must never be zero, except for A20 change call */
Jan Kiszkaec6959d2011-04-13 01:32:56 +02001718static void tcg_handle_interrupt(CPUState *env, int mask)
aurel323098dba2009-03-07 21:28:24 +00001719{
1720 int old_mask;
1721
1722 old_mask = env->interrupt_request;
1723 env->interrupt_request |= mask;
1724
aliguori8edac962009-04-24 18:03:45 +00001725 /*
1726 * If called from iothread context, wake the target cpu in
1727 * case it's halted.
1728 */
Jan Kiszkab7680cb2011-03-12 17:43:51 +01001729 if (!qemu_cpu_is_self(env)) {
aliguori8edac962009-04-24 18:03:45 +00001730 qemu_cpu_kick(env);
1731 return;
1732 }
aliguori8edac962009-04-24 18:03:45 +00001733
pbrook2e70f6e2008-06-29 01:03:05 +00001734 if (use_icount) {
pbrook266910c2008-07-09 15:31:50 +00001735 env->icount_decr.u16.high = 0xffff;
pbrook2e70f6e2008-06-29 01:03:05 +00001736 if (!can_do_io(env)
aurel32be214e62009-03-06 21:48:00 +00001737 && (mask & ~old_mask) != 0) {
pbrook2e70f6e2008-06-29 01:03:05 +00001738 cpu_abort(env, "Raised interrupt while not in I/O function");
1739 }
pbrook2e70f6e2008-06-29 01:03:05 +00001740 } else {
aurel323098dba2009-03-07 21:28:24 +00001741 cpu_unlink_tb(env);
bellardea041c02003-06-25 16:16:50 +00001742 }
1743}
1744
Jan Kiszkaec6959d2011-04-13 01:32:56 +02001745CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1746
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001747#else /* CONFIG_USER_ONLY */
1748
1749void cpu_interrupt(CPUState *env, int mask)
1750{
1751 env->interrupt_request |= mask;
1752 cpu_unlink_tb(env);
1753}
1754#endif /* CONFIG_USER_ONLY */
1755
bellardb54ad042004-05-20 13:42:52 +00001756void cpu_reset_interrupt(CPUState *env, int mask)
1757{
1758 env->interrupt_request &= ~mask;
1759}
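/* Usage sketch: interrupt controller models raise and lower a pending
   interrupt with these generic bits, e.g. with the architectural
   "hard" interrupt:

       cpu_interrupt(env, CPU_INTERRUPT_HARD);
       ...
       cpu_reset_interrupt(env, CPU_INTERRUPT_HARD);

   cpu_interrupt() also kicks the target VCPU (or unlinks its current
   TB) so the request is noticed promptly. */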
1760
aurel323098dba2009-03-07 21:28:24 +00001761void cpu_exit(CPUState *env)
1762{
1763 env->exit_request = 1;
1764 cpu_unlink_tb(env);
1765}
1766
blueswir1c7cd6a32008-10-02 18:27:46 +00001767const CPULogItem cpu_log_items[] = {
ths5fafdf22007-09-16 21:08:06 +00001768 { CPU_LOG_TB_OUT_ASM, "out_asm",
bellardf193c792004-03-21 17:06:25 +00001769 "show generated host assembly code for each compiled TB" },
1770 { CPU_LOG_TB_IN_ASM, "in_asm",
1771 "show target assembly code for each compiled TB" },
ths5fafdf22007-09-16 21:08:06 +00001772 { CPU_LOG_TB_OP, "op",
bellard57fec1f2008-02-01 10:50:11 +00001773 "show micro ops for each compiled TB" },
bellardf193c792004-03-21 17:06:25 +00001774 { CPU_LOG_TB_OP_OPT, "op_opt",
blueswir1e01a1152008-03-14 17:37:11 +00001775 "show micro ops "
1776#ifdef TARGET_I386
1777 "before eflags optimization and "
bellardf193c792004-03-21 17:06:25 +00001778#endif
blueswir1e01a1152008-03-14 17:37:11 +00001779 "after liveness analysis" },
bellardf193c792004-03-21 17:06:25 +00001780 { CPU_LOG_INT, "int",
1781 "show interrupts/exceptions in short format" },
1782 { CPU_LOG_EXEC, "exec",
1783 "show trace before each executed TB (lots of logs)" },
bellard9fddaa02004-05-21 12:59:32 +00001784 { CPU_LOG_TB_CPU, "cpu",
thse91c8a72007-06-03 13:35:16 +00001785 "show CPU state before block translation" },
bellardf193c792004-03-21 17:06:25 +00001786#ifdef TARGET_I386
1787 { CPU_LOG_PCALL, "pcall",
1788 "show protected mode far calls/returns/exceptions" },
aliguorieca1bdf2009-01-26 19:54:31 +00001789 { CPU_LOG_RESET, "cpu_reset",
1790 "show CPU state before CPU resets" },
bellardf193c792004-03-21 17:06:25 +00001791#endif
bellard8e3a9fd2004-10-09 17:32:58 +00001792#ifdef DEBUG_IOPORT
bellardfd872592004-05-12 19:11:15 +00001793 { CPU_LOG_IOPORT, "ioport",
1794 "show all i/o ports accesses" },
bellard8e3a9fd2004-10-09 17:32:58 +00001795#endif
bellardf193c792004-03-21 17:06:25 +00001796 { 0, NULL, NULL },
1797};
1798
1799static int cmp1(const char *s1, int n, const char *s2)
1800{
1801 if (strlen(s2) != n)
1802 return 0;
1803 return memcmp(s1, s2, n) == 0;
1804}
ths3b46e622007-09-17 08:09:54 +00001805
bellardf193c792004-03-21 17:06:25 +00001806/* takes a comma separated list of log masks. Return 0 if error. */
1807int cpu_str_to_log_mask(const char *str)
1808{
blueswir1c7cd6a32008-10-02 18:27:46 +00001809 const CPULogItem *item;
bellardf193c792004-03-21 17:06:25 +00001810 int mask;
1811 const char *p, *p1;
1812
1813 p = str;
1814 mask = 0;
1815 for(;;) {
1816 p1 = strchr(p, ',');
1817 if (!p1)
1818 p1 = p + strlen(p);
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001819 if (cmp1(p, p1 - p, "all")) {
1820 for(item = cpu_log_items; item->mask != 0; item++) {
1821 mask |= item->mask;
1822 }
1823 } else {
1824 for(item = cpu_log_items; item->mask != 0; item++) {
1825 if (cmp1(p, p1 - p, item->name))
1826 goto found;
1827 }
1828 return 0;
bellardf193c792004-03-21 17:06:25 +00001829 }
bellardf193c792004-03-21 17:06:25 +00001830 found:
1831 mask |= item->mask;
1832 if (*p1 != ',')
1833 break;
1834 p = p1 + 1;
1835 }
1836 return mask;
1837}
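/* Example: cpu_str_to_log_mask("in_asm,op") yields
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_OP, "all" selects every entry of
   cpu_log_items, and any unknown name makes the whole call return 0.
   The -d command line option is parsed this way, roughly:

       int mask = cpu_str_to_log_mask(optarg);
       if (!mask)
           exit(1);
       cpu_set_log(mask);
*/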
bellardea041c02003-06-25 16:16:50 +00001838
bellard75012672003-06-21 13:11:07 +00001839void cpu_abort(CPUState *env, const char *fmt, ...)
1840{
1841 va_list ap;
pbrook493ae1f2007-11-23 16:53:59 +00001842 va_list ap2;
bellard75012672003-06-21 13:11:07 +00001843
1844 va_start(ap, fmt);
pbrook493ae1f2007-11-23 16:53:59 +00001845 va_copy(ap2, ap);
bellard75012672003-06-21 13:11:07 +00001846 fprintf(stderr, "qemu: fatal: ");
1847 vfprintf(stderr, fmt, ap);
1848 fprintf(stderr, "\n");
1849#ifdef TARGET_I386
bellard7fe48482004-10-09 18:08:01 +00001850 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1851#else
1852 cpu_dump_state(env, stderr, fprintf, 0);
bellard75012672003-06-21 13:11:07 +00001853#endif
aliguori93fcfe32009-01-15 22:34:14 +00001854 if (qemu_log_enabled()) {
1855 qemu_log("qemu: fatal: ");
1856 qemu_log_vprintf(fmt, ap2);
1857 qemu_log("\n");
j_mayerf9373292007-09-29 12:18:20 +00001858#ifdef TARGET_I386
aliguori93fcfe32009-01-15 22:34:14 +00001859 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
j_mayerf9373292007-09-29 12:18:20 +00001860#else
aliguori93fcfe32009-01-15 22:34:14 +00001861 log_cpu_state(env, 0);
j_mayerf9373292007-09-29 12:18:20 +00001862#endif
aliguori31b1a7b2009-01-15 22:35:09 +00001863 qemu_log_flush();
aliguori93fcfe32009-01-15 22:34:14 +00001864 qemu_log_close();
balrog924edca2007-06-10 14:07:13 +00001865 }
pbrook493ae1f2007-11-23 16:53:59 +00001866 va_end(ap2);
j_mayerf9373292007-09-29 12:18:20 +00001867 va_end(ap);
Riku Voipiofd052bf2010-01-25 14:30:49 +02001868#if defined(CONFIG_USER_ONLY)
1869 {
1870 struct sigaction act;
1871 sigfillset(&act.sa_mask);
1872 act.sa_handler = SIG_DFL;
1873 sigaction(SIGABRT, &act, NULL);
1874 }
1875#endif
bellard75012672003-06-21 13:11:07 +00001876 abort();
1877}
1878
thsc5be9f02007-02-28 20:20:53 +00001879CPUState *cpu_copy(CPUState *env)
1880{
ths01ba9812007-12-09 02:22:57 +00001881 CPUState *new_env = cpu_init(env->cpu_model_str);
thsc5be9f02007-02-28 20:20:53 +00001882 CPUState *next_cpu = new_env->next_cpu;
1883 int cpu_index = new_env->cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001884#if defined(TARGET_HAS_ICE)
1885 CPUBreakpoint *bp;
1886 CPUWatchpoint *wp;
1887#endif
1888
thsc5be9f02007-02-28 20:20:53 +00001889 memcpy(new_env, env, sizeof(CPUState));
aliguori5a38f082009-01-15 20:16:51 +00001890
1891 /* Preserve chaining and index. */
thsc5be9f02007-02-28 20:20:53 +00001892 new_env->next_cpu = next_cpu;
1893 new_env->cpu_index = cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001894
1895 /* Clone all break/watchpoints.
1896 Note: Once we support ptrace with hw-debug register access, make sure
1897 BP_CPU break/watchpoints are handled correctly on clone. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00001898 QTAILQ_INIT(&env->breakpoints);
1899 QTAILQ_INIT(&env->watchpoints);
aliguori5a38f082009-01-15 20:16:51 +00001900#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001901 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001902 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1903 }
Blue Swirl72cf2d42009-09-12 07:36:22 +00001904 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001905 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1906 wp->flags, NULL);
1907 }
1908#endif
1909
thsc5be9f02007-02-28 20:20:53 +00001910 return new_env;
1911}
1912
bellard01243112004-01-04 15:48:17 +00001913#if !defined(CONFIG_USER_ONLY)
1914
edgar_igl5c751e92008-05-06 08:44:21 +00001915static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1916{
1917 unsigned int i;
1918
1919 /* Discard jump cache entries for any tb which might potentially
1920 overlap the flushed page. */
1921 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1922 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001923 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001924
1925 i = tb_jmp_cache_hash_page(addr);
1926 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001927 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001928}
1929
Igor Kovalenko08738982009-07-12 02:15:40 +04001930static CPUTLBEntry s_cputlb_empty_entry = {
1931 .addr_read = -1,
1932 .addr_write = -1,
1933 .addr_code = -1,
1934 .addend = -1,
1935};
1936
Peter Maydell771124e2012-01-17 13:23:13 +00001937/* NOTE:
1938 * If flush_global is true (the usual case), flush all tlb entries.
1939 * If flush_global is false, flush (at least) all tlb entries not
1940 * marked global.
1941 *
1942 * Since QEMU doesn't currently implement a global/not-global flag
1943 * for tlb entries, at the moment tlb_flush() will also flush all
1944 * tlb entries in the flush_global == false case. This is OK because
1945 * CPU architectures generally permit an implementation to drop
1946 * entries from the TLB at any time, so flushing more entries than
1947 * required is only an efficiency issue, not a correctness issue.
1948 */
bellardee8b7022004-02-03 23:35:10 +00001949void tlb_flush(CPUState *env, int flush_global)
bellard33417e72003-08-10 21:47:01 +00001950{
bellard33417e72003-08-10 21:47:01 +00001951 int i;
bellard01243112004-01-04 15:48:17 +00001952
bellard9fa3e852004-01-04 18:06:42 +00001953#if defined(DEBUG_TLB)
1954 printf("tlb_flush:\n");
1955#endif
bellard01243112004-01-04 15:48:17 +00001956 /* must reset current TB so that interrupts cannot modify the
1957 links while we are modifying them */
1958 env->current_tb = NULL;
1959
bellard33417e72003-08-10 21:47:01 +00001960 for(i = 0; i < CPU_TLB_SIZE; i++) {
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001961 int mmu_idx;
1962 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
Igor Kovalenko08738982009-07-12 02:15:40 +04001963 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001964 }
bellard33417e72003-08-10 21:47:01 +00001965 }
bellard9fa3e852004-01-04 18:06:42 +00001966
bellard8a40a182005-11-20 10:35:40 +00001967 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
bellard9fa3e852004-01-04 18:06:42 +00001968
Paul Brookd4c430a2010-03-17 02:14:28 +00001969 env->tlb_flush_addr = -1;
1970 env->tlb_flush_mask = 0;
bellarde3db7222005-01-26 22:00:47 +00001971 tlb_flush_count++;
bellard33417e72003-08-10 21:47:01 +00001972}
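/* Usage sketch (illustrative, not the exact target code): a target
   flushes the whole TLB when its page tables are switched, e.g. an
   x86-style CR3 reload would do something like

       void cpu_x86_update_cr3(CPUState *env, target_ulong new_cr3)
       {
           env->cr[3] = new_cr3;
           tlb_flush(env, 0);
       }

   while mode changes that may affect global pages pass
   flush_global = 1. */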
1973
bellard274da6b2004-05-20 21:56:27 +00001974static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
bellard61382a52003-10-27 21:22:23 +00001975{
ths5fafdf22007-09-16 21:08:06 +00001976 if (addr == (tlb_entry->addr_read &
bellard84b7b8e2005-11-28 21:19:04 +00001977 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00001978 addr == (tlb_entry->addr_write &
bellard84b7b8e2005-11-28 21:19:04 +00001979 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00001980 addr == (tlb_entry->addr_code &
bellard84b7b8e2005-11-28 21:19:04 +00001981 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
Igor Kovalenko08738982009-07-12 02:15:40 +04001982 *tlb_entry = s_cputlb_empty_entry;
bellard84b7b8e2005-11-28 21:19:04 +00001983 }
bellard61382a52003-10-27 21:22:23 +00001984}
1985
bellard2e126692004-04-25 21:28:44 +00001986void tlb_flush_page(CPUState *env, target_ulong addr)
bellard33417e72003-08-10 21:47:01 +00001987{
bellard8a40a182005-11-20 10:35:40 +00001988 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001989 int mmu_idx;
bellard01243112004-01-04 15:48:17 +00001990
bellard9fa3e852004-01-04 18:06:42 +00001991#if defined(DEBUG_TLB)
bellard108c49b2005-07-24 12:55:09 +00001992 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
bellard9fa3e852004-01-04 18:06:42 +00001993#endif
Paul Brookd4c430a2010-03-17 02:14:28 +00001994 /* Check if we need to flush due to large pages. */
1995 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
1996#if defined(DEBUG_TLB)
1997 printf("tlb_flush_page: forced full flush ("
1998 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
1999 env->tlb_flush_addr, env->tlb_flush_mask);
2000#endif
2001 tlb_flush(env, 1);
2002 return;
2003 }
bellard01243112004-01-04 15:48:17 +00002004 /* must reset current TB so that interrupts cannot modify the
2005 links while we are modifying them */
2006 env->current_tb = NULL;
bellard33417e72003-08-10 21:47:01 +00002007
bellard61382a52003-10-27 21:22:23 +00002008 addr &= TARGET_PAGE_MASK;
bellard33417e72003-08-10 21:47:01 +00002009 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002010 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2011 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
bellard01243112004-01-04 15:48:17 +00002012
edgar_igl5c751e92008-05-06 08:44:21 +00002013 tlb_flush_jmp_cache(env, addr);
bellard9fa3e852004-01-04 18:06:42 +00002014}
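/* Worked example of the indexing above, assuming TARGET_PAGE_BITS == 12
   and CPU_TLB_SIZE == 256: addr = 0x12345678 selects set
   (0x12345678 >> 12) & 0xff == 0x45, and tlb_flush_entry() is applied
   to that slot in every MMU mode's table. */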
2015
bellard9fa3e852004-01-04 18:06:42 +00002016/* update the TLBs so that writes to code in the virtual page 'addr'
2017 can be detected */
Anthony Liguoric227f092009-10-01 16:12:16 -05002018static void tlb_protect_code(ram_addr_t ram_addr)
bellard61382a52003-10-27 21:22:23 +00002019{
ths5fafdf22007-09-16 21:08:06 +00002020 cpu_physical_memory_reset_dirty(ram_addr,
bellard6a00d602005-11-21 23:25:50 +00002021 ram_addr + TARGET_PAGE_SIZE,
2022 CODE_DIRTY_FLAG);
bellard9fa3e852004-01-04 18:06:42 +00002023}
2024
bellard9fa3e852004-01-04 18:06:42 +00002025/* update the TLB so that writes in physical page 'phys_addr' are no longer
bellard3a7d9292005-08-21 09:26:42 +00002026 tested for self-modifying code */
Anthony Liguoric227f092009-10-01 16:12:16 -05002027static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
bellard3a7d9292005-08-21 09:26:42 +00002028 target_ulong vaddr)
bellard9fa3e852004-01-04 18:06:42 +00002029{
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002030 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
bellard1ccde1c2004-02-06 19:46:14 +00002031}
2032
ths5fafdf22007-09-16 21:08:06 +00002033static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
bellard1ccde1c2004-02-06 19:46:14 +00002034 unsigned long start, unsigned long length)
2035{
2036 unsigned long addr;
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002037 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
bellard84b7b8e2005-11-28 21:19:04 +00002038 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
bellard1ccde1c2004-02-06 19:46:14 +00002039 if ((addr - start) < length) {
pbrook0f459d12008-06-09 00:20:13 +00002040 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
bellard1ccde1c2004-02-06 19:46:14 +00002041 }
2042 }
2043}
2044
pbrook5579c7f2009-04-11 14:47:08 +00002045/* Note: start and end must be within the same ram block. */
Anthony Liguoric227f092009-10-01 16:12:16 -05002046void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
bellard0a962c02005-02-10 22:00:27 +00002047 int dirty_flags)
bellard1ccde1c2004-02-06 19:46:14 +00002048{
2049 CPUState *env;
bellard4f2ac232004-04-26 19:44:02 +00002050 unsigned long length, start1;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002051 int i;
bellard1ccde1c2004-02-06 19:46:14 +00002052
2053 start &= TARGET_PAGE_MASK;
2054 end = TARGET_PAGE_ALIGN(end);
2055
2056 length = end - start;
2057 if (length == 0)
2058 return;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002059 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00002060
bellard1ccde1c2004-02-06 19:46:14 +00002061 /* we modify the TLB cache so that the dirty bit will be set again
2062 when accessing the range */
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02002063 start1 = (unsigned long)qemu_safe_ram_ptr(start);
Stefan Weila57d23e2011-04-30 22:49:26 +02002064 /* Check that we don't span multiple blocks - this breaks the
pbrook5579c7f2009-04-11 14:47:08 +00002065 address comparisons below. */
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02002066 if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
pbrook5579c7f2009-04-11 14:47:08 +00002067 != (end - 1) - start) {
2068 abort();
2069 }
2070
bellard6a00d602005-11-21 23:25:50 +00002071 for(env = first_cpu; env != NULL; env = env->next_cpu) {
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002072 int mmu_idx;
2073 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2074 for(i = 0; i < CPU_TLB_SIZE; i++)
2075 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2076 start1, length);
2077 }
bellard6a00d602005-11-21 23:25:50 +00002078 }
bellard1ccde1c2004-02-06 19:46:14 +00002079}
2080
aliguori74576192008-10-06 14:02:03 +00002081int cpu_physical_memory_set_dirty_tracking(int enable)
2082{
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002083 int ret = 0;
aliguori74576192008-10-06 14:02:03 +00002084 in_migration = enable;
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002085 return ret;
aliguori74576192008-10-06 14:02:03 +00002086}
2087
bellard3a7d9292005-08-21 09:26:42 +00002088static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2089{
Anthony Liguoric227f092009-10-01 16:12:16 -05002090 ram_addr_t ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00002091 void *p;
bellard3a7d9292005-08-21 09:26:42 +00002092
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002093 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
pbrook5579c7f2009-04-11 14:47:08 +00002094 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2095 + tlb_entry->addend);
Marcelo Tosattie8902612010-10-11 15:31:19 -03002096 ram_addr = qemu_ram_addr_from_host_nofail(p);
bellard3a7d9292005-08-21 09:26:42 +00002097 if (!cpu_physical_memory_is_dirty(ram_addr)) {
pbrook0f459d12008-06-09 00:20:13 +00002098 tlb_entry->addr_write |= TLB_NOTDIRTY;
bellard3a7d9292005-08-21 09:26:42 +00002099 }
2100 }
2101}
2102
2103/* update the TLB according to the current state of the dirty bits */
2104void cpu_tlb_update_dirty(CPUState *env)
2105{
2106 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002107 int mmu_idx;
2108 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2109 for(i = 0; i < CPU_TLB_SIZE; i++)
2110 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2111 }
bellard3a7d9292005-08-21 09:26:42 +00002112}
2113
pbrook0f459d12008-06-09 00:20:13 +00002114static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002115{
pbrook0f459d12008-06-09 00:20:13 +00002116 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2117 tlb_entry->addr_write = vaddr;
bellard1ccde1c2004-02-06 19:46:14 +00002118}
2119
pbrook0f459d12008-06-09 00:20:13 +00002120/* update the TLB corresponding to virtual page vaddr
2121 so that it is no longer dirty */
2122static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002123{
bellard1ccde1c2004-02-06 19:46:14 +00002124 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002125 int mmu_idx;
bellard1ccde1c2004-02-06 19:46:14 +00002126
pbrook0f459d12008-06-09 00:20:13 +00002127 vaddr &= TARGET_PAGE_MASK;
bellard1ccde1c2004-02-06 19:46:14 +00002128 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002129 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2130 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
bellard9fa3e852004-01-04 18:06:42 +00002131}
2132
Paul Brookd4c430a2010-03-17 02:14:28 +00002133/* Our TLB does not support large pages, so remember the area covered by
2134 large pages and trigger a full TLB flush if these are invalidated. */
2135static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2136 target_ulong size)
2137{
2138 target_ulong mask = ~(size - 1);
2139
2140 if (env->tlb_flush_addr == (target_ulong)-1) {
2141 env->tlb_flush_addr = vaddr & mask;
2142 env->tlb_flush_mask = mask;
2143 return;
2144 }
2145 /* Extend the existing region to include the new page.
2146 This is a compromise between unnecessary flushes and the cost
2147 of maintaining a full variable size TLB. */
2148 mask &= env->tlb_flush_mask;
2149 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2150 mask <<= 1;
2151 }
2152 env->tlb_flush_addr &= mask;
2153 env->tlb_flush_mask = mask;
2154}
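/* Worked example (assuming 64KB large pages on a 32-bit target):
   tlb_add_large_page(env, 0x00200000, 0x10000) records
   tlb_flush_addr = 0x00200000, tlb_flush_mask = 0xffff0000.  A second
   call with vaddr 0x00240000 shifts the mask left until both
   addresses match: 0xffff0000 -> 0xfff00000, so the tracked region
   becomes the 1MB range 0x00200000-0x002fffff and any
   tlb_flush_page() inside it degrades to a full tlb_flush(). */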
2155
Avi Kivity06ef3522012-02-13 16:11:22 +02002156static bool is_ram_rom(MemoryRegionSection *s)
Avi Kivity1d393fa2012-01-01 21:15:42 +02002157{
Avi Kivity06ef3522012-02-13 16:11:22 +02002158 return memory_region_is_ram(s->mr);
Avi Kivity1d393fa2012-01-01 21:15:42 +02002159}
2160
Avi Kivity06ef3522012-02-13 16:11:22 +02002161static bool is_romd(MemoryRegionSection *s)
Avi Kivity75c578d2012-01-02 15:40:52 +02002162{
Avi Kivity06ef3522012-02-13 16:11:22 +02002163 MemoryRegion *mr = s->mr;
Avi Kivity75c578d2012-01-02 15:40:52 +02002164
Avi Kivity75c578d2012-01-02 15:40:52 +02002165 return mr->rom_device && mr->readable;
2166}
2167
Avi Kivity06ef3522012-02-13 16:11:22 +02002168static bool is_ram_rom_romd(MemoryRegionSection *s)
Avi Kivity1d393fa2012-01-01 21:15:42 +02002169{
Avi Kivity06ef3522012-02-13 16:11:22 +02002170 return is_ram_rom(s) || is_romd(s);
Avi Kivity1d393fa2012-01-01 21:15:42 +02002171}
2172
Paul Brookd4c430a2010-03-17 02:14:28 +00002173/* Add a new TLB entry. At most one entry for a given virtual address
2174 is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
2175 supplied size is only used by tlb_flush_page. */
2176void tlb_set_page(CPUState *env, target_ulong vaddr,
2177 target_phys_addr_t paddr, int prot,
2178 int mmu_idx, target_ulong size)
bellard9fa3e852004-01-04 18:06:42 +00002179{
Avi Kivity06ef3522012-02-13 16:11:22 +02002180 MemoryRegionSection section;
bellard9fa3e852004-01-04 18:06:42 +00002181 unsigned int index;
bellard4f2ac232004-04-26 19:44:02 +00002182 target_ulong address;
pbrook0f459d12008-06-09 00:20:13 +00002183 target_ulong code_address;
Paul Brook355b1942010-04-05 00:28:53 +01002184 unsigned long addend;
bellard84b7b8e2005-11-28 21:19:04 +00002185 CPUTLBEntry *te;
aliguoria1d1bb32008-11-18 20:07:32 +00002186 CPUWatchpoint *wp;
Anthony Liguoric227f092009-10-01 16:12:16 -05002187 target_phys_addr_t iotlb;
bellard9fa3e852004-01-04 18:06:42 +00002188
Paul Brookd4c430a2010-03-17 02:14:28 +00002189 assert(size >= TARGET_PAGE_SIZE);
2190 if (size != TARGET_PAGE_SIZE) {
2191 tlb_add_large_page(env, vaddr, size);
2192 }
Avi Kivity06ef3522012-02-13 16:11:22 +02002193 section = phys_page_find(paddr >> TARGET_PAGE_BITS);
bellard9fa3e852004-01-04 18:06:42 +00002194#if defined(DEBUG_TLB)
Stefan Weil7fd3f492010-09-30 22:39:51 +02002195 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
2196 " prot=%x idx=%d size=" TARGET_FMT_lx "\n",
2197 vaddr, paddr, prot, mmu_idx, size);
bellard9fa3e852004-01-04 18:06:42 +00002198#endif
2199
pbrook0f459d12008-06-09 00:20:13 +00002200 address = vaddr;
Avi Kivity06ef3522012-02-13 16:11:22 +02002201 if (!is_ram_rom_romd(&section)) {
pbrook0f459d12008-06-09 00:20:13 +00002202 /* IO memory case (romd handled later) */
2203 address |= TLB_MMIO;
2204 }
Avi Kivity06ef3522012-02-13 16:11:22 +02002205 if (is_ram_rom_romd(&section)) {
2206 addend = (unsigned long)(memory_region_get_ram_ptr(section.mr)
2207 + section.offset_within_region);
2208 } else {
2209 addend = 0;
2210 }
2211 if (is_ram_rom(&section)) {
pbrook0f459d12008-06-09 00:20:13 +00002212 /* Normal RAM. */
Avi Kivity06ef3522012-02-13 16:11:22 +02002213 iotlb = (memory_region_get_ram_addr(section.mr)
2214 + section.offset_within_region) & TARGET_PAGE_MASK;
2215 if (!section.readonly)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002216 iotlb |= io_mem_notdirty.ram_addr;
pbrook0f459d12008-06-09 00:20:13 +00002217 else
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002218 iotlb |= io_mem_rom.ram_addr;
pbrook0f459d12008-06-09 00:20:13 +00002219 } else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002220 /* IO handlers are currently passed a physical address.
pbrook0f459d12008-06-09 00:20:13 +00002221 It would be nice to pass an offset from the base address
2222 of that region. This would avoid having to special case RAM,
2223 and avoid full address decoding in every device.
2224 We can't use the high bits of the ram address for this because
2225 ROM devices (the old IO_MEM_ROMD case) use them as a ram address. */
Avi Kivity06ef3522012-02-13 16:11:22 +02002226 iotlb = memory_region_get_ram_addr(section.mr) & ~TARGET_PAGE_MASK;
2227 iotlb += section.offset_within_region;
pbrook0f459d12008-06-09 00:20:13 +00002228 }
pbrook6658ffb2007-03-16 23:58:11 +00002229
pbrook0f459d12008-06-09 00:20:13 +00002230 code_address = address;
2231 /* Make accesses to pages with watchpoints go via the
2232 watchpoint trap routines. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00002233 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00002234 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
Jun Koibf298f82010-05-06 14:36:59 +09002235 /* Avoid trapping reads of pages with a write breakpoint. */
2236 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
Avi Kivity1ec9b902012-01-02 12:47:48 +02002237 iotlb = io_mem_watch.ram_addr + paddr;
Jun Koibf298f82010-05-06 14:36:59 +09002238 address |= TLB_MMIO;
2239 break;
2240 }
pbrook6658ffb2007-03-16 23:58:11 +00002241 }
pbrook0f459d12008-06-09 00:20:13 +00002242 }
balrogd79acba2007-06-26 20:01:13 +00002243
pbrook0f459d12008-06-09 00:20:13 +00002244 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2245 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2246 te = &env->tlb_table[mmu_idx][index];
2247 te->addend = addend - vaddr;
2248 if (prot & PAGE_READ) {
2249 te->addr_read = address;
2250 } else {
2251 te->addr_read = -1;
2252 }
edgar_igl5c751e92008-05-06 08:44:21 +00002253
pbrook0f459d12008-06-09 00:20:13 +00002254 if (prot & PAGE_EXEC) {
2255 te->addr_code = code_address;
2256 } else {
2257 te->addr_code = -1;
2258 }
2259 if (prot & PAGE_WRITE) {
Avi Kivity06ef3522012-02-13 16:11:22 +02002260 if ((memory_region_is_ram(section.mr) && section.readonly)
2261 || is_romd(&section)) {
pbrook0f459d12008-06-09 00:20:13 +00002262 /* Write access calls the I/O callback. */
2263 te->addr_write = address | TLB_MMIO;
Avi Kivity06ef3522012-02-13 16:11:22 +02002264 } else if (memory_region_is_ram(section.mr)
2265 && !cpu_physical_memory_is_dirty(
2266 section.mr->ram_addr
2267 + section.offset_within_region)) {
pbrook0f459d12008-06-09 00:20:13 +00002268 te->addr_write = address | TLB_NOTDIRTY;
bellard84b7b8e2005-11-28 21:19:04 +00002269 } else {
pbrook0f459d12008-06-09 00:20:13 +00002270 te->addr_write = address;
bellard9fa3e852004-01-04 18:06:42 +00002271 }
pbrook0f459d12008-06-09 00:20:13 +00002272 } else {
2273 te->addr_write = -1;
bellard9fa3e852004-01-04 18:06:42 +00002274 }
bellard9fa3e852004-01-04 18:06:42 +00002275}
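/* Usage sketch (illustrative): a target's MMU translation path
   installs an entry like

       tlb_set_page(env, vaddr & TARGET_PAGE_MASK,
                    paddr & TARGET_PAGE_MASK,
                    PAGE_READ | PAGE_WRITE, mmu_idx, TARGET_PAGE_SIZE);

   where vaddr/paddr/mmu_idx come from the target's page table walk;
   size must be a power of two >= TARGET_PAGE_SIZE, and larger sizes
   go through tlb_add_large_page() above. */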
2276
bellard01243112004-01-04 15:48:17 +00002277#else
2278
bellardee8b7022004-02-03 23:35:10 +00002279void tlb_flush(CPUState *env, int flush_global)
bellard01243112004-01-04 15:48:17 +00002280{
2281}
2282
bellard2e126692004-04-25 21:28:44 +00002283void tlb_flush_page(CPUState *env, target_ulong addr)
bellard01243112004-01-04 15:48:17 +00002284{
2285}
2286
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002287/*
2288 * Walks guest process memory "regions" one by one
2289 * and calls callback function 'fn' for each region.
2290 */
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002291
2292struct walk_memory_regions_data
bellard9fa3e852004-01-04 18:06:42 +00002293{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002294 walk_memory_regions_fn fn;
2295 void *priv;
2296 unsigned long start;
2297 int prot;
2298};
bellard9fa3e852004-01-04 18:06:42 +00002299
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002300static int walk_memory_regions_end(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00002301 abi_ulong end, int new_prot)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002302{
2303 if (data->start != -1ul) {
2304 int rc = data->fn(data->priv, data->start, end, data->prot);
2305 if (rc != 0) {
2306 return rc;
bellard9fa3e852004-01-04 18:06:42 +00002307 }
bellard33417e72003-08-10 21:47:01 +00002308 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002309
2310 data->start = (new_prot ? end : -1ul);
2311 data->prot = new_prot;
2312
2313 return 0;
2314}
2315
2316static int walk_memory_regions_1(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00002317 abi_ulong base, int level, void **lp)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002318{
Paul Brookb480d9b2010-03-12 23:23:29 +00002319 abi_ulong pa;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002320 int i, rc;
2321
2322 if (*lp == NULL) {
2323 return walk_memory_regions_end(data, base, 0);
2324 }
2325
2326 if (level == 0) {
2327 PageDesc *pd = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00002328 for (i = 0; i < L2_SIZE; ++i) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002329 int prot = pd[i].flags;
2330
2331 pa = base | (i << TARGET_PAGE_BITS);
2332 if (prot != data->prot) {
2333 rc = walk_memory_regions_end(data, pa, prot);
2334 if (rc != 0) {
2335 return rc;
2336 }
2337 }
2338 }
2339 } else {
2340 void **pp = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00002341 for (i = 0; i < L2_SIZE; ++i) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002342 pa = base | ((abi_ulong)i <<
2343 (TARGET_PAGE_BITS + L2_BITS * level));
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002344 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2345 if (rc != 0) {
2346 return rc;
2347 }
2348 }
2349 }
2350
2351 return 0;
2352}
2353
2354int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2355{
2356 struct walk_memory_regions_data data;
2357 unsigned long i;
2358
2359 data.fn = fn;
2360 data.priv = priv;
2361 data.start = -1ul;
2362 data.prot = 0;
2363
2364 for (i = 0; i < V_L1_SIZE; i++) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002365 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002366 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2367 if (rc != 0) {
2368 return rc;
2369 }
2370 }
2371
2372 return walk_memory_regions_end(&data, 0, 0);
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002373}
2374
Paul Brookb480d9b2010-03-12 23:23:29 +00002375static int dump_region(void *priv, abi_ulong start,
2376 abi_ulong end, unsigned long prot)
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002377{
2378 FILE *f = (FILE *)priv;
2379
Paul Brookb480d9b2010-03-12 23:23:29 +00002380 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2381 " "TARGET_ABI_FMT_lx" %c%c%c\n",
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002382 start, end, end - start,
2383 ((prot & PAGE_READ) ? 'r' : '-'),
2384 ((prot & PAGE_WRITE) ? 'w' : '-'),
2385 ((prot & PAGE_EXEC) ? 'x' : '-'));
2386
2387 return (0);
2388}
2389
2390/* dump memory mappings */
2391void page_dump(FILE *f)
2392{
2393 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2394 "start", "end", "size", "prot");
2395 walk_memory_regions(f, dump_region);
bellard33417e72003-08-10 21:47:01 +00002396}
2397
pbrook53a59602006-03-25 19:31:22 +00002398int page_get_flags(target_ulong address)
bellard33417e72003-08-10 21:47:01 +00002399{
bellard9fa3e852004-01-04 18:06:42 +00002400 PageDesc *p;
2401
2402 p = page_find(address >> TARGET_PAGE_BITS);
bellard33417e72003-08-10 21:47:01 +00002403 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00002404 return 0;
2405 return p->flags;
bellard33417e72003-08-10 21:47:01 +00002406}
2407
Richard Henderson376a7902010-03-10 15:57:04 -08002408/* Modify the flags of a page and invalidate the code if necessary.
2409 The flag PAGE_WRITE_ORG is positioned automatically depending
2410 on PAGE_WRITE. The mmap_lock should already be held. */
pbrook53a59602006-03-25 19:31:22 +00002411void page_set_flags(target_ulong start, target_ulong end, int flags)
bellard9fa3e852004-01-04 18:06:42 +00002412{
Richard Henderson376a7902010-03-10 15:57:04 -08002413 target_ulong addr, len;
bellard9fa3e852004-01-04 18:06:42 +00002414
Richard Henderson376a7902010-03-10 15:57:04 -08002415 /* This function should never be called with addresses outside the
2416 guest address space. If this assert fires, it probably indicates
2417 a missing call to h2g_valid. */
Paul Brookb480d9b2010-03-12 23:23:29 +00002418#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2419 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002420#endif
2421 assert(start < end);
2422
bellard9fa3e852004-01-04 18:06:42 +00002423 start = start & TARGET_PAGE_MASK;
2424 end = TARGET_PAGE_ALIGN(end);
Richard Henderson376a7902010-03-10 15:57:04 -08002425
2426 if (flags & PAGE_WRITE) {
bellard9fa3e852004-01-04 18:06:42 +00002427 flags |= PAGE_WRITE_ORG;
Richard Henderson376a7902010-03-10 15:57:04 -08002428 }
2429
2430 for (addr = start, len = end - start;
2431 len != 0;
2432 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2433 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2434
2435 /* If the write protection bit is set, then we invalidate
2436 the code inside. */
ths5fafdf22007-09-16 21:08:06 +00002437 if (!(p->flags & PAGE_WRITE) &&
bellard9fa3e852004-01-04 18:06:42 +00002438 (flags & PAGE_WRITE) &&
2439 p->first_tb) {
bellardd720b932004-04-25 17:57:43 +00002440 tb_invalidate_phys_page(addr, 0, NULL);
bellard9fa3e852004-01-04 18:06:42 +00002441 }
2442 p->flags = flags;
2443 }
bellard9fa3e852004-01-04 18:06:42 +00002444}
2445
ths3d97b402007-11-02 19:02:07 +00002446int page_check_range(target_ulong start, target_ulong len, int flags)
2447{
2448 PageDesc *p;
2449 target_ulong end;
2450 target_ulong addr;
2451
Richard Henderson376a7902010-03-10 15:57:04 -08002452 /* This function should never be called with addresses outside the
2453 guest address space. If this assert fires, it probably indicates
2454 a missing call to h2g_valid. */
Blue Swirl338e9e62010-03-13 09:48:08 +00002455#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2456 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002457#endif
2458
Richard Henderson3e0650a2010-03-29 10:54:42 -07002459 if (len == 0) {
2460 return 0;
2461 }
Richard Henderson376a7902010-03-10 15:57:04 -08002462 if (start + len - 1 < start) {
2463 /* We've wrapped around. */
balrog55f280c2008-10-28 10:24:11 +00002464 return -1;
Richard Henderson376a7902010-03-10 15:57:04 -08002465 }
balrog55f280c2008-10-28 10:24:11 +00002466
ths3d97b402007-11-02 19:02:07 +00002467    end = TARGET_PAGE_ALIGN(start + len); /* must do this before we lose bits in the next step */
2468 start = start & TARGET_PAGE_MASK;
2469
Richard Henderson376a7902010-03-10 15:57:04 -08002470 for (addr = start, len = end - start;
2471 len != 0;
2472 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
ths3d97b402007-11-02 19:02:07 +00002473 p = page_find(addr >> TARGET_PAGE_BITS);
2474        if (!p)
2475            return -1;
2476        if (!(p->flags & PAGE_VALID))
2477            return -1;
2478
bellarddae32702007-11-14 10:51:00 +00002479 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
ths3d97b402007-11-02 19:02:07 +00002480 return -1;
bellarddae32702007-11-14 10:51:00 +00002481 if (flags & PAGE_WRITE) {
2482 if (!(p->flags & PAGE_WRITE_ORG))
2483 return -1;
2484 /* unprotect the page if it was put read-only because it
2485 contains translated code */
2486 if (!(p->flags & PAGE_WRITE)) {
2487 if (!page_unprotect(addr, 0, NULL))
2488 return -1;
2489 }
2490 return 0;
2491 }
ths3d97b402007-11-02 19:02:07 +00002492 }
2493 return 0;
2494}
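/* Usage sketch: linux-user style front ends validate guest buffers
   with this before touching them; an access_ok()-style helper is
   roughly

       if (page_check_range(guest_addr, len, PAGE_READ) < 0)
           return -TARGET_EFAULT;

   (a sketch; the real helper names vary per front end). */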
2495
bellard9fa3e852004-01-04 18:06:42 +00002496/* called from signal handler: invalidate the code and unprotect the
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002497 page. Return TRUE if the fault was successfully handled. */
pbrook53a59602006-03-25 19:31:22 +00002498int page_unprotect(target_ulong address, unsigned long pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00002499{
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002500 unsigned int prot;
2501 PageDesc *p;
pbrook53a59602006-03-25 19:31:22 +00002502 target_ulong host_start, host_end, addr;
bellard9fa3e852004-01-04 18:06:42 +00002503
pbrookc8a706f2008-06-02 16:16:42 +00002504 /* Technically this isn't safe inside a signal handler. However we
2505 know this only ever happens in a synchronous SEGV handler, so in
2506 practice it seems to be ok. */
2507 mmap_lock();
2508
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002509 p = page_find(address >> TARGET_PAGE_BITS);
2510 if (!p) {
pbrookc8a706f2008-06-02 16:16:42 +00002511 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002512 return 0;
pbrookc8a706f2008-06-02 16:16:42 +00002513 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002514
bellard9fa3e852004-01-04 18:06:42 +00002515 /* if the page was really writable, then we change its
2516 protection back to writable */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002517 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2518 host_start = address & qemu_host_page_mask;
2519 host_end = host_start + qemu_host_page_size;
2520
2521 prot = 0;
2522 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2523 p = page_find(addr >> TARGET_PAGE_BITS);
2524 p->flags |= PAGE_WRITE;
2525 prot |= p->flags;
2526
bellard9fa3e852004-01-04 18:06:42 +00002527 /* and since the content will be modified, we must invalidate
2528 the corresponding translated code. */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002529 tb_invalidate_phys_page(addr, pc, puc);
bellard9fa3e852004-01-04 18:06:42 +00002530#ifdef DEBUG_TB_CHECK
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002531 tb_invalidate_check(addr);
bellard9fa3e852004-01-04 18:06:42 +00002532#endif
bellard9fa3e852004-01-04 18:06:42 +00002533 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002534 mprotect((void *)g2h(host_start), qemu_host_page_size,
2535 prot & PAGE_BITS);
2536
2537 mmap_unlock();
2538 return 1;
bellard9fa3e852004-01-04 18:06:42 +00002539 }
pbrookc8a706f2008-06-02 16:16:42 +00002540 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002541 return 0;
2542}
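/* Call-path sketch (assumption: a standard user-mode build): the SIGSEGV
 * handler reaches this function roughly as
 *
 *     // after extracting the faulting host address and write flag
 *     if (is_write && page_unprotect(h2g(address), pc, puc)) {
 *         return 1;   // fault came from our write protection; retry insn
 *     }
 *
 * i.e. a write to a page we made read-only to guard translated code is
 * healed here instead of being delivered to the guest. */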
2543
bellard6a00d602005-11-21 23:25:50 +00002544static inline void tlb_set_dirty(CPUState *env,
2545 unsigned long addr, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002546{
2547}
bellard9fa3e852004-01-04 18:06:42 +00002548#endif /* defined(CONFIG_USER_ONLY) */
2549
pbrooke2eef172008-06-08 01:09:01 +00002550#if !defined(CONFIG_USER_ONLY)
pbrook8da3ff12008-12-01 18:59:50 +00002551
Paul Brookc04b2b72010-03-01 03:31:14 +00002552#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2553typedef struct subpage_t {
Avi Kivity70c68e42012-01-02 12:32:48 +02002554 MemoryRegion iomem;
Paul Brookc04b2b72010-03-01 03:31:14 +00002555 target_phys_addr_t base;
Avi Kivity5312bd82012-02-12 18:32:55 +02002556 uint16_t sub_section[TARGET_PAGE_SIZE];
Paul Brookc04b2b72010-03-01 03:31:14 +00002557} subpage_t;
2558
Anthony Liguoric227f092009-10-01 16:12:16 -05002559static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002560 uint16_t section);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002561static subpage_t *subpage_init(target_phys_addr_t base);
Avi Kivity5312bd82012-02-12 18:32:55 +02002562static void destroy_page_desc(uint16_t section_index)
Avi Kivity54688b12012-02-09 17:34:32 +02002563{
Avi Kivity5312bd82012-02-12 18:32:55 +02002564 MemoryRegionSection *section = &phys_sections[section_index];
2565 MemoryRegion *mr = section->mr;
Avi Kivity54688b12012-02-09 17:34:32 +02002566
2567 if (mr->subpage) {
2568 subpage_t *subpage = container_of(mr, subpage_t, iomem);
2569 memory_region_destroy(&subpage->iomem);
2570 g_free(subpage);
2571 }
2572}
2573
Avi Kivity4346ae32012-02-10 17:00:01 +02002574static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
Avi Kivity54688b12012-02-09 17:34:32 +02002575{
2576 unsigned i;
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002577 PhysPageEntry *p;
Avi Kivity54688b12012-02-09 17:34:32 +02002578
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002579 if (lp->u.node == PHYS_MAP_NODE_NIL) {
Avi Kivity54688b12012-02-09 17:34:32 +02002580 return;
2581 }
2582
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002583 p = phys_map_nodes[lp->u.node];
Avi Kivity4346ae32012-02-10 17:00:01 +02002584 for (i = 0; i < L2_SIZE; ++i) {
2585 if (level > 0) {
Avi Kivity54688b12012-02-09 17:34:32 +02002586 destroy_l2_mapping(&p[i], level - 1);
Avi Kivity4346ae32012-02-10 17:00:01 +02002587 } else {
2588 destroy_page_desc(p[i].u.leaf);
Avi Kivity54688b12012-02-09 17:34:32 +02002589 }
Avi Kivity54688b12012-02-09 17:34:32 +02002590 }
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002591 lp->u.node = PHYS_MAP_NODE_NIL;
Avi Kivity54688b12012-02-09 17:34:32 +02002592}
2593
2594static void destroy_all_mappings(void)
2595{
Avi Kivity3eef53d2012-02-10 14:57:31 +02002596 destroy_l2_mapping(&phys_map, P_L2_LEVELS - 1);
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002597 phys_map_nodes_reset();
Avi Kivity54688b12012-02-09 17:34:32 +02002598}
2599
Avi Kivity5312bd82012-02-12 18:32:55 +02002600static uint16_t phys_section_add(MemoryRegionSection *section)
2601{
2602 if (phys_sections_nb == phys_sections_nb_alloc) {
2603 phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
2604 phys_sections = g_renew(MemoryRegionSection, phys_sections,
2605 phys_sections_nb_alloc);
2606 }
2607 phys_sections[phys_sections_nb] = *section;
2608 return phys_sections_nb++;
2609}
2610
2611static void phys_sections_clear(void)
2612{
2613 phys_sections_nb = 0;
2614}
2615
Michael S. Tsirkin8f2498f2009-09-29 18:53:16 +02002616/* Register physical memory described by a MemoryRegionSection.
2617   For RAM, the section size must be a multiple of the target page size.
2618   Sections that are not page-aligned, or smaller than a page, are routed
pbrook8da3ff12008-12-01 18:59:50 +00002619   through a subpage container (register_subpage); whole aligned pages are
2620   mapped directly (register_multipage). In both cases the address passed
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002621   to the I/O callbacks is the offset from the start of the region. */
Avi Kivity0f0cb162012-02-13 17:14:32 +02002624static void register_subpage(MemoryRegionSection *section)
2625{
2626 subpage_t *subpage;
2627 target_phys_addr_t base = section->offset_within_address_space
2628 & TARGET_PAGE_MASK;
2629 MemoryRegionSection existing = phys_page_find(base >> TARGET_PAGE_BITS);
2630 MemoryRegionSection subsection = {
2631 .offset_within_address_space = base,
2632 .size = TARGET_PAGE_SIZE,
2633 };
Avi Kivity0f0cb162012-02-13 17:14:32 +02002634 target_phys_addr_t start, end;
2635
2636 assert(existing.mr->subpage || existing.mr == &io_mem_unassigned);
2637
2638 if (!(existing.mr->subpage)) {
2639 subpage = subpage_init(base);
2640 subsection.mr = &subpage->iomem;
Avi Kivity29990972012-02-13 20:21:20 +02002641 phys_page_set(base >> TARGET_PAGE_BITS, 1,
2642 phys_section_add(&subsection));
Avi Kivity0f0cb162012-02-13 17:14:32 +02002643 } else {
2644 subpage = container_of(existing.mr, subpage_t, iomem);
2645 }
2646 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
2647 end = start + section->size;
2648 subpage_register(subpage, start, end, phys_section_add(section));
2649}
2650
2651
2652static void register_multipage(MemoryRegionSection *section)
bellard33417e72003-08-10 21:47:01 +00002653{
Avi Kivitydd811242012-01-02 12:17:03 +02002654 target_phys_addr_t start_addr = section->offset_within_address_space;
2655 ram_addr_t size = section->size;
Avi Kivity29990972012-02-13 20:21:20 +02002656 target_phys_addr_t addr;
Avi Kivity5312bd82012-02-12 18:32:55 +02002657 uint16_t section_index = phys_section_add(section);
Avi Kivitydd811242012-01-02 12:17:03 +02002658
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002659 assert(size);
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002660
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002661 addr = start_addr;
Avi Kivity29990972012-02-13 20:21:20 +02002662 phys_page_set(addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
2663 section_index);
bellard33417e72003-08-10 21:47:01 +00002664}
2665
Avi Kivity0f0cb162012-02-13 17:14:32 +02002666void cpu_register_physical_memory_log(MemoryRegionSection *section,
2667 bool readonly)
2668{
2669 MemoryRegionSection now = *section, remain = *section;
2670
2671 if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
2672 || (now.size < TARGET_PAGE_SIZE)) {
2673 now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
2674 - now.offset_within_address_space,
2675 now.size);
2676 register_subpage(&now);
2677 remain.size -= now.size;
2678 remain.offset_within_address_space += now.size;
2679 remain.offset_within_region += now.size;
2680 }
2681 now = remain;
2682 now.size &= TARGET_PAGE_MASK;
2683 if (now.size) {
2684 register_multipage(&now);
2685 remain.size -= now.size;
2686 remain.offset_within_address_space += now.size;
2687 remain.offset_within_region += now.size;
2688 }
2689 now = remain;
2690 if (now.size) {
2691 register_subpage(&now);
2692 }
2693}
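/* Worked example (hypothetical numbers, TARGET_PAGE_SIZE == 0x1000): a
 * section covering [0x1800, 0x5400) is split by the code above into
 *
 *     subpage   [0x1800, 0x2000)   unaligned head
 *     multipage [0x2000, 0x5000)   whole pages, mapped directly
 *     subpage   [0x5000, 0x5400)   tail shorter than a page
 */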
2694
2695
Anthony Liguoric227f092009-10-01 16:12:16 -05002696void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002697{
2698 if (kvm_enabled())
2699 kvm_coalesce_mmio_region(addr, size);
2700}
2701
Anthony Liguoric227f092009-10-01 16:12:16 -05002702void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002703{
2704 if (kvm_enabled())
2705 kvm_uncoalesce_mmio_region(addr, size);
2706}
2707
Sheng Yang62a27442010-01-26 19:21:16 +08002708void qemu_flush_coalesced_mmio_buffer(void)
2709{
2710 if (kvm_enabled())
2711 kvm_flush_coalesced_mmio_buffer();
2712}
2713
Marcelo Tosattic9027602010-03-01 20:25:08 -03002714#if defined(__linux__) && !defined(TARGET_S390X)
2715
2716#include <sys/vfs.h>
2717
2718#define HUGETLBFS_MAGIC 0x958458f6
2719
2720static long gethugepagesize(const char *path)
2721{
2722 struct statfs fs;
2723 int ret;
2724
2725 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002726 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002727 } while (ret != 0 && errno == EINTR);
2728
2729 if (ret != 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002730 perror(path);
2731 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002732 }
2733
2734 if (fs.f_type != HUGETLBFS_MAGIC)
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002735 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002736
2737 return fs.f_bsize;
2738}
2739
Alex Williamson04b16652010-07-02 11:13:17 -06002740static void *file_ram_alloc(RAMBlock *block,
2741 ram_addr_t memory,
2742 const char *path)
Marcelo Tosattic9027602010-03-01 20:25:08 -03002743{
2744 char *filename;
2745 void *area;
2746 int fd;
2747#ifdef MAP_POPULATE
2748 int flags;
2749#endif
2750 unsigned long hpagesize;
2751
2752 hpagesize = gethugepagesize(path);
2753 if (!hpagesize) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002754 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002755 }
2756
2757 if (memory < hpagesize) {
2758 return NULL;
2759 }
2760
2761 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2762 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2763 return NULL;
2764 }
2765
2766 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002767 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002768 }
2769
2770 fd = mkstemp(filename);
2771 if (fd < 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002772 perror("unable to create backing store for hugepages");
2773 free(filename);
2774 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002775 }
2776 unlink(filename);
2777 free(filename);
2778
2779 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2780
2781 /*
2782 * ftruncate is not supported by hugetlbfs in older
2783 * hosts, so don't bother bailing out on errors.
2784 * If anything goes wrong with it under other filesystems,
2785 * mmap will fail.
2786 */
2787 if (ftruncate(fd, memory))
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002788 perror("ftruncate");
Marcelo Tosattic9027602010-03-01 20:25:08 -03002789
2790#ifdef MAP_POPULATE
2791 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2792 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2793 * to sidestep this quirk.
2794 */
2795 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2796 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2797#else
2798 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2799#endif
2800 if (area == MAP_FAILED) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002801 perror("file_ram_alloc: can't mmap RAM pages");
2802 close(fd);
2803 return (NULL);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002804 }
Alex Williamson04b16652010-07-02 11:13:17 -06002805 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002806 return area;
2807}
2808#endif
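/* How this path is reached (sketch): starting QEMU with e.g.
 * "-mem-path /dev/hugepages" sets mem_path, and qemu_ram_alloc_from_ptr()
 * below then calls file_ram_alloc(new_block, size, mem_path) so that guest
 * RAM is backed by hugetlbfs; block->fd keeps the backing file alive. */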
2809
Alex Williamsond17b5282010-06-25 11:08:38 -06002810static ram_addr_t find_ram_offset(ram_addr_t size)
2811{
Alex Williamson04b16652010-07-02 11:13:17 -06002812 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06002813 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002814
2815 if (QLIST_EMPTY(&ram_list.blocks))
2816 return 0;
2817
2818 QLIST_FOREACH(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002819 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002820
2821 end = block->offset + block->length;
2822
2823 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2824 if (next_block->offset >= end) {
2825 next = MIN(next, next_block->offset);
2826 }
2827 }
2828 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06002829 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06002830 mingap = next - end;
2831 }
2832 }
Alex Williamson3e837b22011-10-31 08:54:09 -06002833
2834 if (offset == RAM_ADDR_MAX) {
2835 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2836 (uint64_t)size);
2837 abort();
2838 }
2839
Alex Williamson04b16652010-07-02 11:13:17 -06002840 return offset;
2841}
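/* Example (hypothetical layout): with existing blocks [0, 0x8000) and
 * [0x10000, 0x14000), find_ram_offset(0x4000) measures the gap after each
 * block, keeps the smallest gap that still fits (mingap), and returns
 * 0x8000 -- reusing the hole instead of growing past the last block. */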
2842
2843static ram_addr_t last_ram_offset(void)
2844{
Alex Williamsond17b5282010-06-25 11:08:38 -06002845 RAMBlock *block;
2846 ram_addr_t last = 0;
2847
2848 QLIST_FOREACH(block, &ram_list.blocks, next)
2849 last = MAX(last, block->offset + block->length);
2850
2851 return last;
2852}
2853
Avi Kivityc5705a72011-12-20 15:59:12 +02002854void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
Cam Macdonell84b89d72010-07-26 18:10:57 -06002855{
2856 RAMBlock *new_block, *block;
2857
Avi Kivityc5705a72011-12-20 15:59:12 +02002858 new_block = NULL;
2859 QLIST_FOREACH(block, &ram_list.blocks, next) {
2860 if (block->offset == addr) {
2861 new_block = block;
2862 break;
2863 }
2864 }
2865 assert(new_block);
2866 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002867
2868 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2869 char *id = dev->parent_bus->info->get_dev_path(dev);
2870 if (id) {
2871 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05002872 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002873 }
2874 }
2875 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2876
2877 QLIST_FOREACH(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02002878 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06002879 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2880 new_block->idstr);
2881 abort();
2882 }
2883 }
Avi Kivityc5705a72011-12-20 15:59:12 +02002884}
2885
2886ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2887 MemoryRegion *mr)
2888{
2889 RAMBlock *new_block;
2890
2891 size = TARGET_PAGE_ALIGN(size);
2892 new_block = g_malloc0(sizeof(*new_block));
Cam Macdonell84b89d72010-07-26 18:10:57 -06002893
Avi Kivity7c637362011-12-21 13:09:49 +02002894 new_block->mr = mr;
Jun Nakajima432d2682010-08-31 16:41:25 +01002895 new_block->offset = find_ram_offset(size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002896 if (host) {
2897 new_block->host = host;
Huang Yingcd19cfa2011-03-02 08:56:19 +01002898 new_block->flags |= RAM_PREALLOC_MASK;
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002899 } else {
2900 if (mem_path) {
2901#if defined (__linux__) && !defined(TARGET_S390X)
2902 new_block->host = file_ram_alloc(new_block, size, mem_path);
2903 if (!new_block->host) {
2904 new_block->host = qemu_vmalloc(size);
Andreas Färbere78815a2010-09-25 11:26:05 +00002905 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002906 }
2907#else
2908 fprintf(stderr, "-mem-path option unsupported\n");
2909 exit(1);
2910#endif
2911 } else {
2912#if defined(TARGET_S390X) && defined(CONFIG_KVM)
Christian Borntraegerff836782011-05-10 14:49:10 +02002913 /* S390 KVM requires the topmost vma of the RAM to be smaller than
2914 a system-defined value, which is at least 256GB. Larger systems
2915 have larger values. We put the guest between the end of data
2916 segment (system break) and this value. We use 32GB as a base to
2917 have enough room for the system break to grow. */
2918 new_block->host = mmap((void*)0x800000000, size,
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002919 PROT_EXEC|PROT_READ|PROT_WRITE,
Christian Borntraegerff836782011-05-10 14:49:10 +02002920 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
Alexander Graffb8b2732011-05-20 17:33:28 +02002921 if (new_block->host == MAP_FAILED) {
2922 fprintf(stderr, "Allocating RAM failed\n");
2923 abort();
2924 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002925#else
Jan Kiszka868bb332011-06-21 22:59:09 +02002926 if (xen_enabled()) {
Avi Kivityfce537d2011-12-18 15:48:55 +02002927 xen_ram_alloc(new_block->offset, size, mr);
Jun Nakajima432d2682010-08-31 16:41:25 +01002928 } else {
2929 new_block->host = qemu_vmalloc(size);
2930 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002931#endif
Andreas Färbere78815a2010-09-25 11:26:05 +00002932 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002933 }
2934 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06002935 new_block->length = size;
2936
2937 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2938
Anthony Liguori7267c092011-08-20 22:09:37 -05002939 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
Cam Macdonell84b89d72010-07-26 18:10:57 -06002940 last_ram_offset() >> TARGET_PAGE_BITS);
2941 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2942 0xff, size >> TARGET_PAGE_BITS);
2943
2944 if (kvm_enabled())
2945 kvm_setup_guest_memory(new_block->host, size);
2946
2947 return new_block->offset;
2948}
2949
Avi Kivityc5705a72011-12-20 15:59:12 +02002950ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
pbrook94a6b542009-04-11 17:15:54 +00002951{
Avi Kivityc5705a72011-12-20 15:59:12 +02002952 return qemu_ram_alloc_from_ptr(size, NULL, mr);
pbrook94a6b542009-04-11 17:15:54 +00002953}
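/* Caller sketch (names and size hypothetical): device models normally
 * reach this through the memory API (memory_region_init_ram() and
 * friends), which roughly does
 *
 *     memory_region_init(mr, "mydev.vram", 0x100000);   // 1 MB, made up
 *     mr->ram_addr = qemu_ram_alloc(0x100000, mr);
 *
 * so every RAM block stays paired with the MemoryRegion that owns it. */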
bellarde9a1ab12007-02-08 23:08:38 +00002954
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002955void qemu_ram_free_from_ptr(ram_addr_t addr)
2956{
2957 RAMBlock *block;
2958
2959 QLIST_FOREACH(block, &ram_list.blocks, next) {
2960 if (addr == block->offset) {
2961 QLIST_REMOVE(block, next);
Anthony Liguori7267c092011-08-20 22:09:37 -05002962 g_free(block);
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002963 return;
2964 }
2965 }
2966}
2967
Anthony Liguoric227f092009-10-01 16:12:16 -05002968void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00002969{
Alex Williamson04b16652010-07-02 11:13:17 -06002970 RAMBlock *block;
2971
2972 QLIST_FOREACH(block, &ram_list.blocks, next) {
2973 if (addr == block->offset) {
2974 QLIST_REMOVE(block, next);
Huang Yingcd19cfa2011-03-02 08:56:19 +01002975 if (block->flags & RAM_PREALLOC_MASK) {
2976 ;
2977 } else if (mem_path) {
Alex Williamson04b16652010-07-02 11:13:17 -06002978#if defined (__linux__) && !defined(TARGET_S390X)
2979 if (block->fd) {
2980 munmap(block->host, block->length);
2981 close(block->fd);
2982 } else {
2983 qemu_vfree(block->host);
2984 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01002985#else
2986 abort();
Alex Williamson04b16652010-07-02 11:13:17 -06002987#endif
2988 } else {
2989#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2990 munmap(block->host, block->length);
2991#else
Jan Kiszka868bb332011-06-21 22:59:09 +02002992 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002993 xen_invalidate_map_cache_entry(block->host);
Jun Nakajima432d2682010-08-31 16:41:25 +01002994 } else {
2995 qemu_vfree(block->host);
2996 }
Alex Williamson04b16652010-07-02 11:13:17 -06002997#endif
2998 }
Anthony Liguori7267c092011-08-20 22:09:37 -05002999 g_free(block);
Alex Williamson04b16652010-07-02 11:13:17 -06003000 return;
3001 }
3002 }
3003
bellarde9a1ab12007-02-08 23:08:38 +00003004}
3005
Huang Yingcd19cfa2011-03-02 08:56:19 +01003006#ifndef _WIN32
3007void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
3008{
3009 RAMBlock *block;
3010 ram_addr_t offset;
3011 int flags;
3012 void *area, *vaddr;
3013
3014 QLIST_FOREACH(block, &ram_list.blocks, next) {
3015 offset = addr - block->offset;
3016 if (offset < block->length) {
3017 vaddr = block->host + offset;
3018 if (block->flags & RAM_PREALLOC_MASK) {
3019 ;
3020 } else {
3021 flags = MAP_FIXED;
3022 munmap(vaddr, length);
3023 if (mem_path) {
3024#if defined(__linux__) && !defined(TARGET_S390X)
3025 if (block->fd) {
3026#ifdef MAP_POPULATE
3027 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
3028 MAP_PRIVATE;
3029#else
3030 flags |= MAP_PRIVATE;
3031#endif
3032 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3033 flags, block->fd, offset);
3034 } else {
3035 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3036 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3037 flags, -1, 0);
3038 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01003039#else
3040 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01003041#endif
3042 } else {
3043#if defined(TARGET_S390X) && defined(CONFIG_KVM)
3044 flags |= MAP_SHARED | MAP_ANONYMOUS;
3045 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
3046 flags, -1, 0);
3047#else
3048 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3049 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3050 flags, -1, 0);
3051#endif
3052 }
3053 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00003054 fprintf(stderr, "Could not remap addr: "
3055 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01003056 length, addr);
3057 exit(1);
3058 }
3059 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
3060 }
3061 return;
3062 }
3063 }
3064}
3065#endif /* !_WIN32 */
3066
pbrookdc828ca2009-04-09 22:21:07 +00003067/* Return a host pointer to ram allocated with qemu_ram_alloc.
pbrook5579c7f2009-04-11 14:47:08 +00003068 With the exception of the softmmu code in this file, this should
3069 only be used for local memory (e.g. video ram) that the device owns,
3070 and knows it isn't going to access beyond the end of the block.
3071
3072 It should not be used for general purpose DMA.
3073 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
3074 */
Anthony Liguoric227f092009-10-01 16:12:16 -05003075void *qemu_get_ram_ptr(ram_addr_t addr)
pbrookdc828ca2009-04-09 22:21:07 +00003076{
pbrook94a6b542009-04-11 17:15:54 +00003077 RAMBlock *block;
3078
Alex Williamsonf471a172010-06-11 11:11:42 -06003079 QLIST_FOREACH(block, &ram_list.blocks, next) {
3080 if (addr - block->offset < block->length) {
Vincent Palatin7d82af32011-03-10 15:47:46 -05003081 /* Move this entry to the start of the list. */
3082 if (block != QLIST_FIRST(&ram_list.blocks)) {
3083 QLIST_REMOVE(block, next);
3084 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
3085 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003086 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003087 /* We need to check whether the requested address is in RAM
3088 * because we don't want to map all of guest memory into QEMU;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003089 * in that case, map only up to the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01003090 */
3091 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003092 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01003093 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003094 block->host =
3095 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01003096 }
3097 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003098 return block->host + (addr - block->offset);
3099 }
pbrook94a6b542009-04-11 17:15:54 +00003100 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003101
3102 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3103 abort();
3104
3105 return NULL;
pbrookdc828ca2009-04-09 22:21:07 +00003106}
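/* Usage sketch (illustrative, names hypothetical): a device that owns a
 * block may cache the host pointer for its own accesses, e.g.
 *
 *     uint8_t *vram = qemu_get_ram_ptr(s->vram_offset);  // offset from alloc
 *     vram[y * stride + x] = pixel;                      // stays in-block
 *
 * but, per the comment above, must never index past the end of its own
 * block: adjacent ram_addr_t offsets need not be adjacent in host memory. */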
3107
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02003108/* Return a host pointer to ram allocated with qemu_ram_alloc.
3109 * Same as qemu_get_ram_ptr but avoids reordering the ramblock list.
3110 */
3111void *qemu_safe_ram_ptr(ram_addr_t addr)
3112{
3113 RAMBlock *block;
3114
3115 QLIST_FOREACH(block, &ram_list.blocks, next) {
3116 if (addr - block->offset < block->length) {
Jan Kiszka868bb332011-06-21 22:59:09 +02003117 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003118 /* We need to check whether the requested address is in RAM
3119 * because we don't want to map all of guest memory into QEMU;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003120 * in that case, map only up to the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01003121 */
3122 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003123 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01003124 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003125 block->host =
3126 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01003127 }
3128 }
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02003129 return block->host + (addr - block->offset);
3130 }
3131 }
3132
3133 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3134 abort();
3135
3136 return NULL;
3137}
3138
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003139/* Return a host pointer to guest RAM. Similar to qemu_get_ram_ptr,
3140 * but takes a size argument */
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003141void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003142{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003143 if (*size == 0) {
3144 return NULL;
3145 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003146 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003147 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02003148 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003149 RAMBlock *block;
3150
3151 QLIST_FOREACH(block, &ram_list.blocks, next) {
3152 if (addr - block->offset < block->length) {
3153 if (addr - block->offset + *size > block->length)
3154 *size = block->length - addr + block->offset;
3155 return block->host + (addr - block->offset);
3156 }
3157 }
3158
3159 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3160 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003161 }
3162}
3163
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003164void qemu_put_ram_ptr(void *addr)
3165{
3166 trace_qemu_put_ram_ptr(addr);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003167}
3168
Marcelo Tosattie8902612010-10-11 15:31:19 -03003169int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00003170{
pbrook94a6b542009-04-11 17:15:54 +00003171 RAMBlock *block;
3172 uint8_t *host = ptr;
3173
Jan Kiszka868bb332011-06-21 22:59:09 +02003174 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003175 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003176 return 0;
3177 }
3178
Alex Williamsonf471a172010-06-11 11:11:42 -06003179 QLIST_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003180 /* This case happens when the block is not mapped. */
3181 if (block->host == NULL) {
3182 continue;
3183 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003184 if (host - block->host < block->length) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03003185 *ram_addr = block->offset + (host - block->host);
3186 return 0;
Alex Williamsonf471a172010-06-11 11:11:42 -06003187 }
pbrook94a6b542009-04-11 17:15:54 +00003188 }
Jun Nakajima432d2682010-08-31 16:41:25 +01003189
Marcelo Tosattie8902612010-10-11 15:31:19 -03003190 return -1;
3191}
Alex Williamsonf471a172010-06-11 11:11:42 -06003192
Marcelo Tosattie8902612010-10-11 15:31:19 -03003193/* Some of the softmmu routines need to translate from a host pointer
3194 (typically a TLB entry) back to a ram offset. */
3195ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3196{
3197 ram_addr_t ram_addr;
Alex Williamsonf471a172010-06-11 11:11:42 -06003198
Marcelo Tosattie8902612010-10-11 15:31:19 -03003199 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3200 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3201 abort();
3202 }
3203 return ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00003204}
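/* Round-trip sketch (non-Xen case): for any offset inside an allocated
 * block the two translations invert each other,
 *
 *     void *host = qemu_get_ram_ptr(addr);
 *     assert(qemu_ram_addr_from_host_nofail(host) == addr);
 *
 * under Xen the mapcache mediates both directions instead. */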
3205
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003206static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
3207 unsigned size)
bellard33417e72003-08-10 21:47:01 +00003208{
pbrook67d3b952006-12-18 05:03:52 +00003209#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00003210 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
pbrook67d3b952006-12-18 05:03:52 +00003211#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003212#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003213 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00003214#endif
3215 return 0;
3216}
3217
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003218static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
3219 uint64_t val, unsigned size)
blueswir1e18231a2008-10-06 18:46:28 +00003220{
3221#ifdef DEBUG_UNASSIGNED
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003222 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
blueswir1e18231a2008-10-06 18:46:28 +00003223#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003224#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003225 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00003226#endif
3227}
3228
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003229static const MemoryRegionOps unassigned_mem_ops = {
3230 .read = unassigned_mem_read,
3231 .write = unassigned_mem_write,
3232 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00003233};
3234
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003235static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
3236 unsigned size)
3237{
3238 abort();
3239}
3240
3241static void error_mem_write(void *opaque, target_phys_addr_t addr,
3242 uint64_t value, unsigned size)
3243{
3244 abort();
3245}
3246
3247static const MemoryRegionOps error_mem_ops = {
3248 .read = error_mem_read,
3249 .write = error_mem_write,
3250 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00003251};
3252
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003253static const MemoryRegionOps rom_mem_ops = {
3254 .read = error_mem_read,
3255 .write = unassigned_mem_write,
3256 .endianness = DEVICE_NATIVE_ENDIAN,
3257};
3258
3259static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
3260 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00003261{
bellard3a7d9292005-08-21 09:26:42 +00003262 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003263 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003264 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3265#if !defined(CONFIG_USER_ONLY)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003266 tb_invalidate_phys_page_fast(ram_addr, size);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003267 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003268#endif
3269 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003270 switch (size) {
3271 case 1:
3272 stb_p(qemu_get_ram_ptr(ram_addr), val);
3273 break;
3274 case 2:
3275 stw_p(qemu_get_ram_ptr(ram_addr), val);
3276 break;
3277 case 4:
3278 stl_p(qemu_get_ram_ptr(ram_addr), val);
3279 break;
3280 default:
3281 abort();
3282 }
bellardf23db162005-08-21 19:12:28 +00003283 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003284 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00003285 /* we remove the notdirty callback only if the code has been
3286 flushed */
3287 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00003288 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00003289}
3290
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003291static const MemoryRegionOps notdirty_mem_ops = {
3292 .read = error_mem_read,
3293 .write = notdirty_mem_write,
3294 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00003295};
3296
pbrook0f459d12008-06-09 00:20:13 +00003297/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00003298static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00003299{
3300 CPUState *env = cpu_single_env;
aliguori06d55cc2008-11-18 20:24:06 +00003301 target_ulong pc, cs_base;
3302 TranslationBlock *tb;
pbrook0f459d12008-06-09 00:20:13 +00003303 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00003304 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00003305 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00003306
aliguori06d55cc2008-11-18 20:24:06 +00003307 if (env->watchpoint_hit) {
3308 /* We re-entered the check after replacing the TB. Now raise
3309 * the debug interrupt so that it will trigger after the
3310 * current instruction. */
3311 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3312 return;
3313 }
pbrook2e70f6e2008-06-29 01:03:05 +00003314 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003315 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00003316 if ((vaddr == (wp->vaddr & len_mask) ||
3317 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00003318 wp->flags |= BP_WATCHPOINT_HIT;
3319 if (!env->watchpoint_hit) {
3320 env->watchpoint_hit = wp;
3321 tb = tb_find_pc(env->mem_io_pc);
3322 if (!tb) {
3323 cpu_abort(env, "check_watchpoint: could not find TB for "
3324 "pc=%p", (void *)env->mem_io_pc);
3325 }
Stefan Weil618ba8e2011-04-18 06:39:53 +00003326 cpu_restore_state(tb, env, env->mem_io_pc);
aliguori6e140f22008-11-18 20:37:55 +00003327 tb_phys_invalidate(tb, -1);
3328 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3329 env->exception_index = EXCP_DEBUG;
3330 } else {
3331 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3332 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3333 }
3334 cpu_resume_from_signal(env, NULL);
aliguori06d55cc2008-11-18 20:24:06 +00003335 }
aliguori6e140f22008-11-18 20:37:55 +00003336 } else {
3337 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00003338 }
3339 }
3340}
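/* Path sketch (softmmu assumption): after cpu_watchpoint_insert() flags a
 * page, tlb_set_page() routes that page's TLB entry to io_mem_watch, so a
 * guest access goes through watch_mem_read/watch_mem_write below, which
 * call check_watchpoint() before forwarding to the real memory. */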
3341
pbrook6658ffb2007-03-16 23:58:11 +00003342/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3343 so these check for a hit then pass through to the normal out-of-line
3344 phys routines. */
Avi Kivity1ec9b902012-01-02 12:47:48 +02003345static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
3346 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00003347{
Avi Kivity1ec9b902012-01-02 12:47:48 +02003348 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
3349 switch (size) {
3350 case 1: return ldub_phys(addr);
3351 case 2: return lduw_phys(addr);
3352 case 4: return ldl_phys(addr);
3353 default: abort();
3354 }
pbrook6658ffb2007-03-16 23:58:11 +00003355}
3356
Avi Kivity1ec9b902012-01-02 12:47:48 +02003357static void watch_mem_write(void *opaque, target_phys_addr_t addr,
3358 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00003359{
Avi Kivity1ec9b902012-01-02 12:47:48 +02003360 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
3361 switch (size) {
3362 case 1: stb_phys(addr, val); break;
3363 case 2: stw_phys(addr, val); break;
3364 case 4: stl_phys(addr, val); break;
3365 default: abort();
3366 }
pbrook6658ffb2007-03-16 23:58:11 +00003367}
3368
Avi Kivity1ec9b902012-01-02 12:47:48 +02003369static const MemoryRegionOps watch_mem_ops = {
3370 .read = watch_mem_read,
3371 .write = watch_mem_write,
3372 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00003373};
pbrook6658ffb2007-03-16 23:58:11 +00003374
Avi Kivity70c68e42012-01-02 12:32:48 +02003375static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
3376 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003377{
Avi Kivity70c68e42012-01-02 12:32:48 +02003378 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003379 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02003380 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00003381#if defined(DEBUG_SUBPAGE)
3382 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3383 mmio, len, addr, idx);
3384#endif
blueswir1db7b5422007-05-26 17:36:03 +00003385
Avi Kivity5312bd82012-02-12 18:32:55 +02003386 section = &phys_sections[mmio->sub_section[idx]];
3387 addr += mmio->base;
3388 addr -= section->offset_within_address_space;
3389 addr += section->offset_within_region;
3390 return io_mem_read(section->mr->ram_addr, addr, len);
blueswir1db7b5422007-05-26 17:36:03 +00003391}
3392
Avi Kivity70c68e42012-01-02 12:32:48 +02003393static void subpage_write(void *opaque, target_phys_addr_t addr,
3394 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003395{
Avi Kivity70c68e42012-01-02 12:32:48 +02003396 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003397 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02003398 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00003399#if defined(DEBUG_SUBPAGE)
Avi Kivity70c68e42012-01-02 12:32:48 +02003400 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
3401 " idx %d value %"PRIx64"\n",
Richard Hendersonf6405242010-04-22 16:47:31 -07003402 __func__, mmio, len, addr, idx, value);
blueswir1db7b5422007-05-26 17:36:03 +00003403#endif
Richard Hendersonf6405242010-04-22 16:47:31 -07003404
Avi Kivity5312bd82012-02-12 18:32:55 +02003405 section = &phys_sections[mmio->sub_section[idx]];
3406 addr += mmio->base;
3407 addr -= section->offset_within_address_space;
3408 addr += section->offset_within_region;
3409 io_mem_write(section->mr->ram_addr, addr, value, len);
blueswir1db7b5422007-05-26 17:36:03 +00003410}
3411
Avi Kivity70c68e42012-01-02 12:32:48 +02003412static const MemoryRegionOps subpage_ops = {
3413 .read = subpage_read,
3414 .write = subpage_write,
3415 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00003416};
3417
Avi Kivityde712f92012-01-02 12:41:07 +02003418static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
3419 unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01003420{
3421 ram_addr_t raddr = addr;
3422 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02003423 switch (size) {
3424 case 1: return ldub_p(ptr);
3425 case 2: return lduw_p(ptr);
3426 case 4: return ldl_p(ptr);
3427 default: abort();
3428 }
Andreas Färber56384e82011-11-30 16:26:21 +01003429}
3430
Avi Kivityde712f92012-01-02 12:41:07 +02003431static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
3432 uint64_t value, unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01003433{
3434 ram_addr_t raddr = addr;
3435 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02003436 switch (size) {
3437 case 1: stb_p(ptr, value); break;
3438 case 2: stw_p(ptr, value); break;
3439 case 4: stl_p(ptr, value); break;
3440 default: abort();
3441 }
Andreas Färber56384e82011-11-30 16:26:21 +01003442}
3443
Avi Kivityde712f92012-01-02 12:41:07 +02003444static const MemoryRegionOps subpage_ram_ops = {
3445 .read = subpage_ram_read,
3446 .write = subpage_ram_write,
3447 .endianness = DEVICE_NATIVE_ENDIAN,
Andreas Färber56384e82011-11-30 16:26:21 +01003448};
3449
Anthony Liguoric227f092009-10-01 16:12:16 -05003450static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02003451 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00003452{
3453 int idx, eidx;
3454
3455 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3456 return -1;
3457 idx = SUBPAGE_IDX(start);
3458 eidx = SUBPAGE_IDX(end);
3459#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00003460 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
blueswir1db7b5422007-05-26 17:36:03 +00003461 __func__, mmio, start, end, idx, eidx, section);
3462#endif
Avi Kivity5312bd82012-02-12 18:32:55 +02003463 if (memory_region_is_ram(phys_sections[section].mr)) {
3464 MemoryRegionSection new_section = phys_sections[section];
3465 new_section.mr = &io_mem_subpage_ram;
3466 section = phys_section_add(&new_section);
Andreas Färber56384e82011-11-30 16:26:21 +01003467 }
blueswir1db7b5422007-05-26 17:36:03 +00003468 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02003469 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00003470 }
3471
3472 return 0;
3473}
3474
Avi Kivity0f0cb162012-02-13 17:14:32 +02003475static subpage_t *subpage_init(target_phys_addr_t base)
blueswir1db7b5422007-05-26 17:36:03 +00003476{
Anthony Liguoric227f092009-10-01 16:12:16 -05003477 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00003478
Anthony Liguori7267c092011-08-20 22:09:37 -05003479 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00003480
3481 mmio->base = base;
Avi Kivity70c68e42012-01-02 12:32:48 +02003482 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
3483 "subpage", TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02003484 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00003485#if defined(DEBUG_SUBPAGE)
aliguori1eec6142009-02-05 22:06:18 +00003486 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
3487 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00003488#endif
Avi Kivity0f0cb162012-02-13 17:14:32 +02003489 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
blueswir1db7b5422007-05-26 17:36:03 +00003490
3491 return mmio;
3492}
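/* Example (hypothetical addresses): two devices sharing the page at
 * 0x10000 -- A at [0x10000,0x10800), B at [0x10800,0x11000) -- each get a
 * slice of sub_section[] via subpage_register(). An access at 0x10804 then
 * indexes sub_section[SUBPAGE_IDX(0x804)] and is forwarded to B's region
 * after the offset rebasing done in subpage_read()/subpage_write(). */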
3493
aliguori88715652009-02-11 15:20:58 +00003494static int get_free_io_mem_idx(void)
3495{
3496 int i;
3497
3498 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3499 if (!io_mem_used[i]) {
3500 io_mem_used[i] = 1;
3501 return i;
3502 }
Riku Voipioc6703b42009-12-03 15:56:05 +02003503 fprintf(stderr, "Ran out of io_mem_idx entries, max %d!\n", IO_MEM_NB_ENTRIES);
aliguori88715652009-02-11 15:20:58 +00003504 return -1;
3505}
3506
bellard33417e72003-08-10 21:47:01 +00003507/* Register a MemoryRegion for I/O dispatch. If io_index is non-zero,
3508   the corresponding io zone is modified; if it is zero, a new io zone
Paul Brook0b4e6e32009-04-30 18:37:55 +01003509   is allocated. The returned index selects the region in io_mem_region[];
blueswir13ee89922008-01-02 19:45:26 +00003510   (-1) is returned on error. */
Avi Kivitya621f382012-01-02 13:12:08 +02003514static int cpu_register_io_memory_fixed(int io_index, MemoryRegion *mr)
bellard33417e72003-08-10 21:47:01 +00003515{
bellard33417e72003-08-10 21:47:01 +00003516 if (io_index <= 0) {
aliguori88715652009-02-11 15:20:58 +00003517 io_index = get_free_io_mem_idx();
3518 if (io_index == -1)
3519 return io_index;
bellard33417e72003-08-10 21:47:01 +00003520 } else {
3521 if (io_index >= IO_MEM_NB_ENTRIES)
3522 return -1;
3523 }
bellardb5ff1b32005-11-26 10:38:39 +00003524
Avi Kivitya621f382012-01-02 13:12:08 +02003525 io_mem_region[io_index] = mr;
Richard Hendersonf6405242010-04-22 16:47:31 -07003526
Avi Kivity11c7ef02012-01-02 17:21:07 +02003527 return io_index;
bellard33417e72003-08-10 21:47:01 +00003528}
bellard61382a52003-10-27 21:22:23 +00003529
Avi Kivitya621f382012-01-02 13:12:08 +02003530int cpu_register_io_memory(MemoryRegion *mr)
Avi Kivity1eed09c2009-06-14 11:38:51 +03003531{
Avi Kivitya621f382012-01-02 13:12:08 +02003532 return cpu_register_io_memory_fixed(0, mr);
Avi Kivity1eed09c2009-06-14 11:38:51 +03003533}
3534
Avi Kivity11c7ef02012-01-02 17:21:07 +02003535void cpu_unregister_io_memory(int io_index)
aliguori88715652009-02-11 15:20:58 +00003536{
Avi Kivitya621f382012-01-02 13:12:08 +02003537 io_mem_region[io_index] = NULL;
aliguori88715652009-02-11 15:20:58 +00003538 io_mem_used[io_index] = 0;
3539}
3540
Avi Kivity5312bd82012-02-12 18:32:55 +02003541static uint16_t dummy_section(MemoryRegion *mr)
3542{
3543 MemoryRegionSection section = {
3544 .mr = mr,
3545 .offset_within_address_space = 0,
3546 .offset_within_region = 0,
3547 .size = UINT64_MAX,
3548 };
3549
3550 return phys_section_add(&section);
3551}
3552
Avi Kivitye9179ce2009-06-14 11:38:52 +03003553static void io_mem_init(void)
3554{
3555 int i;
3556
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003557 /* Must be first: */
3558 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
3559 assert(io_mem_ram.ram_addr == 0);
3560 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
3561 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
3562 "unassigned", UINT64_MAX);
3563 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
3564 "notdirty", UINT64_MAX);
Avi Kivityde712f92012-01-02 12:41:07 +02003565 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
3566 "subpage-ram", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003567 for (i=0; i<5; i++)
3568 io_mem_used[i] = 1;
3569
Avi Kivity1ec9b902012-01-02 12:47:48 +02003570 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
3571 "watch", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003572}
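/* Note (assumption about ordering): the five fixed regions above are
 * expected to occupy io_mem slots 0..4, with io_mem_ram at slot 0 (hence
 * the assert); the loop marks those slots as taken so that later dynamic
 * registrations via get_free_io_mem_idx() cannot hand them out again. */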
3573
Avi Kivity50c1e142012-02-08 21:36:02 +02003574static void core_begin(MemoryListener *listener)
3575{
Avi Kivity54688b12012-02-09 17:34:32 +02003576 destroy_all_mappings();
Avi Kivity5312bd82012-02-12 18:32:55 +02003577 phys_sections_clear();
Avi Kivityd6f2ea22012-02-12 20:12:49 +02003578 phys_map.u.node = PHYS_MAP_NODE_NIL;
Avi Kivity5312bd82012-02-12 18:32:55 +02003579 phys_section_unassigned = dummy_section(&io_mem_unassigned);
Avi Kivity50c1e142012-02-08 21:36:02 +02003580}
3581
3582static void core_commit(MemoryListener *listener)
3583{
Avi Kivity117712c2012-02-12 21:23:17 +02003584 CPUState *env;
3585
3586 /* since each CPU stores ram addresses in its TLB cache, we must
3587 reset the modified entries */
3588 /* XXX: slow ! */
3589 for(env = first_cpu; env != NULL; env = env->next_cpu) {
3590 tlb_flush(env, 1);
3591 }
Avi Kivity50c1e142012-02-08 21:36:02 +02003592}
3593
Avi Kivity93632742012-02-08 16:54:16 +02003594static void core_region_add(MemoryListener *listener,
3595 MemoryRegionSection *section)
3596{
Avi Kivity4855d412012-02-08 21:16:05 +02003597 cpu_register_physical_memory_log(section, section->readonly);
Avi Kivity93632742012-02-08 16:54:16 +02003598}
3599
3600static void core_region_del(MemoryListener *listener,
3601 MemoryRegionSection *section)
3602{
Avi Kivity93632742012-02-08 16:54:16 +02003603}
3604
Avi Kivity50c1e142012-02-08 21:36:02 +02003605static void core_region_nop(MemoryListener *listener,
3606 MemoryRegionSection *section)
3607{
Avi Kivity54688b12012-02-09 17:34:32 +02003608 cpu_register_physical_memory_log(section, section->readonly);
Avi Kivity50c1e142012-02-08 21:36:02 +02003609}
3610
Avi Kivity93632742012-02-08 16:54:16 +02003611static void core_log_start(MemoryListener *listener,
3612 MemoryRegionSection *section)
3613{
3614}
3615
3616static void core_log_stop(MemoryListener *listener,
3617 MemoryRegionSection *section)
3618{
3619}
3620
3621static void core_log_sync(MemoryListener *listener,
3622 MemoryRegionSection *section)
3623{
3624}
3625
3626static void core_log_global_start(MemoryListener *listener)
3627{
3628 cpu_physical_memory_set_dirty_tracking(1);
3629}
3630
3631static void core_log_global_stop(MemoryListener *listener)
3632{
3633 cpu_physical_memory_set_dirty_tracking(0);
3634}
3635
3636static void core_eventfd_add(MemoryListener *listener,
3637 MemoryRegionSection *section,
3638 bool match_data, uint64_t data, int fd)
3639{
3640}
3641
3642static void core_eventfd_del(MemoryListener *listener,
3643 MemoryRegionSection *section,
3644 bool match_data, uint64_t data, int fd)
3645{
3646}
3647
Avi Kivity50c1e142012-02-08 21:36:02 +02003648static void io_begin(MemoryListener *listener)
3649{
3650}
3651
3652static void io_commit(MemoryListener *listener)
3653{
3654}
3655
Avi Kivity4855d412012-02-08 21:16:05 +02003656static void io_region_add(MemoryListener *listener,
3657 MemoryRegionSection *section)
3658{
3659 iorange_init(&section->mr->iorange, &memory_region_iorange_ops,
3660 section->offset_within_address_space, section->size);
3661 ioport_register(&section->mr->iorange);
3662}
3663
3664static void io_region_del(MemoryListener *listener,
3665 MemoryRegionSection *section)
3666{
3667 isa_unassign_ioport(section->offset_within_address_space, section->size);
3668}
3669
Avi Kivity50c1e142012-02-08 21:36:02 +02003670static void io_region_nop(MemoryListener *listener,
3671 MemoryRegionSection *section)
3672{
3673}
3674
Avi Kivity4855d412012-02-08 21:16:05 +02003675static void io_log_start(MemoryListener *listener,
3676 MemoryRegionSection *section)
3677{
3678}
3679
3680static void io_log_stop(MemoryListener *listener,
3681 MemoryRegionSection *section)
3682{
3683}
3684
3685static void io_log_sync(MemoryListener *listener,
3686 MemoryRegionSection *section)
3687{
3688}
3689
3690static void io_log_global_start(MemoryListener *listener)
3691{
3692}
3693
3694static void io_log_global_stop(MemoryListener *listener)
3695{
3696}
3697
3698static void io_eventfd_add(MemoryListener *listener,
3699 MemoryRegionSection *section,
3700 bool match_data, uint64_t data, int fd)
3701{
3702}
3703
3704static void io_eventfd_del(MemoryListener *listener,
3705 MemoryRegionSection *section,
3706 bool match_data, uint64_t data, int fd)
3707{
3708}
3709
Avi Kivity93632742012-02-08 16:54:16 +02003710static MemoryListener core_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02003711 .begin = core_begin,
3712 .commit = core_commit,
Avi Kivity93632742012-02-08 16:54:16 +02003713 .region_add = core_region_add,
3714 .region_del = core_region_del,
Avi Kivity50c1e142012-02-08 21:36:02 +02003715 .region_nop = core_region_nop,
Avi Kivity93632742012-02-08 16:54:16 +02003716 .log_start = core_log_start,
3717 .log_stop = core_log_stop,
3718 .log_sync = core_log_sync,
3719 .log_global_start = core_log_global_start,
3720 .log_global_stop = core_log_global_stop,
3721 .eventfd_add = core_eventfd_add,
3722 .eventfd_del = core_eventfd_del,
3723 .priority = 0,
3724};
3725
Avi Kivity4855d412012-02-08 21:16:05 +02003726static MemoryListener io_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02003727 .begin = io_begin,
3728 .commit = io_commit,
Avi Kivity4855d412012-02-08 21:16:05 +02003729 .region_add = io_region_add,
3730 .region_del = io_region_del,
Avi Kivity50c1e142012-02-08 21:36:02 +02003731 .region_nop = io_region_nop,
Avi Kivity4855d412012-02-08 21:16:05 +02003732 .log_start = io_log_start,
3733 .log_stop = io_log_stop,
3734 .log_sync = io_log_sync,
3735 .log_global_start = io_log_global_start,
3736 .log_global_stop = io_log_global_stop,
3737 .eventfd_add = io_eventfd_add,
3738 .eventfd_del = io_eventfd_del,
3739 .priority = 0,
3740};
3741
Avi Kivity62152b82011-07-26 14:26:14 +03003742static void memory_map_init(void)
3743{
Anthony Liguori7267c092011-08-20 22:09:37 -05003744 system_memory = g_malloc(sizeof(*system_memory));
Avi Kivity8417ceb2011-08-03 11:56:14 +03003745 memory_region_init(system_memory, "system", INT64_MAX);
Avi Kivity62152b82011-07-26 14:26:14 +03003746 set_system_memory_map(system_memory);
Avi Kivity309cb472011-08-08 16:09:03 +03003747
Anthony Liguori7267c092011-08-20 22:09:37 -05003748 system_io = g_malloc(sizeof(*system_io));
Avi Kivity309cb472011-08-08 16:09:03 +03003749 memory_region_init(system_io, "io", 65536);
3750 set_system_io_map(system_io);
Avi Kivity93632742012-02-08 16:54:16 +02003751
Avi Kivity4855d412012-02-08 21:16:05 +02003752 memory_listener_register(&core_memory_listener, system_memory);
3753 memory_listener_register(&io_memory_listener, system_io);
Avi Kivity62152b82011-07-26 14:26:14 +03003754}
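/* Board-level sketch (names hypothetical): once memory_map_init() has run,
 * machine code builds its layout on the root region, e.g.
 *
 *     MemoryRegion *ram = g_malloc0(sizeof(*ram));
 *     memory_region_init_ram(ram, ...);   // exact signature varies by tree
 *     memory_region_add_subregion(get_system_memory(), 0, ram);
 */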
3755
3756MemoryRegion *get_system_memory(void)
3757{
3758 return system_memory;
3759}
3760
Avi Kivity309cb472011-08-08 16:09:03 +03003761MemoryRegion *get_system_io(void)
3762{
3763 return system_io;
3764}
3765
pbrooke2eef172008-06-08 01:09:01 +00003766#endif /* !defined(CONFIG_USER_ONLY) */
3767
bellard13eb76e2004-01-24 15:23:36 +00003768/* physical memory access (slow version, mainly for debug) */
3769#if defined(CONFIG_USER_ONLY)
Paul Brooka68fe892010-03-01 00:08:59 +00003770int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3771 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003772{
3773 int l, flags;
3774 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00003775 void * p;
bellard13eb76e2004-01-24 15:23:36 +00003776
3777 while (len > 0) {
3778 page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    MemoryRegionSection section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        if (is_write) {
            if (!memory_region_is_ram(section.mr)) {
                target_phys_addr_t addr1;
                io_index = memory_region_get_ram_addr(section.mr)
                    & (IO_MEM_NB_ENTRIES - 1);
                addr1 = (addr & ~TARGET_PAGE_MASK)
                    + section.offset_within_region;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write(io_index, addr1, val, 4);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write(io_index, addr1, val, 2);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write(io_index, addr1, val, 1);
                    l = 1;
                }
            } else if (!section.readonly) {
                ram_addr_t addr1;
                addr1 = (memory_region_get_ram_addr(section.mr)
                         + section.offset_within_region)
                    | (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                qemu_put_ram_ptr(ptr);
            }
        } else {
            if (!is_ram_rom_romd(&section)) {
                target_phys_addr_t addr1;
                /* I/O case */
                io_index = memory_region_get_ram_addr(section.mr)
                    & (IO_MEM_NB_ENTRIES - 1);
                addr1 = (addr & ~TARGET_PAGE_MASK)
                    + section.offset_within_region;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read(io_index, addr1, 4);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read(io_index, addr1, 2);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read(io_index, addr1, 1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(section.mr->ram_addr
                                       + section.offset_within_region);
                memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
                qemu_put_ram_ptr(ptr);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
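
/*
 * Example (a sketch, not part of this file): device models normally go
 * through the cpu_physical_memory_read()/cpu_physical_memory_write()
 * wrappers, which expand to cpu_physical_memory_rw() with is_write 0
 * and 1 respectively:
 *
 *     uint8_t desc[16];
 *     cpu_physical_memory_read(desc_addr, desc, sizeof(desc));
 *     cpu_physical_memory_write(status_addr, &status, 1);
 *
 * desc_addr and status_addr are hypothetical guest-physical addresses
 * owned by the caller.
 */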

/* used for ROM loading: can write to both RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    MemoryRegionSection section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        if (!is_ram_rom_romd(&section)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (memory_region_get_ram_addr(section.mr)
                     + section.offset_within_region)
                + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            qemu_put_ram_ptr(ptr);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
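
/*
 * Unlike cpu_physical_memory_rw(), the function above also stores into
 * ROM and ROM-device regions, which is what firmware loading needs.
 * Hedged sketch of a caller (addresses hypothetical):
 *
 *     cpu_physical_memory_write_rom(bios_base, bios_data, bios_size);
 *
 * A plain cpu_physical_memory_write() to the same range would be
 * discarded, since the write path above skips read-only RAM sections.
 */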

typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;
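
/* There is a single bounce-buffer slot (the static 'bounce' above; its
   storage is allocated on demand), so at most one mapping that needs
   bouncing - i.e. one that covers MMIO rather than RAM - can be
   outstanding at a time. cpu_physical_memory_map() fails for further
   callers until cpu_physical_memory_unmap() releases the slot and the
   map clients below are notified. */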

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
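
/*
 * Sketch of the intended map-client pattern (caller names hypothetical):
 * when cpu_physical_memory_map() returns NULL because the bounce buffer
 * is busy, register a callback and retry the DMA from there:
 *
 *     static void mydev_retry_dma(void *opaque)
 *     {
 *         MyDevState *s = opaque;
 *         mydev_start_dma(s);   // calls cpu_physical_memory_map() again
 *     }
 *     ...
 *     cpu_register_map_client(s, mydev_retry_dma);
 *
 * cpu_notify_map_clients() unregisters each client after invoking it,
 * so the callback must not unregister itself.
 */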

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t todo = 0;
    int l;
    target_phys_addr_t page;
    MemoryRegionSection section;
    ram_addr_t raddr = RAM_ADDR_MAX;
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        if (!(memory_region_is_ram(section.mr) && !section.readonly)) {
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_read(addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            raddr = memory_region_get_ram_addr(section.mr)
                + section.offset_within_region
                + (addr & ~TARGET_PAGE_MASK);
        }

        len -= l;
        addr += l;
        todo += l;
    }
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}
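
/*
 * Typical paired use (a sketch; names hypothetical). Note that *plen may
 * come back smaller than requested, so callers must loop or fall back:
 *
 *     target_phys_addr_t plen = size;
 *     void *host = cpu_physical_memory_map(gpa, &plen, 1);
 *     if (host) {
 *         memset(host, 0, plen);                       // direct access
 *         cpu_physical_memory_unmap(host, plen, 1, plen);
 *     }
 */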

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}

/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    MemoryRegionSection section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!is_ram_rom_romd(&section)) {
        /* I/O case */
        io_index = memory_region_get_ram_addr(section.mr)
            & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
        val = io_mem_read(io_index, addr, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section.mr)
                                & TARGET_PAGE_MASK)
                               + section.offset_within_region)
            + (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
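
/*
 * Example (a sketch, register offset hypothetical): a device model whose
 * registers are defined as little-endian reads them with the _le_
 * variant, which byte-swaps only when the target is big-endian:
 *
 *     uint32_t status = ldl_le_phys(dev_base + 0x04);
 *
 * ldl_phys() keeps target-native order and ldl_be_phys() forces
 * big-endian, mirroring the three DEVICE_*_ENDIAN cases above.
 */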

/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!is_ram_rom_romd(&section)) {
        /* I/O case */
        io_index = memory_region_get_ram_addr(section.mr)
            & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;

        /* XXX This is broken when device endian != cpu endian.
           Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = io_mem_read(io_index, addr, 4) << 32;
        val |= io_mem_read(io_index, addr + 4, 4);
#else
        val = io_mem_read(io_index, addr, 4);
        val |= io_mem_read(io_index, addr + 4, 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section.mr)
                                & TARGET_PAGE_MASK)
                               + section.offset_within_region)
            + (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}
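
/* The I/O branch above issues two 4-byte reads because the legacy io_mem
   dispatch handles at most 4 bytes per access; as the XXX comment notes,
   the halves are combined according to target endianness alone, so a
   64-bit register on a device of the opposite endianness is assembled
   in the wrong order. */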

uint64_t ldq_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
                                          enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!is_ram_rom_romd(&section)) {
        /* I/O case */
        io_index = memory_region_get_ram_addr(section.mr)
            & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
        val = io_mem_read(io_index, addr, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section.mr)
                                & TARGET_PAGE_MASK)
                               + section.offset_within_region)
            + (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    MemoryRegionSection section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section.mr) || section.readonly) {
        if (memory_region_is_ram(section.mr)) {
            io_index = io_mem_rom.ram_addr;
        } else {
            io_index = memory_region_get_ram_addr(section.mr);
        }
        addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
        io_mem_write(io_index, addr, val, 4);
    } else {
        unsigned long addr1 = (memory_region_get_ram_addr(section.mr)
                               & TARGET_PAGE_MASK)
            + section.offset_within_region
            + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
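
/*
 * Example (a sketch): the intended user, per the comment above, is a
 * softmmu page-table walker setting accessed/dirty bits in a guest PTE:
 *
 *     pte |= PG_ACCESSED_MASK;      // x86-style flag, for illustration
 *     stl_phys_notdirty(pte_addr, pte);
 *
 * Unlike stl_phys(), this skips the dirty-bitmap update and TB
 * invalidation, falling back to them only while migration is running,
 * as the in_migration check above shows.
 */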

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    MemoryRegionSection section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section.mr) || section.readonly) {
        if (memory_region_is_ram(section.mr)) {
            io_index = io_mem_rom.ram_addr;
        } else {
            io_index = memory_region_get_ram_addr(section.mr)
                & (IO_MEM_NB_ENTRIES - 1);
        }
        addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write(io_index, addr, val >> 32, 4);
        io_mem_write(io_index, addr + 4, (uint32_t)val, 4);
#else
        io_mem_write(io_index, addr, (uint32_t)val, 4);
        io_mem_write(io_index, addr + 4, val >> 32, 4);
#endif
    } else {
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section.mr)
                                & TARGET_PAGE_MASK)
                               + section.offset_within_region)
            + (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    MemoryRegionSection section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section.mr) || section.readonly) {
        if (memory_region_is_ram(section.mr)) {
            io_index = io_mem_rom.ram_addr;
        } else {
            io_index = memory_region_get_ram_addr(section.mr)
                & (IO_MEM_NB_ENTRIES - 1);
        }
        addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(io_index, addr, val, 4);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section.mr) & TARGET_PAGE_MASK)
            + section.offset_within_region
            + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    MemoryRegionSection section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section.mr) || section.readonly) {
        if (memory_region_is_ram(section.mr)) {
            io_index = io_mem_rom.ram_addr;
        } else {
            io_index = memory_region_get_ram_addr(section.mr)
                & (IO_MEM_NB_ENTRIES - 1);
        }
        addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(io_index, addr, val, 2);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section.mr) & TARGET_PAGE_MASK)
            + section.offset_within_region + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
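
/*
 * Example (a sketch): this is the accessor the gdb stub relies on, since
 * it translates through cpu_get_phys_page_debug() without filling the
 * TLB or raising guest faults, and its write path goes through
 * cpu_physical_memory_write_rom() so breakpoints can be planted even
 * in ROM:
 *
 *     uint8_t insn[4];
 *     if (cpu_memory_rw_debug(env, pc, insn, sizeof(insn), 0) < 0) {
 *         ...   // address not mapped
 *     }
 */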
#endif

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred. */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn. */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB. If this is not
       the first instruction in a TB then re-execute the preceding
       branch. */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen. */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception. In practice
       we have already translated the block once so it's probably ok. */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB. */
    cpu_resume_from_signal(env, NULL);
}
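
/* Only reachable in icount (deterministic) mode: the TB that performed
   I/O mid-block is thrown away and regenerated with CF_LAST_IO so that
   the offending instruction terminates its block, keeping the
   instruction counter exact across the device access. */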

#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
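
/* Exposed through the monitor's "info jit" command; the expansion ratio
   printed above is host code bytes per guest code byte, a rough measure
   of TCG code growth. */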

/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
   is the offset relative to phys_ram_base */
tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    void *p;

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1);
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
        ldub_code(addr);
    }
    pd = env1->tlb_table[mmu_idx][page_index].addr_code & ~TARGET_PAGE_MASK;
    if (pd != io_mem_ram.ram_addr && pd != io_mem_rom.ram_addr
        && !io_mem_region[pd]->rom_device) {
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC)
        cpu_unassigned_access(env1, addr, 0, 1, 0, 4);
#else
        cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
#endif
    }
    p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}
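
/*
 * How the last two lines recover a ram_addr_t (a sketch of the
 * arithmetic): for RAM pages the TLB stores host_ptr - guest_vaddr in
 * .addend, so
 *
 *     host = (void *)((uintptr_t)guest_vaddr + addend);
 *
 * and qemu_ram_addr_from_host_nofail() maps that host pointer back into
 * the RAM offset space that tb_page_addr_t uses. The ldub_code() above
 * exists only to force a TLB fill (faulting if the page is unmapped)
 * before the entry is inspected.
 */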

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"
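
/*
 * softmmu_template.h is a C "template": each inclusion expands the
 * slow-path load/store helpers for one access size (1 << SHIFT bytes),
 * here in the _cmmu code-fetch flavour selected by MMUSUFFIX and
 * SOFTMMU_CODE_ACCESS. The same multiple-inclusion trick in miniature
 * (hypothetical names; double expansion so SHIFT is evaluated):
 *
 *     // mini_template.h
 *     #define XGLUE(a, b) a##b
 *     #define GLUE(a, b) XGLUE(a, b)
 *     static int GLUE(load, SHIFT)(void) { return 1 << SHIFT; }
 *     #undef SHIFT
 *
 *     // user: defines load0() and load1()
 *     #define SHIFT 0
 *     #include "mini_template.h"
 *     #define SHIFT 1
 *     #include "mini_template.h"
 */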

#undef env

#endif