/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
   have limited branch ranges (possibly also PPC) so place it in a
   section close to the code segment. */
#define code_gen_section                            \
    __attribute__((__section__(".gen_code")))       \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                            \
    __attribute__((aligned (16)))
#else
#define code_gen_section                            \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

#define P_L2_LEVELS \
    (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)
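
/* Worked example (illustrative; the values vary by target): with
   TARGET_PHYS_ADDR_SPACE_BITS = 36 and TARGET_PAGE_BITS = 12 there are
   24 page-number bits to map, so P_L2_LEVELS = ((36 - 12 - 1) / 10) + 1
   = 3 levels of L2_SIZE-entry tables.  */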

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
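
/* Worked example (illustrative): for L1_MAP_ADDR_SPACE_BITS = 48 and
   TARGET_PAGE_BITS = 12 there are 36 bits to map; V_L1_BITS_REM = 6, so
   V_L1_BITS = 6, V_L1_SIZE = 64 and V_L1_SHIFT = 30, i.e. a 64-entry top
   level followed by V_L1_SHIFT / L2_BITS = 3 levels of 1024-entry tables.  */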

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageEntry PhysPageEntry;

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;

struct PhysPageEntry {
    union {
        uint16_t leaf; /* index into phys_sections */
        uint16_t node; /* index into phys_map_nodes */
    } u;
};

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL ((uint16_t)~0)

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to MemoryRegionSections.  */
static PhysPageEntry phys_map = { .u.node = PHYS_MAP_NODE_NIL };

static void io_mem_init(void);
static void memory_map_init(void);

/* io memory support */
MemoryRegion *io_mem_region[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static MemoryRegion io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;

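/* Make the host memory holding generated code executable.  The POSIX
   variant below widens [addr, addr + size) out to host page boundaries,
   since mprotect() operates on whole pages.  */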
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

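/* Look up the PageDesc for target page 'index' in the l1_map radix tree,
   allocating intermediate tables and the bottom-level PageDesc array on
   the way down when 'alloc' is set; returns NULL for an unmapped page
   when 'alloc' is 0.  */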
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)

static PhysPageEntry *phys_map_node_alloc(uint16_t *ptr)
{
    unsigned i;
    uint16_t ret;

    /* Assign early to avoid the pointer being invalidated by g_renew() */
    *ptr = ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    if (ret == phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].u.node = PHYS_MAP_NODE_NIL;
    }
    return phys_map_nodes[ret];
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}

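/* Walk (and, if 'alloc' is set, grow) the physical radix tree down to the
   leaf entry for page 'index', initializing new bottom-level entries to
   the unassigned section; returns a pointer to the leaf's section index.  */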
static uint16_t *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageEntry *lp, *p;
    int i, j;

    lp = &phys_map;

    /* Level 1..N.  */
    for (i = P_L2_LEVELS - 1; i >= 0; i--) {
        if (lp->u.node == PHYS_MAP_NODE_NIL) {
            if (!alloc) {
                return NULL;
            }
            p = phys_map_node_alloc(&lp->u.node);
            if (i == 0) {
                for (j = 0; j < L2_SIZE; j++) {
                    p[j].u.leaf = phys_section_unassigned;
                }
            }
        } else {
            p = phys_map_nodes[lp->u.node];
        }
        lp = &p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    return &lp->u.leaf;
}

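/* Read-only lookup: return the MemoryRegionSection covering physical page
   'index' (the unassigned section if nothing is mapped there), clipped so
   that the returned section starts at that page.  */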
static MemoryRegionSection phys_page_find(target_phys_addr_t index)
{
    PhysPageEntry lp = phys_map;
    PhysPageEntry *p;
    int i;
    MemoryRegionSection section;
    target_phys_addr_t delta;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0; i--) {
        if (lp.u.node == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.u.node];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.u.leaf;
not_found:
    section = phys_sections[s_index];
    index <<= TARGET_PAGE_BITS;
    assert(section.offset_within_address_space <= index
           && index <= section.offset_within_address_space + section.size-1);
    delta = index - section.offset_within_address_space;
    section.offset_within_address_space += delta;
    section.offset_within_region += delta;
    section.size -= delta;
    return section;
}

static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc is used.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Keep the buffer no bigger than 16MB to branch between blocks */
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

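/* The per-page TB lists and the jump lists below store a small tag in the
   low two bits of each TranslationBlock pointer: 0 or 1 selects which of
   the TB's two pages (or jump slots) the link belongs to, and 2 marks the
   head of a circular jump list.  Hence the recurring '& 3' / '& ~3'.  */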
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

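/* Set 'len' bits starting at bit 'start' in the byte-array bitmap 'tab',
   handling the partial leading and trailing bytes separately.  */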
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

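/* Build a bitmap marking which bytes of the page are covered by translated
   code, so that tb_invalidate_phys_page_fast() can skip writes that cannot
   intersect any TB.  */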
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

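/* Translate a block starting at 'pc' and link it into the physical page
   tables.  If the TB pool or code buffer is exhausted, everything is
   flushed first, so callers must assume any previously obtained TB
   pointer is stale (tb_invalidated_flag is set in that case).  */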
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

1173/* len must be <= 8 and start must be a multiple of len */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001174static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
bellard9fa3e852004-01-04 18:06:42 +00001175{
1176 PageDesc *p;
1177 int offset, b;
bellard59817cc2004-02-16 22:01:13 +00001178#if 0
bellarda4193c82004-06-03 14:01:43 +00001179 if (1) {
aliguori93fcfe32009-01-15 22:34:14 +00001180 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1181 cpu_single_env->mem_io_vaddr, len,
1182 cpu_single_env->eip,
1183 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
bellard59817cc2004-02-16 22:01:13 +00001184 }
1185#endif
bellard9fa3e852004-01-04 18:06:42 +00001186 p = page_find(start >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001187 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00001188 return;
1189 if (p->code_bitmap) {
1190 offset = start & ~TARGET_PAGE_MASK;
1191 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1192 if (b & ((1 << len) - 1))
1193 goto do_invalidate;
1194 } else {
1195 do_invalidate:
bellardd720b932004-04-25 17:57:43 +00001196 tb_invalidate_phys_page_range(start, start + len, 1);
bellard9fa3e852004-01-04 18:06:42 +00001197 }
1198}
1199
bellard9fa3e852004-01-04 18:06:42 +00001200#if !defined(CONFIG_SOFTMMU)
Paul Brook41c1b1c2010-03-12 16:54:58 +00001201static void tb_invalidate_phys_page(tb_page_addr_t addr,
bellardd720b932004-04-25 17:57:43 +00001202 unsigned long pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00001203{
aliguori6b917542008-11-18 19:46:41 +00001204 TranslationBlock *tb;
bellard9fa3e852004-01-04 18:06:42 +00001205 PageDesc *p;
aliguori6b917542008-11-18 19:46:41 +00001206 int n;
bellardd720b932004-04-25 17:57:43 +00001207#ifdef TARGET_HAS_PRECISE_SMC
aliguori6b917542008-11-18 19:46:41 +00001208 TranslationBlock *current_tb = NULL;
bellardd720b932004-04-25 17:57:43 +00001209 CPUState *env = cpu_single_env;
aliguori6b917542008-11-18 19:46:41 +00001210 int current_tb_modified = 0;
1211 target_ulong current_pc = 0;
1212 target_ulong current_cs_base = 0;
1213 int current_flags = 0;
bellardd720b932004-04-25 17:57:43 +00001214#endif
bellard9fa3e852004-01-04 18:06:42 +00001215
1216 addr &= TARGET_PAGE_MASK;
1217 p = page_find(addr >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001218 if (!p)
bellardfd6ce8f2003-05-14 19:00:11 +00001219 return;
1220 tb = p->first_tb;
bellardd720b932004-04-25 17:57:43 +00001221#ifdef TARGET_HAS_PRECISE_SMC
1222 if (tb && pc != 0) {
1223 current_tb = tb_find_pc(pc);
1224 }
1225#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001226 while (tb != NULL) {
bellard9fa3e852004-01-04 18:06:42 +00001227 n = (long)tb & 3;
1228 tb = (TranslationBlock *)((long)tb & ~3);
bellardd720b932004-04-25 17:57:43 +00001229#ifdef TARGET_HAS_PRECISE_SMC
1230 if (current_tb == tb &&
pbrook2e70f6e2008-06-29 01:03:05 +00001231 (current_tb->cflags & CF_COUNT_MASK) != 1) {
bellardd720b932004-04-25 17:57:43 +00001232 /* If we are modifying the current TB, we must stop
1233 its execution. We could be more precise by checking
1234 that the modification is after the current PC, but it
1235 would require a specialized function to partially
1236 restore the CPU state */
ths3b46e622007-09-17 08:09:54 +00001237
bellardd720b932004-04-25 17:57:43 +00001238 current_tb_modified = 1;
Stefan Weil618ba8e2011-04-18 06:39:53 +00001239 cpu_restore_state(current_tb, env, pc);
aliguori6b917542008-11-18 19:46:41 +00001240 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1241 &current_flags);
bellardd720b932004-04-25 17:57:43 +00001242 }
1243#endif /* TARGET_HAS_PRECISE_SMC */
bellard9fa3e852004-01-04 18:06:42 +00001244 tb_phys_invalidate(tb, addr);
1245 tb = tb->page_next[n];
bellardfd6ce8f2003-05-14 19:00:11 +00001246 }
1247 p->first_tb = NULL;
bellardd720b932004-04-25 17:57:43 +00001248#ifdef TARGET_HAS_PRECISE_SMC
1249 if (current_tb_modified) {
1250 /* we generate a block containing just the instruction
1251 modifying the memory. It will ensure that it cannot modify
1252 itself */
bellardea1c1802004-06-14 18:56:36 +00001253 env->current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001254 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
bellardd720b932004-04-25 17:57:43 +00001255 cpu_resume_from_signal(env, puc);
1256 }
1257#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001258}
bellard9fa3e852004-01-04 18:06:42 +00001259#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001260
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page to be non-writable (writes will then
           take a page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
             addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
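/* Illustrative sketch (disabled; not part of the build): the page lists
   above rely on pointer tagging.  A TB can span at most two guest pages,
   and p->first_tb encodes which of the TB's page_next[] links continues
   the chain in the low two bits of the pointer, which are free because
   TranslationBlock structures are more than 4-byte aligned. */
#if 0
TranslationBlock *tagged = (TranslationBlock *)((long)tb | n);   /* encode, n is 0 or 1 */
unsigned int idx = (long)tagged & 3;                             /* recover the page index */
TranslationBlock *tbp = (TranslationBlock *)((long)tagged & ~3); /* recover the pointer */
TranslationBlock *next = tbp->page_next[idx];                    /* follow the chain */
#endif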

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done. */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
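/* Usage sketch (disabled): callers such as tb_gen_code() pass
   phys_page2 == -1 unless the translated code straddles a guest page
   boundary; the second page is then looked up explicitly: */
#if 0
tb_page_addr_t phys_pc = get_page_addr_code(env, pc);
tb_page_addr_t phys_page2 = -1;
target_ulong virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
if ((pc & TARGET_PAGE_MASK) != virt_page2) {
    phys_page2 = get_page_addr_code(env, virt_page2);
}
tb_link_page(tb, phys_pc, phys_page2);
#endif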

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
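/* Worked example (illustrative): with nb_tbs == 3 and tc_ptr values
   {0x1000, 0x1040, 0x10a0}, a query for 0x1050 terminates with
   m_max == 1 and returns &tbs[1], the TB whose generated code starts at
   0x1040 and contains the queried host PC.  This relies on tbs[] being
   ordered by ascending tc_ptr, which holds because host code is emitted
   sequentially from code_gen_buffer. */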

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for (;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for (;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    ram_addr_t ram_addr;
    MemoryRegionSection section;

    addr = cpu_get_phys_page_debug(env, pc);
    section = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!(memory_region_is_ram(section.mr)
          || (section.mr->rom_device && section.mr->readable))) {
        return;
    }
    ram_addr = (memory_region_get_ram_addr(section.mr)
                + section.offset_within_region) & TARGET_PAGE_MASK;
    ram_addr |= (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint. */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint. */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference. */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints. */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif
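/* Usage sketch (disabled): watchpoint lengths must be a power of two
   (1, 2, 4 or 8 bytes) and the address must be aligned to the length,
   otherwise -EINVAL is returned by the sanity check above. */
#if 0
CPUWatchpoint *wp;
if (cpu_watchpoint_insert(env, 0x1000, 4, BP_MEM_WRITE, &wp) == 0) {
    /* ... run guest code; wp->flags gains BP_WATCHPOINT_HIT on a hit ... */
    cpu_watchpoint_remove_by_ref(env, wp);
}
#endif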

/* Add a breakpoint. */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint. */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference. */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}
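/* Usage sketch (disabled): because BP_GDB entries are inserted at the
   head, gdbstub-owned breakpoints are found before BP_CPU ones when
   both exist at the same pc. */
#if 0
CPUBreakpoint *bp;
cpu_breakpoint_insert(env, pc, BP_GDB, &bp);
/* ... resume the guest; EXCP_DEBUG is raised when pc is reached ... */
cpu_breakpoint_remove(env, pc, BP_GDB);
#endif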

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

/* enable or disable low level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif defined(_WIN32)
        /* Win32 doesn't support line-buffering, so use unbuffered output. */
        setvbuf(logfile, NULL, _IONBF, 0);
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls. */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TBs */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}

#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
        return;
    }

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu_unlink_tb(env);
    }
}

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *env, int mask)
{
    env->interrupt_request |= mask;
    cpu_unlink_tb(env);
}
#endif /* CONFIG_USER_ONLY */
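/* Note on the icount path above (conceptual, not part of the build):
   every TB begins with a test of env->icount_decr.u32; writing 0xffff
   into the high half makes that 32-bit value negative, so execution
   falls out to the main loop at the next TB boundary without having to
   unchain anything:

       if ((int32_t)(env->icount_decr.u32 - num_insns) < 0)
           exit to the main loop and service interrupt_request;
*/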

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}

const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for (;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for (item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for (item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
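/* Example (disabled): parsing a '-d' style option string into a mask. */
#if 0
int mask = cpu_str_to_log_mask("in_asm,op");
/* mask == (CPU_LOG_TB_IN_ASM | CPU_LOG_TB_OP); a return of 0 means a
   parse error, and "all" selects every item in cpu_log_items[] */
if (mask) {
    cpu_set_log(mask);
}
#endif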

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       The copy inherited the parent's list heads from the memcpy above,
       so reinitialize the copy's lists before cloning the entries.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&new_env->breakpoints);
    QTAILQ_INIT(&new_env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page. */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

static CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read = -1,
    .addr_write = -1,
    .addr_code = -1,
    .addend = -1,
};

/* NOTE:
 * If flush_global is true (the usual case), flush all tlb entries.
 * If flush_global is false, flush (at least) all tlb entries not
 * marked global.
 *
 * Since QEMU doesn't currently implement a global/not-global flag
 * for tlb entries, at the moment tlb_flush() will also flush all
 * tlb entries in the flush_global == false case. This is OK because
 * CPU architectures generally permit an implementation to drop
 * entries from the TLB at any time, so flushing more entries than
 * required is only an efficiency issue, not a correctness issue.
 */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for (i = 0; i < CPU_TLB_SIZE; i++) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
        }
    }

    memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));

    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
    tlb_flush_count++;
}
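/* Usage sketch (disabled, hypothetical x86-style caller): target code
   invokes tlb_flush() whenever a change invalidates mappings wholesale,
   e.g. on a page-table base switch.  flush_global == 0 still flushes
   everything here, as the comment above explains. */
#if 0
env->cr[3] = new_cr3;   /* hypothetical base-register switch */
tlb_flush(env, 0);
#endif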

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        *tlb_entry = s_cputlb_empty_entry;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* Check if we need to flush due to large pages. */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        tlb_flush(env, 1);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

    tlb_flush_jmp_cache(env, addr);
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}

/* Note: start and end must be within the same ram block. */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
        != (end - 1) - start) {
        abort();
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for (i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
        }
    }
}

int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;
    void *p;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
            + tlb_entry->addend);
        ram_addr = qemu_ram_addr_from_host_nofail(p);
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    int mmu_idx;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        for (i = 0; i < CPU_TLB_SIZE; i++)
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
    }
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated. */
static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB. */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}
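/* Worked example (illustrative, assuming a 32-bit target_ulong): insert
   a 2MB page at vaddr 0x00200000, then another at 0x40400000.  The first
   call records tlb_flush_addr = 0x00200000 with
   tlb_flush_mask = 0xffe00000.  On the second call the loop widens the
   mask until (0x00200000 ^ 0x40400000) & mask == 0, i.e.
   mask == 0x80000000, so one coarse region (here the low 2GB) is tracked
   instead of a per-page list, and tlb_flush_page() falls back to a full
   flush for any address inside it. */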

static bool is_ram_rom(MemoryRegionSection *s)
{
    return memory_region_is_ram(s->mr);
}

static bool is_romd(MemoryRegionSection *s)
{
    MemoryRegion *mr = s->mr;

    return mr->rom_device && mr->readable;
}

static bool is_ram_rom_romd(MemoryRegionSection *s)
{
    return is_ram_rom(s) || is_romd(s);
}

/* Add a new TLB entry. At most one entry for a given virtual address
   is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
   supplied size is only used by tlb_flush_page. */
void tlb_set_page(CPUState *env, target_ulong vaddr,
                  target_phys_addr_t paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    MemoryRegionSection section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    unsigned long addend;
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    target_phys_addr_t iotlb;

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }
    section = phys_page_find(paddr >> TARGET_PAGE_BITS);
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
           " prot=%x idx=%d\n",
           vaddr, paddr, prot, mmu_idx);
#endif

    address = vaddr;
    if (!is_ram_rom_romd(&section)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    if (is_ram_rom_romd(&section)) {
        addend = (unsigned long)(memory_region_get_ram_ptr(section.mr)
                                 + section.offset_within_region);
    } else {
        addend = 0;
    }
    if (is_ram_rom(&section)) {
        /* Normal RAM. */
        iotlb = (memory_region_get_ram_addr(section.mr)
                 + section.offset_within_region) & TARGET_PAGE_MASK;
        if (!section.readonly)
            iotlb |= io_mem_notdirty.ram_addr;
        else
            iotlb |= io_mem_rom.ram_addr;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address. */
        iotlb = memory_region_get_ram_addr(section.mr) & ~TARGET_PAGE_MASK;
        iotlb += section.offset_within_region;
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines. */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = io_mem_watch.ram_addr + paddr;
                address |= TLB_MMIO;
                break;
            }
        }
    }

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section.mr) && section.readonly)
            || is_romd(&section)) {
            /* Write access calls the I/O callback. */
            te->addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section.mr)
                   && !cpu_physical_memory_is_dirty(
                           section.mr->ram_addr
                           + section.offset_within_region)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}
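/* Summary (illustrative) of the addr_write encodings produced above:
     writable RAM, dirty            -> address                (direct store)
     writable RAM, not yet dirty    -> address | TLB_NOTDIRTY (slow path sets
                                       the dirty bits first)
     ROM, read-only RAM, romd, MMIO -> address | TLB_MMIO     (I/O callback)
     no PAGE_WRITE permission       -> -1                     (always faults) */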

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */

struct walk_memory_regions_data
{
    walk_memory_regions_fn fn;
    void *priv;
    unsigned long start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   abi_ulong end, int new_prot)
{
    if (data->start != -1ul) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1ul);
    data->prot = new_prot;

    return 0;
}

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                         (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    unsigned long i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}

static int dump_region(void *priv, abi_ulong start,
                       abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
                   " "TARGET_ABI_FMT_lx" %c%c%c\n",
                   start, end, end - start,
                   ((prot & PAGE_READ) ? 'r' : '-'),
                   ((prot & PAGE_WRITE) ? 'w' : '-'),
                   ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
                   "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}
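/* Example output (illustrative), matching the dump_region() format above:

   start    end      size     prot
   00400000-00452000 00052000 r-x
   00651000-00652000 00001000 rw-
*/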

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held. */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid. */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside. */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid. */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around. */
        return -1;
    }

    end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p)
            return -1;
        if (!(p->flags & PAGE_VALID))
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            return 0;
        }
    }
    return 0;
}
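/* Usage sketch (disabled): user-mode syscall emulation typically
   validates a guest buffer before touching it (TARGET_EFAULT as used by
   the linux-user code is assumed here): */
#if 0
if (page_check_range(guest_addr, len, PAGE_READ | PAGE_WRITE) < 0) {
    return -TARGET_EFAULT;
}
#endif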
2475
bellard9fa3e852004-01-04 18:06:42 +00002476/* called from signal handler: invalidate the code and unprotect the
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002477 page. Return TRUE if the fault was successfully handled. */
pbrook53a59602006-03-25 19:31:22 +00002478int page_unprotect(target_ulong address, unsigned long pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00002479{
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002480 unsigned int prot;
2481 PageDesc *p;
pbrook53a59602006-03-25 19:31:22 +00002482 target_ulong host_start, host_end, addr;
bellard9fa3e852004-01-04 18:06:42 +00002483
pbrookc8a706f2008-06-02 16:16:42 +00002484 /* Technically this isn't safe inside a signal handler. However we
2485 know this only ever happens in a synchronous SEGV handler, so in
2486 practice it seems to be ok. */
2487 mmap_lock();
2488
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002489 p = page_find(address >> TARGET_PAGE_BITS);
2490 if (!p) {
pbrookc8a706f2008-06-02 16:16:42 +00002491 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002492 return 0;
pbrookc8a706f2008-06-02 16:16:42 +00002493 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002494
bellard9fa3e852004-01-04 18:06:42 +00002495 /* if the page was really writable, then we change its
2496 protection back to writable */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002497 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2498 host_start = address & qemu_host_page_mask;
2499 host_end = host_start + qemu_host_page_size;
2500
2501 prot = 0;
2502 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2503 p = page_find(addr >> TARGET_PAGE_BITS);
2504 p->flags |= PAGE_WRITE;
2505 prot |= p->flags;
2506
bellard9fa3e852004-01-04 18:06:42 +00002507 /* and since the content will be modified, we must invalidate
2508 the corresponding translated code. */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002509 tb_invalidate_phys_page(addr, pc, puc);
bellard9fa3e852004-01-04 18:06:42 +00002510#ifdef DEBUG_TB_CHECK
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002511 tb_invalidate_check(addr);
bellard9fa3e852004-01-04 18:06:42 +00002512#endif
bellard9fa3e852004-01-04 18:06:42 +00002513 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002514 mprotect((void *)g2h(host_start), qemu_host_page_size,
2515 prot & PAGE_BITS);
2516
2517 mmap_unlock();
2518 return 1;
bellard9fa3e852004-01-04 18:06:42 +00002519 }
pbrookc8a706f2008-06-02 16:16:42 +00002520 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002521 return 0;
2522}
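
/*
 * Illustrative sketch (not part of the build): typical use of
 * page_unprotect() from a host SIGSEGV handler.  handle_write_fault()
 * is a hypothetical stand-in for the real per-host handler; the guest
 * address and host PC are assumed to have been extracted from the
 * signal context already.
 */
#if 0
static int handle_write_fault(target_ulong guest_addr, unsigned long host_pc,
                              void *sigctx)
{
    /* A write to a page holding translated code faulted; try to recover. */
    if (page_unprotect(guest_addr, host_pc, sigctx)) {
        return 1;   /* fault handled, restart the faulting instruction */
    }
    return 0;       /* genuine fault, deliver it to the guest */
}
#endif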

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    target_phys_addr_t base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init (target_phys_addr_t base, uint16_t *section_ind,
                                uint16_t orig_section);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)

static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->u.node == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = phys_map_nodes[lp->u.node];
    for (i = 0; i < L2_SIZE; ++i) {
        if (level > 0) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].u.leaf);
        }
    }
    lp->u.node = PHYS_MAP_NODE_NIL;
}

static void destroy_all_mappings(void)
{
    destroy_l2_mapping(&phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}

static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}

/* Register a physical memory section.  For RAM, the section size must be
   a multiple of the target page size.  If the section does not start or
   end on a target page boundary, the partially covered pages are handled
   through subpages, so that only the covered byte range of each such page
   is redirected to this section. */
void cpu_register_physical_memory_log(MemoryRegionSection *section,
                                      bool readonly)
{
    target_phys_addr_t start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    target_phys_addr_t addr, end_addr;
    ram_addr_t orig_size = size;
    subpage_t *subpage;
    uint16_t section_index = phys_section_add(section);

    assert(size);

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;

    addr = start_addr;
    do {
        uint16_t *p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
        uint16_t orig_memory = *p;
        target_phys_addr_t start_addr2, end_addr2;
        int need_subpage = 0;
        MemoryRegion *mr = phys_sections[orig_memory].mr;

        CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                      need_subpage);
        if (need_subpage) {
            if (!(mr->subpage)) {
                subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                       p, orig_memory);
            } else {
                subpage = container_of(mr, subpage_t, iomem);
            }
            subpage_register(subpage, start_addr2, end_addr2,
                             section_index);
        } else {
            *p = section_index;
        }
        addr += TARGET_PAGE_SIZE;
    } while (addr != end_addr);
}
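
/*
 * Illustrative sketch (not part of the build): registering a section by
 * hand.  In the real flow the core MemoryListener below builds the
 * section; the region 'mr' and the addresses here are made up for
 * illustration.
 */
#if 0
static void example_register(MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .mr = mr,
        .offset_within_address_space = 0x10000,
        .offset_within_region = 0,
        .size = 2 * TARGET_PAGE_SIZE,
    };

    cpu_register_physical_memory_log(&section, false);
}
#endif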

void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    unlink(filename);
    free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages when
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }
    block->fd = fd;
    return area;
}
#endif

static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    if (QLIST_EMPTY(&ram_list.blocks))
        return 0;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QLIST_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}
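
/*
 * Worked example (numbers made up for illustration): with existing blocks
 * covering [0, 0x8000000) and [0xc000000, 0x10000000), a request for
 * size 0x2000000 considers the gap [0x8000000, 0xc000000) after the first
 * block; it fits and is the smallest candidate gap, so find_ram_offset()
 * returns 0x8000000.
 */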

static ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QLIST_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
        char *id = dev->parent_bus->info->get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
}

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
            /* S390 KVM requires the topmost vma of the RAM to be smaller than
               a system-defined value, which is at least 256GB.  Larger systems
               have larger values.  We put the guest between the end of the
               data segment (system break) and this value.  We use 32GB as a
               base to have enough room for the system break to grow. */
            new_block->host = mmap((void*)0x800000000, size,
                                   PROT_EXEC|PROT_READ|PROT_WRITE,
                                   MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
            if (new_block->host == MAP_FAILED) {
                fprintf(stderr, "Allocating RAM failed\n");
                abort();
            }
#else
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
#endif
            qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
        }
    }
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}
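
/*
 * Illustrative sketch (not part of the build): a device model allocating
 * its RAM.  The MemoryRegion setup is abbreviated and the "vga.vram"
 * name is hypothetical.
 */
#if 0
static void example_alloc_vram(MemoryRegion *vram_region)
{
    ram_addr_t vram_offset;

    /* 8 MB of guest-visible RAM backed by host memory allocated by QEMU */
    vram_offset = qemu_ram_alloc(8 * 1024 * 1024, vram_region);
    qemu_ram_set_idstr(vram_offset, "vga.vram", NULL);
}
#endif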

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            g_free(block);
            return;
        }
    }
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            g_free(block);
            return;
        }
    }
}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            /* Move this entry to the start of the list.  */
            if (block != QLIST_FIRST(&ram_list.blocks)) {
                QLIST_REMOVE(block, next);
                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
            }
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}
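
/*
 * Illustrative sketch (not part of the build): touching guest RAM through
 * qemu_get_ram_ptr().  This is fine for device-local memory the caller
 * owns; general-purpose DMA should go through cpu_physical_memory_rw() or
 * cpu_physical_memory_map() instead.  'vram_offset' is hypothetical.
 */
#if 0
static void example_clear_vram(ram_addr_t vram_offset)
{
    uint8_t *host = qemu_get_ram_ptr(vram_offset);

    memset(host, 0, TARGET_PAGE_SIZE);  /* must stay within the block */
    qemu_put_ram_ptr(host);
}
#endif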

/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
 */
void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QLIST_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}
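
/*
 * Illustrative sketch (not part of the build): mapping at most 'len'
 * bytes and honouring the clamp qemu_ram_ptr_length() may apply at a
 * block boundary.  process_bytes() is hypothetical.
 */
#if 0
static void example_read_clamped(ram_addr_t addr, ram_addr_t len)
{
    ram_addr_t size = len;
    void *host = qemu_ram_ptr_length(addr, &size);

    if (host) {
        /* 'size' may now be smaller than the requested 'len' */
        process_bytes(host, size);
    }
}
#endif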

void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);
}

int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}
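
/*
 * Illustrative sketch (not part of the build): translating a host pointer
 * (e.g. one recovered from a TLB entry) back into a ram_addr_t, with
 * explicit error handling instead of the _nofail variant.
 */
#if 0
static bool example_host_to_ram(void *host_ptr, ram_addr_t *out)
{
    if (qemu_ram_addr_from_host(host_ptr, out) < 0) {
        return false;   /* pointer is not inside any RAMBlock */
    }
    return true;
}
#endif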

static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
#endif
    return 0;
}

static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
#endif
}

static const MemoryRegionOps unassigned_mem_ops = {
    .read = unassigned_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
                               unsigned size)
{
    abort();
}

static void error_mem_write(void *opaque, target_phys_addr_t addr,
                            uint64_t value, unsigned size)
{
    abort();
}

static const MemoryRegionOps error_mem_ops = {
    .read = error_mem_read,
    .write = error_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const MemoryRegionOps rom_mem_ops = {
    .read = error_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static const MemoryRegionOps notdirty_mem_ops = {
    .read = error_mem_read,
    .write = notdirty_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB.  Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, target_phys_addr_t addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    /* each case must break, or every write falls through into abort() */
    switch (size) {
    case 1:
        stb_phys(addr, val);
        break;
    case 2:
        stw_phys(addr, val);
        break;
    case 4:
        stl_phys(addr, val);
        break;
    default:
        abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
                             unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    return io_mem_read(section->mr->ram_addr, addr, len);
}

static void subpage_write(void *opaque, target_phys_addr_t addr,
                          uint64_t value, unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx
           " idx %d value %"PRIx64"\n",
           __func__, mmio, len, addr, idx, value);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    io_mem_write(section->mr->ram_addr, addr, value, len);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
                                 unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return ldub_p(ptr);
    case 2: return lduw_p(ptr);
    case 4: return ldl_p(ptr);
    default: abort();
    }
}

static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
                              uint64_t value, unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return stb_p(ptr, value);
    case 2: return stw_p(ptr, value);
    case 4: return stl_p(ptr, value);
    default: abort();
    }
}

static const MemoryRegionOps subpage_ram_ops = {
    .read = subpage_ram_read,
    .write = subpage_ram_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    if (memory_region_is_ram(phys_sections[section].mr)) {
        MemoryRegionSection new_section = phys_sections[section];
        new_section.mr = &io_mem_subpage_ram;
        section = phys_section_add(&new_section);
    }
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

static subpage_t *subpage_init (target_phys_addr_t base, uint16_t *section_ind,
                                uint16_t orig_section)
{
    subpage_t *mmio;
    MemoryRegionSection section = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    };

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->base = base;
    memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
    section.mr = &mmio->iomem;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x orig section %d\n",
           __func__, mmio, base, TARGET_PAGE_SIZE, orig_section);
#endif
    *section_ind = phys_section_add(&section);
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_section);

    return mmio;
}
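
/*
 * Worked example (addresses made up for illustration): registering a
 * 512-byte device section at guest address 0x10000200, with a 0x1000-byte
 * target page, makes cpu_register_physical_memory_log() create a subpage
 * for page 0x10000000; subpage_register() then points sub_section indices
 * 0x200..0x3ff at the device section, while the remaining bytes of the
 * page keep whatever section was there before.
 */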

static int get_free_io_mem_idx(void)
{
    int i;

    for (i = 0; i < IO_MEM_NB_ENTRIES; i++)
        if (!io_mem_used[i]) {
            io_mem_used[i] = 1;
            return i;
        }
    fprintf(stderr, "Ran out of io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
    return -1;
}

/* Register an I/O memory region.  If io_index is non-zero, the
   corresponding io zone is modified.  If it is zero, a new io zone is
   allocated.  The returned value can be used with
   cpu_register_physical_memory().  (-1) is returned on error. */
static int cpu_register_io_memory_fixed(int io_index, MemoryRegion *mr)
{
    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    io_mem_region[io_index] = mr;

    return io_index;
}

int cpu_register_io_memory(MemoryRegion *mr)
{
    return cpu_register_io_memory_fixed(0, mr);
}
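
/*
 * Illustrative sketch (not part of the build): registering an MMIO region
 * to obtain an io_mem index, then releasing it.  'example_ops' is a
 * hypothetical MemoryRegionOps table.
 */
#if 0
static MemoryRegion example_region;

static void example_register_mmio(void)
{
    int io_index;

    memory_region_init_io(&example_region, &example_ops, NULL,
                          "example-mmio", 0x1000);
    io_index = cpu_register_io_memory(&example_region);
    /* ... later, when the device goes away ... */
    cpu_unregister_io_memory(io_index);
}
#endif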

void cpu_unregister_io_memory(int io_index)
{
    io_mem_region[io_index] = NULL;
    io_mem_used[io_index] = 0;
}

static uint16_t dummy_section(MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = UINT64_MAX,
    };

    return phys_section_add(&section);
}

static void io_mem_init(void)
{
    int i;

    /* Must be first: */
    memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
    assert(io_mem_ram.ram_addr == 0);
    memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
                          "subpage-ram", UINT64_MAX);
    for (i = 0; i < 5; i++)
        io_mem_used[i] = 1;

    memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}

static void core_begin(MemoryListener *listener)
{
    destroy_all_mappings();
    phys_sections_clear();
    phys_map.u.node = PHYS_MAP_NODE_NIL;
    phys_section_unassigned = dummy_section(&io_mem_unassigned);
}

static void core_commit(MemoryListener *listener)
{
    CPUState *env;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}

static void core_region_add(MemoryListener *listener,
                            MemoryRegionSection *section)
{
    cpu_register_physical_memory_log(section, section->readonly);
}

static void core_region_del(MemoryListener *listener,
                            MemoryRegionSection *section)
{
}

static void core_region_nop(MemoryListener *listener,
                            MemoryRegionSection *section)
{
    cpu_register_physical_memory_log(section, section->readonly);
}

static void core_log_start(MemoryListener *listener,
                           MemoryRegionSection *section)
{
}

static void core_log_stop(MemoryListener *listener,
                          MemoryRegionSection *section)
{
}

static void core_log_sync(MemoryListener *listener,
                          MemoryRegionSection *section)
{
}

static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(1);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(0);
}

static void core_eventfd_add(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool match_data, uint64_t data, int fd)
{
}

static void core_eventfd_del(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool match_data, uint64_t data, int fd)
{
}

static void io_begin(MemoryListener *listener)
{
}

static void io_commit(MemoryListener *listener)
{
}

static void io_region_add(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    iorange_init(&section->mr->iorange, &memory_region_iorange_ops,
                 section->offset_within_address_space, section->size);
    ioport_register(&section->mr->iorange);
}

static void io_region_del(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    isa_unassign_ioport(section->offset_within_address_space, section->size);
}

static void io_region_nop(MemoryListener *listener,
                          MemoryRegionSection *section)
{
}

static void io_log_start(MemoryListener *listener,
                         MemoryRegionSection *section)
{
}

static void io_log_stop(MemoryListener *listener,
                        MemoryRegionSection *section)
{
}

static void io_log_sync(MemoryListener *listener,
                        MemoryRegionSection *section)
{
}

static void io_log_global_start(MemoryListener *listener)
{
}

static void io_log_global_stop(MemoryListener *listener)
{
}

static void io_eventfd_add(MemoryListener *listener,
                           MemoryRegionSection *section,
                           bool match_data, uint64_t data, int fd)
{
}

static void io_eventfd_del(MemoryListener *listener,
                           MemoryRegionSection *section,
                           bool match_data, uint64_t data, int fd)
{
}

static MemoryListener core_memory_listener = {
    .begin = core_begin,
    .commit = core_commit,
    .region_add = core_region_add,
    .region_del = core_region_del,
    .region_nop = core_region_nop,
    .log_start = core_log_start,
    .log_stop = core_log_stop,
    .log_sync = core_log_sync,
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
    .eventfd_add = core_eventfd_add,
    .eventfd_del = core_eventfd_del,
    .priority = 0,
};

static MemoryListener io_memory_listener = {
    .begin = io_begin,
    .commit = io_commit,
    .region_add = io_region_add,
    .region_del = io_region_del,
    .region_nop = io_region_nop,
    .log_start = io_log_start,
    .log_stop = io_log_stop,
    .log_sync = io_log_sync,
    .log_global_start = io_log_global_start,
    .log_global_stop = io_log_global_stop,
    .eventfd_add = io_eventfd_add,
    .eventfd_del = io_eventfd_del,
    .priority = 0,
};

static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, "system", INT64_MAX);
    set_system_memory_map(system_memory);

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init(system_io, "io", 65536);
    set_system_io_map(system_io);

    memory_listener_register(&core_memory_listener, system_memory);
    memory_listener_register(&io_memory_listener, system_io);
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
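
/*
 * Illustrative sketch (not part of the build): a debugger-style read of
 * guest memory in user-mode emulation.  'env' and the address are
 * assumptions supplied by the caller.
 */
#if 0
static bool example_peek(CPUState *env, target_ulong guest_addr)
{
    uint8_t buf[16];

    if (cpu_memory_rw_debug(env, guest_addr, buf, sizeof(buf), 0) < 0) {
        return false;   /* page not mapped or not readable */
    }
    return true;
}
#endif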

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    MemoryRegionSection section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        if (is_write) {
            if (!memory_region_is_ram(section.mr)) {
                target_phys_addr_t addr1;
                io_index = memory_region_get_ram_addr(section.mr)
                    & (IO_MEM_NB_ENTRIES - 1);
                addr1 = (addr & ~TARGET_PAGE_MASK)
                    + section.offset_within_region;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write(io_index, addr1, val, 4);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write(io_index, addr1, val, 2);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write(io_index, addr1, val, 1);
                    l = 1;
                }
            } else if (!section.readonly) {
                ram_addr_t addr1;
                addr1 = (memory_region_get_ram_addr(section.mr)
                         + section.offset_within_region)
                    | (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                qemu_put_ram_ptr(ptr);
            }
        } else {
            if (!is_ram_rom_romd(&section)) {
                target_phys_addr_t addr1;
                /* I/O case */
                io_index = memory_region_get_ram_addr(section.mr)
                    & (IO_MEM_NB_ENTRIES - 1);
                addr1 = (addr & ~TARGET_PAGE_MASK)
                    + section.offset_within_region;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read(io_index, addr1, 4);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read(io_index, addr1, 2);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read(io_index, addr1, 1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(section.mr->ram_addr
                                       + section.offset_within_region);
                memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
                qemu_put_ram_ptr(ptr);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
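
/* Illustrative sketch (not part of the original file, kept compiled out):
   a typical device-model use of cpu_physical_memory_rw().  The descriptor
   layout and EXAMPLE_DESC_ADDR are hypothetical. */
#if 0
static void example_copy_descriptor(void)
{
    uint8_t desc[16];

    /* read 16 bytes of guest-physical memory; the helper splits the
       access on page boundaries and routes each page to RAM or I/O */
    cpu_physical_memory_rw(EXAMPLE_DESC_ADDR, desc, sizeof(desc), 0);
    desc[0] |= 1; /* e.g. mark the descriptor as consumed */
    cpu_physical_memory_rw(EXAMPLE_DESC_ADDR, desc, sizeof(desc), 1);
}
#endif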

/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    MemoryRegionSection section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        if (!is_ram_rom_romd(&section)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (memory_region_get_ram_addr(section.mr)
                     + section.offset_within_region)
                + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            qemu_put_ram_ptr(ptr);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
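
/* Illustrative sketch (compiled out): firmware loaders use the ROM variant
   because a write through cpu_physical_memory_rw() is silently discarded
   for a read-only region (see the is_write path above).  "blob", "size"
   and "bios_addr" are hypothetical. */
#if 0
static void example_load_firmware(const uint8_t *blob, int size,
                                  target_phys_addr_t bios_addr)
{
    cpu_physical_memory_write_rom(bios_addr, blob, size);
}
#endif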

typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;
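
/* Note: there is a single static bounce buffer, so at most one mapping of
   a region that is not directly accessible RAM can be outstanding at any
   time; this is why cpu_physical_memory_map() below may return NULL and
   why the map-client notification list exists. */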

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
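
/* Illustrative sketch (compiled out): how a caller of
   cpu_physical_memory_map() can use the client list above to retry once
   the bounce buffer is free.  ExampleDev and example_dev_restart_dma()
   are hypothetical. */
#if 0
static void example_map_retry(void *opaque)
{
    ExampleDev *dev = opaque;

    /* cpu_notify_map_clients() unregisters each client after running its
       callback, so there is no need to unregister here; re-register if
       another retry may be needed */
    example_dev_restart_dma(dev);
}

static void example_dev_defer_dma(ExampleDev *dev)
{
    dev->map_client = cpu_register_map_client(dev, example_map_retry);
}
#endif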

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t todo = 0;
    int l;
    target_phys_addr_t page;
    MemoryRegionSection section;
    ram_addr_t raddr = RAM_ADDR_MAX;
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        if (!(memory_region_is_ram(section.mr) && !section.readonly)) {
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_read(addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            raddr = memory_region_get_ram_addr(section.mr)
                + section.offset_within_region
                + (addr & ~TARGET_PAGE_MASK);
        }

        len -= l;
        addr += l;
        todo += l;
    }
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
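
/* Illustrative sketch (compiled out): the usual zero-copy pattern around
   the map/unmap pair above - map, access the host pointer, unmap with the
   length actually used, and loop because less than the requested length
   may be mapped.  All names except the two QEMU calls are hypothetical. */
#if 0
static void example_dma_to_guest(target_phys_addr_t dma_addr,
                                 const uint8_t *data,
                                 target_phys_addr_t size)
{
    while (size) {
        target_phys_addr_t plen = size;
        void *host = cpu_physical_memory_map(dma_addr, &plen, 1);

        if (!host) {
            /* resources exhausted (e.g. the bounce buffer is in use):
               fail the transfer or retry via cpu_register_map_client() */
            break;
        }
        memcpy(host, data, plen);
        cpu_physical_memory_unmap(host, plen, 1, plen);
        dma_addr += plen;
        data += plen;
        size -= plen;
    }
}
#endif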

/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    MemoryRegionSection section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!is_ram_rom_romd(&section)) {
        /* I/O case */
        io_index = memory_region_get_ram_addr(section.mr)
            & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
        val = io_mem_read(io_index, addr, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section.mr)
                                & TARGET_PAGE_MASK)
                               + section.offset_within_region) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
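
/* Note on the three wrappers above: they differ only in the byte swap
   applied relative to the target's endianness.  A device with
   little-endian registers should be read with the _le_ variant so the
   result is the same on big- and little-endian targets; the unsuffixed
   form preserves the legacy target-endian behaviour. */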

/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!is_ram_rom_romd(&section)) {
        /* I/O case */
        io_index = memory_region_get_ram_addr(section.mr)
            & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;

        /* XXX This is broken when device endian != cpu endian.
           Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = io_mem_read(io_index, addr, 4) << 32;
        val |= io_mem_read(io_index, addr + 4, 4);
#else
        val = io_mem_read(io_index, addr, 4);
        val |= io_mem_read(io_index, addr + 4, 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section.mr)
                                & TARGET_PAGE_MASK)
                               + section.offset_within_region)
            + (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
                                          enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!is_ram_rom_romd(&section)) {
        /* I/O case */
        io_index = memory_region_get_ram_addr(section.mr)
            & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
        val = io_mem_read(io_index, addr, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section.mr)
                                & TARGET_PAGE_MASK)
                               + section.offset_within_region)
            + (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    MemoryRegionSection section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section.mr) || section.readonly) {
        if (memory_region_is_ram(section.mr)) {
            io_index = io_mem_rom.ram_addr;
        } else {
            io_index = memory_region_get_ram_addr(section.mr);
        }
        addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
        io_mem_write(io_index, addr, val, 4);
    } else {
        unsigned long addr1 = (memory_region_get_ram_addr(section.mr)
                               & TARGET_PAGE_MASK)
            + section.offset_within_region
            + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
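
/* Illustrative sketch (compiled out): the typical _notdirty caller is a
   softmmu page-table walker updating accessed/dirty bits in a guest PTE,
   where the write must not disturb dirty tracking or invalidate translated
   code on the page holding the PTE.  EXAMPLE_PTE_ACCESSED and the flat
   32-bit PTE layout are hypothetical. */
#if 0
static void example_mark_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    stl_phys_notdirty(pte_addr, pte | EXAMPLE_PTE_ACCESSED);
}
#endif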

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    MemoryRegionSection section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section.mr) || section.readonly) {
        if (memory_region_is_ram(section.mr)) {
            io_index = io_mem_rom.ram_addr;
        } else {
            io_index = memory_region_get_ram_addr(section.mr)
                & (IO_MEM_NB_ENTRIES - 1);
        }
        addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write(io_index, addr, val >> 32, 4);
        io_mem_write(io_index, addr + 4, (uint32_t)val, 4);
#else
        io_mem_write(io_index, addr, (uint32_t)val, 4);
        io_mem_write(io_index, addr + 4, val >> 32, 4);
#endif
    } else {
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section.mr)
                                & TARGET_PAGE_MASK)
                               + section.offset_within_region)
            + (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    MemoryRegionSection section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section.mr) || section.readonly) {
        if (memory_region_is_ram(section.mr)) {
            io_index = io_mem_rom.ram_addr;
        } else {
            io_index = memory_region_get_ram_addr(section.mr)
                & (IO_MEM_NB_ENTRIES - 1);
        }
        addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(io_index, addr, val, 4);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section.mr) & TARGET_PAGE_MASK)
            + section.offset_within_region
            + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    MemoryRegionSection section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section.mr) || section.readonly) {
        if (memory_region_is_ram(section.mr)) {
            io_index = io_mem_rom.ram_addr;
        } else {
            io_index = memory_region_get_ram_addr(section.mr)
                & (IO_MEM_NB_ENTRIES - 1);
        }
        addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(io_index, addr, val, 2);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section.mr) & TARGET_PAGE_MASK)
            + section.offset_within_region + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif
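
/* Illustrative sketch (compiled out): cpu_memory_rw_debug() is what
   debugger stubs call - it operates on guest-virtual addresses via
   cpu_get_phys_page_debug() and, on the write side, can patch ROM to
   plant breakpoints.  The wrapper below is hypothetical. */
#if 0
static int example_debugger_read(CPUState *env, target_ulong vaddr,
                                 uint8_t *buf, int len)
{
    /* returns -1 if any page in the range is unmapped */
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0);
}
#endif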

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred. */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn. */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB. If this is not
       the first instruction in a TB then re-execute the preceding
       branch. */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen. */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception. In practice
       we have already translated the block once so it's probably ok. */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB. */
    cpu_resume_from_signal(env, NULL);
}

#if !defined(CONFIG_USER_ONLY)

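/* Backs the monitor's "info jit" command: summarises translation buffer
   occupancy and TB statistics. */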
void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
   is the offset relative to phys_ram_base */
tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    void *p;

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1);
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
        ldub_code(addr);
    }
    pd = env1->tlb_table[mmu_idx][page_index].addr_code & ~TARGET_PAGE_MASK;
    if (pd != io_mem_ram.ram_addr && pd != io_mem_rom.ram_addr
        && !io_mem_region[pd]->rom_device) {
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC)
        cpu_unassigned_access(env1, addr, 0, 1, 0, 4);
#else
        cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
#endif
    }
    p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

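/* Instantiate the code-access ("_cmmu") variants of the softmmu load
   helpers: softmmu_template.h expands once per inclusion, with SHIFT
   selecting the access size (0..3 -> 1, 2, 4 and 8 bytes).  GETPC() is
   stubbed to NULL because these helpers never need to restart a guest
   instruction. */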
#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif