/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump.  ARM and Sparc64
   have limited branch ranges (possibly also PPC), so place it in a
   section close to the code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

#define P_L2_LEVELS \
    (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

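/* Illustrative note (added commentary, not in the original source):
   with 4 KiB target pages (TARGET_PAGE_BITS == 12) and L2_BITS == 10,
   page_find_alloc() below decomposes a page index as

       index = address >> TARGET_PAGE_BITS;
       l1    = (index >> V_L1_SHIFT) & (V_L1_SIZE - 1);   -- slot in l1_map
       l2    = index & (L2_SIZE - 1);                     -- slot in the leaf

   with V_L1_SHIFT / L2_BITS intermediate levels in between.  V_L1_BITS
   is widened to at least 4 bits above so that the top-level array
   never becomes degenerately small. */
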
#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageEntry PhysPageEntry;

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
    /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

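/* Added commentary (not in the original source): each PhysPageEntry
   packs a tag bit and a 15-bit index into one uint16_t, so a leaf can
   name at most 32767 distinct MemoryRegionSections and an interior
   entry at most 32767 nodes.  PHYS_MAP_NODE_NIL, which evaluates to
   0x7fff, is reserved as the "empty" index in both spaces. */
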
/* This is a multi-level map on the physical address space.
   The bottom level has pointers to MemoryRegionSections.  */
static PhysPageEntry phys_map = { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };

static void io_mem_init(void);
static void memory_map_init(void);

static MemoryRegion io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}

static void phys_page_set_level(PhysPageEntry *lp, target_phys_addr_t *index,
                                target_phys_addr_t *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    target_phys_addr_t step = (target_phys_addr_t)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(target_phys_addr_t index, target_phys_addr_t nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

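/* Worked example (added commentary, not in the original source):
   assuming a three-level map and 4 KiB pages, registering 1 GiB of RAM
   at physical address 0 means phys_page_set(0, 0x40000, leaf).  At the
   top level, 0x40000 pages are smaller than the step of 1 << 20 pages,
   so the walk recurses once; at the next level the step is 1 << 10
   pages and the loop marks 256 consecutive entries as leaves, never
   descending to level 0.  Unaligned or sub-step remainders recurse
   further down instead. */
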
static MemoryRegionSection *phys_page_find(target_phys_addr_t index)
{
    PhysPageEntry lp = phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}

static target_phys_addr_t section_addr(MemoryRegionSection *section,
                                       target_phys_addr_t addr)
{
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    return addr;
}

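/* Added commentary (not in the original source): section_addr()
   converts an absolute physical address into an offset inside the
   section's backing MemoryRegion.  For example, for a section mapped
   at address-space offset 0xe0000000 whose contribution starts at
   region offset 0x1000, section_addr(section, 0xe0002000) yields
   0x3000. */
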
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUArchState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode.  This will change when a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Keep the buffer no bigger than 16MB to branch between blocks */
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

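/* Added commentary (not in the original source): the slack of
   TCG_MAX_OP_SIZE * OPC_BUF_SIZE subtracted above reserves room for
   one worst-case translation block, so tb_alloc() can compare
   code_gen_ptr against code_gen_buffer_max_size before any code is
   generated and still be sure the block about to be emitted cannot
   overrun the buffer. */
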
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now. */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUArchState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUArchState),
        VMSTATE_UINT32(interrupt_request, CPUArchState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUArchState *qemu_get_cpu(int cpu)
{
    CPUArchState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUArchState *env)
{
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

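/* Added commentary (not in the original source): tb_free() can only
   reclaim the most recently generated block, because TBs are carved
   linearly out of tbs[] and code_gen_buffer.  An illustrative (not
   verbatim) sequence:

       tb = tb_gen_code(env, pc, cs_base, flags, 1);   -- one-shot TB
       ... execute it once ...
       tb_free(tb);                                    -- fully reclaimed

   Here the buffer pointer rewinds; freeing any older TB is silently a
   no-op until the next tb_flush(). */
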
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUArchState *env1)
{
    CPUArchState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));
    }

    memset(tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof(void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for (;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

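/* Added commentary (not in the original source): per-page TB lists use
   tagged pointers.  A TB can straddle two pages, so each list link
   records which of the TB's pages (0 or 1) the list threads through,
   in the low two bits of the pointer:

       entry = (TranslationBlock *)((long)tb | n);     -- tag while linking
       n1    = (long)tb1 & 3;                          -- recover page index
       tb1   = (TranslationBlock *)((long)tb1 & ~3);   -- recover pointer

   The tag value 2 never names a page; it marks the end of the circular
   jump list (see tb->jmp_first below). */
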
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for (;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUArchState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for (;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

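/* Worked example (added commentary, not in the original source):
   set_bits(tab, 5, 7) marks bits 5..11.  Bits 5..7 live in tab[0], so
   tab[0] |= 0xe0; the remaining bits 8..11 live in tab[1], so
   tab[1] |= 0x0f.  The first branch handles the case where start and
   end fall within the same byte. */
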
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

TranslationBlock *tb_gen_code(CPUArchState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size +
                             CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

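/* Added commentary (not in the original source): the pointer rounding
   above is the usual align-up idiom; e.g. with CODE_GEN_ALIGN == 16, a
   code_gen_ptr that advanced to ...0x1009 is rounded to ...0x1010, so
   every TB's translated code starts on a CODE_GEN_ALIGN boundary. */
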
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUArchState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

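/* Added commentary (not in the original source): this fast path is hit
   on every store to a page that once held code.  Once the bitmap is
   built, an aligned store costs only a shift and a mask; e.g. a 4-byte
   write at page offset 0x120 tests bits 0x120..0x123 of code_bitmap
   and falls through without invalidating anything when no TB covers
   those bytes. */
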
bellard9fa3e852004-01-04 18:06:42 +00001221#if !defined(CONFIG_SOFTMMU)
Paul Brook41c1b1c2010-03-12 16:54:58 +00001222static void tb_invalidate_phys_page(tb_page_addr_t addr,
bellardd720b932004-04-25 17:57:43 +00001223 unsigned long pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00001224{
aliguori6b917542008-11-18 19:46:41 +00001225 TranslationBlock *tb;
bellard9fa3e852004-01-04 18:06:42 +00001226 PageDesc *p;
aliguori6b917542008-11-18 19:46:41 +00001227 int n;
bellardd720b932004-04-25 17:57:43 +00001228#ifdef TARGET_HAS_PRECISE_SMC
aliguori6b917542008-11-18 19:46:41 +00001229 TranslationBlock *current_tb = NULL;
Andreas Färber9349b4f2012-03-14 01:38:32 +01001230 CPUArchState *env = cpu_single_env;
aliguori6b917542008-11-18 19:46:41 +00001231 int current_tb_modified = 0;
1232 target_ulong current_pc = 0;
1233 target_ulong current_cs_base = 0;
1234 int current_flags = 0;
bellardd720b932004-04-25 17:57:43 +00001235#endif
bellard9fa3e852004-01-04 18:06:42 +00001236
1237 addr &= TARGET_PAGE_MASK;
1238 p = page_find(addr >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001239 if (!p)
bellardfd6ce8f2003-05-14 19:00:11 +00001240 return;
1241 tb = p->first_tb;
bellardd720b932004-04-25 17:57:43 +00001242#ifdef TARGET_HAS_PRECISE_SMC
1243 if (tb && pc != 0) {
1244 current_tb = tb_find_pc(pc);
1245 }
1246#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001247 while (tb != NULL) {
bellard9fa3e852004-01-04 18:06:42 +00001248 n = (long)tb & 3;
1249 tb = (TranslationBlock *)((long)tb & ~3);
bellardd720b932004-04-25 17:57:43 +00001250#ifdef TARGET_HAS_PRECISE_SMC
1251 if (current_tb == tb &&
pbrook2e70f6e2008-06-29 01:03:05 +00001252 (current_tb->cflags & CF_COUNT_MASK) != 1) {
bellardd720b932004-04-25 17:57:43 +00001253 /* If we are modifying the current TB, we must stop
1254 its execution. We could be more precise by checking
1255 that the modification is after the current PC, but it
1256 would require a specialized function to partially
1257 restore the CPU state */
ths3b46e622007-09-17 08:09:54 +00001258
bellardd720b932004-04-25 17:57:43 +00001259 current_tb_modified = 1;
Stefan Weil618ba8e2011-04-18 06:39:53 +00001260 cpu_restore_state(current_tb, env, pc);
aliguori6b917542008-11-18 19:46:41 +00001261 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1262 &current_flags);
bellardd720b932004-04-25 17:57:43 +00001263 }
1264#endif /* TARGET_HAS_PRECISE_SMC */
bellard9fa3e852004-01-04 18:06:42 +00001265 tb_phys_invalidate(tb, addr);
1266 tb = tb->page_next[n];
bellardfd6ce8f2003-05-14 19:00:11 +00001267 }
1268 p->first_tb = NULL;
bellardd720b932004-04-25 17:57:43 +00001269#ifdef TARGET_HAS_PRECISE_SMC
1270 if (current_tb_modified) {
1271 /* we generate a block containing just the instruction
1272 modifying the memory. It will ensure that it cannot modify
1273 itself */
bellardea1c1802004-06-14 18:56:36 +00001274 env->current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001275 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
bellardd720b932004-04-25 17:57:43 +00001276 cpu_resume_from_signal(env, puc);
1277 }
1278#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001279}
bellard9fa3e852004-01-04 18:06:42 +00001280#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001281
1282/* add the tb in the target page and protect it if necessary */
ths5fafdf22007-09-16 21:08:06 +00001283static inline void tb_alloc_page(TranslationBlock *tb,
Paul Brook41c1b1c2010-03-12 16:54:58 +00001284 unsigned int n, tb_page_addr_t page_addr)
bellardfd6ce8f2003-05-14 19:00:11 +00001285{
1286 PageDesc *p;
Juan Quintela4429ab42011-06-02 01:53:44 +00001287#ifndef CONFIG_USER_ONLY
1288 bool page_already_protected;
1289#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001290
bellard9fa3e852004-01-04 18:06:42 +00001291 tb->page_addr[n] = page_addr;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001292 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
bellard9fa3e852004-01-04 18:06:42 +00001293 tb->page_next[n] = p->first_tb;
Juan Quintela4429ab42011-06-02 01:53:44 +00001294#ifndef CONFIG_USER_ONLY
1295 page_already_protected = p->first_tb != NULL;
1296#endif
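    /* the page slot index n is stashed in the low 2 bits of the pointer */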
bellard9fa3e852004-01-04 18:06:42 +00001297 p->first_tb = (TranslationBlock *)((long)tb | n);
1298 invalidate_page_bitmap(p);
1299
bellard107db442004-06-22 18:48:46 +00001300#if defined(TARGET_HAS_SMC) || 1
bellardd720b932004-04-25 17:57:43 +00001301
bellard9fa3e852004-01-04 18:06:42 +00001302#if defined(CONFIG_USER_ONLY)
bellardfd6ce8f2003-05-14 19:00:11 +00001303 if (p->flags & PAGE_WRITE) {
pbrook53a59602006-03-25 19:31:22 +00001304 target_ulong addr;
1305 PageDesc *p2;
bellard9fa3e852004-01-04 18:06:42 +00001306 int prot;
1307
bellardfd6ce8f2003-05-14 19:00:11 +00001308        /* make the host page non-writable (writes will have a
1309 page fault + mprotect overhead) */
pbrook53a59602006-03-25 19:31:22 +00001310 page_addr &= qemu_host_page_mask;
bellardfd6ce8f2003-05-14 19:00:11 +00001311 prot = 0;
pbrook53a59602006-03-25 19:31:22 +00001312 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1313 addr += TARGET_PAGE_SIZE) {
1314
1315 p2 = page_find (addr >> TARGET_PAGE_BITS);
1316 if (!p2)
1317 continue;
1318 prot |= p2->flags;
1319 p2->flags &= ~PAGE_WRITE;
pbrook53a59602006-03-25 19:31:22 +00001320 }
ths5fafdf22007-09-16 21:08:06 +00001321 mprotect(g2h(page_addr), qemu_host_page_size,
bellardfd6ce8f2003-05-14 19:00:11 +00001322 (prot & PAGE_BITS) & ~PAGE_WRITE);
1323#ifdef DEBUG_TB_INVALIDATE
blueswir1ab3d1722007-11-04 07:31:40 +00001324 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
pbrook53a59602006-03-25 19:31:22 +00001325 page_addr);
bellardfd6ce8f2003-05-14 19:00:11 +00001326#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001327 }
bellard9fa3e852004-01-04 18:06:42 +00001328#else
1329 /* if some code is already present, then the pages are already
1330 protected. So we handle the case where only the first TB is
1331 allocated in a physical page */
Juan Quintela4429ab42011-06-02 01:53:44 +00001332 if (!page_already_protected) {
bellard6a00d602005-11-21 23:25:50 +00001333 tlb_protect_code(page_addr);
bellard9fa3e852004-01-04 18:06:42 +00001334 }
1335#endif
bellardd720b932004-04-25 17:57:43 +00001336
1337#endif /* TARGET_HAS_SMC */
bellardfd6ce8f2003-05-14 19:00:11 +00001338}
1339
bellard9fa3e852004-01-04 18:06:42 +00001340/* add a new TB and link it to the physical page tables. phys_page2 is
1341 (-1) to indicate that only one page contains the TB. */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001342void tb_link_page(TranslationBlock *tb,
1343 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
bellardd4e81642003-05-25 16:46:15 +00001344{
bellard9fa3e852004-01-04 18:06:42 +00001345 unsigned int h;
1346 TranslationBlock **ptb;
1347
pbrookc8a706f2008-06-02 16:16:42 +00001348 /* Grab the mmap lock to stop another thread invalidating this TB
1349 before we are done. */
1350 mmap_lock();
bellard9fa3e852004-01-04 18:06:42 +00001351 /* add in the physical hash table */
1352 h = tb_phys_hash_func(phys_pc);
1353 ptb = &tb_phys_hash[h];
1354 tb->phys_hash_next = *ptb;
1355 *ptb = tb;
bellardfd6ce8f2003-05-14 19:00:11 +00001356
1357 /* add in the page list */
bellard9fa3e852004-01-04 18:06:42 +00001358 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1359 if (phys_page2 != -1)
1360 tb_alloc_page(tb, 1, phys_page2);
1361 else
1362 tb->page_addr[1] = -1;
bellard9fa3e852004-01-04 18:06:42 +00001363
bellardd4e81642003-05-25 16:46:15 +00001364 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1365 tb->jmp_next[0] = NULL;
1366 tb->jmp_next[1] = NULL;
1367
1368 /* init original jump addresses */
1369 if (tb->tb_next_offset[0] != 0xffff)
1370 tb_reset_jump(tb, 0);
1371 if (tb->tb_next_offset[1] != 0xffff)
1372 tb_reset_jump(tb, 1);
bellard8a40a182005-11-20 10:35:40 +00001373
1374#ifdef DEBUG_TB_CHECK
1375 tb_page_check();
1376#endif
pbrookc8a706f2008-06-02 16:16:42 +00001377 mmap_unlock();
bellardfd6ce8f2003-05-14 19:00:11 +00001378}
1379
bellarda513fe12003-05-27 23:29:48 +00001380/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1381 tb[1].tc_ptr. Return NULL if not found */
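/* (e.g. a host PC taken from a signal frame maps back to the TB whose
   generated code contains it; cpu_restore_state then uses that TB) */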
1382TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1383{
1384 int m_min, m_max, m;
1385 unsigned long v;
1386 TranslationBlock *tb;
1387
1388 if (nb_tbs <= 0)
1389 return NULL;
1390 if (tc_ptr < (unsigned long)code_gen_buffer ||
1391 tc_ptr >= (unsigned long)code_gen_ptr)
1392 return NULL;
1393 /* binary search (cf Knuth) */
1394 m_min = 0;
1395 m_max = nb_tbs - 1;
1396 while (m_min <= m_max) {
1397 m = (m_min + m_max) >> 1;
1398 tb = &tbs[m];
1399 v = (unsigned long)tb->tc_ptr;
1400 if (v == tc_ptr)
1401 return tb;
1402 else if (tc_ptr < v) {
1403 m_max = m - 1;
1404 } else {
1405 m_min = m + 1;
1406 }
ths5fafdf22007-09-16 21:08:06 +00001407 }
bellarda513fe12003-05-27 23:29:48 +00001408 return &tbs[m_max];
1409}
bellard75012672003-06-21 13:11:07 +00001410
bellardea041c02003-06-25 16:16:50 +00001411static void tb_reset_jump_recursive(TranslationBlock *tb);
1412
1413static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1414{
1415 TranslationBlock *tb1, *tb_next, **ptb;
1416 unsigned int n1;
1417
1418 tb1 = tb->jmp_next[n];
1419 if (tb1 != NULL) {
1420 /* find head of list */
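            /* jmp_next pointers are tagged in their low 2 bits: 0 and 1
               name the outgoing jump slot, 2 marks the owning TB (the
               head of the circular list) */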
1421 for(;;) {
1422 n1 = (long)tb1 & 3;
1423 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1424 if (n1 == 2)
1425 break;
1426 tb1 = tb1->jmp_next[n1];
1427 }
1428        /* we are now sure that tb jumps to tb1 */
1429 tb_next = tb1;
1430
1431 /* remove tb from the jmp_first list */
1432 ptb = &tb_next->jmp_first;
1433 for(;;) {
1434 tb1 = *ptb;
1435 n1 = (long)tb1 & 3;
1436 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1437 if (n1 == n && tb1 == tb)
1438 break;
1439 ptb = &tb1->jmp_next[n1];
1440 }
1441 *ptb = tb->jmp_next[n];
1442 tb->jmp_next[n] = NULL;
ths3b46e622007-09-17 08:09:54 +00001443
bellardea041c02003-06-25 16:16:50 +00001444 /* suppress the jump to next tb in generated code */
1445 tb_reset_jump(tb, n);
1446
bellard01243112004-01-04 15:48:17 +00001447 /* suppress jumps in the tb on which we could have jumped */
bellardea041c02003-06-25 16:16:50 +00001448 tb_reset_jump_recursive(tb_next);
1449 }
1450}
1451
1452static void tb_reset_jump_recursive(TranslationBlock *tb)
1453{
1454 tb_reset_jump_recursive2(tb, 0);
1455 tb_reset_jump_recursive2(tb, 1);
1456}
1457
bellard1fddef42005-04-17 19:16:13 +00001458#if defined(TARGET_HAS_ICE)
Paul Brook94df27f2010-02-28 23:47:45 +00001459#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +01001460static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
Paul Brook94df27f2010-02-28 23:47:45 +00001461{
1462 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1463}
1464#else
Andreas Färber9349b4f2012-03-14 01:38:32 +01001465static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
bellardd720b932004-04-25 17:57:43 +00001466{
Anthony Liguoric227f092009-10-01 16:12:16 -05001467 target_phys_addr_t addr;
Anthony Liguoric227f092009-10-01 16:12:16 -05001468 ram_addr_t ram_addr;
Avi Kivityf3705d52012-03-08 16:16:34 +02001469 MemoryRegionSection *section;
bellardd720b932004-04-25 17:57:43 +00001470
pbrookc2f07f82006-04-08 17:14:56 +00001471 addr = cpu_get_phys_page_debug(env, pc);
Avi Kivity06ef3522012-02-13 16:11:22 +02001472 section = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf3705d52012-03-08 16:16:34 +02001473 if (!(memory_region_is_ram(section->mr)
1474 || (section->mr->rom_device && section->mr->readable))) {
Avi Kivity06ef3522012-02-13 16:11:22 +02001475 return;
1476 }
Avi Kivityf3705d52012-03-08 16:16:34 +02001477 ram_addr = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
1478 + section_addr(section, addr);
pbrook706cd4b2006-04-08 17:36:21 +00001479 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
bellardd720b932004-04-25 17:57:43 +00001480}
bellardc27004e2005-01-03 23:35:10 +00001481#endif
Paul Brook94df27f2010-02-28 23:47:45 +00001482#endif /* TARGET_HAS_ICE */
bellardd720b932004-04-25 17:57:43 +00001483
Paul Brookc527ee82010-03-01 03:31:14 +00001484#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +01001485void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
1487{
1488}
1489
Andreas Färber9349b4f2012-03-14 01:38:32 +01001490int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
Paul Brookc527ee82010-03-01 03:31:14 +00001491 int flags, CPUWatchpoint **watchpoint)
1492{
1493 return -ENOSYS;
1494}
1495#else
pbrook6658ffb2007-03-16 23:58:11 +00001496/* Add a watchpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001497int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
aliguoria1d1bb32008-11-18 20:07:32 +00001498 int flags, CPUWatchpoint **watchpoint)
pbrook6658ffb2007-03-16 23:58:11 +00001499{
aliguorib4051332008-11-18 20:14:20 +00001500 target_ulong len_mask = ~(len - 1);
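    /* e.g. (hypothetical values) len == 4 gives len_mask == ~3, so a
       4-byte watchpoint at 0x1000 matches accesses to 0x1000..0x1003 */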
aliguoric0ce9982008-11-25 22:13:57 +00001501 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001502
aliguorib4051332008-11-18 20:14:20 +00001503 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
Max Filippov0dc23822012-01-29 03:15:23 +04001504 if ((len & (len - 1)) || (addr & ~len_mask) ||
1505 len == 0 || len > TARGET_PAGE_SIZE) {
aliguorib4051332008-11-18 20:14:20 +00001506 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1507 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1508 return -EINVAL;
1509 }
Anthony Liguori7267c092011-08-20 22:09:37 -05001510 wp = g_malloc(sizeof(*wp));
pbrook6658ffb2007-03-16 23:58:11 +00001511
aliguoria1d1bb32008-11-18 20:07:32 +00001512 wp->vaddr = addr;
aliguorib4051332008-11-18 20:14:20 +00001513 wp->len_mask = len_mask;
aliguoria1d1bb32008-11-18 20:07:32 +00001514 wp->flags = flags;
1515
aliguori2dc9f412008-11-18 20:56:59 +00001516 /* keep all GDB-injected watchpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001517 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001518 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001519 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001520 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001521
pbrook6658ffb2007-03-16 23:58:11 +00001522 tlb_flush_page(env, addr);
aliguoria1d1bb32008-11-18 20:07:32 +00001523
1524 if (watchpoint)
1525 *watchpoint = wp;
1526 return 0;
pbrook6658ffb2007-03-16 23:58:11 +00001527}
1528
aliguoria1d1bb32008-11-18 20:07:32 +00001529/* Remove a specific watchpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001530int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
aliguoria1d1bb32008-11-18 20:07:32 +00001531 int flags)
pbrook6658ffb2007-03-16 23:58:11 +00001532{
aliguorib4051332008-11-18 20:14:20 +00001533 target_ulong len_mask = ~(len - 1);
aliguoria1d1bb32008-11-18 20:07:32 +00001534 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001535
Blue Swirl72cf2d42009-09-12 07:36:22 +00001536 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001537 if (addr == wp->vaddr && len_mask == wp->len_mask
aliguori6e140f22008-11-18 20:37:55 +00001538 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
aliguoria1d1bb32008-11-18 20:07:32 +00001539 cpu_watchpoint_remove_by_ref(env, wp);
pbrook6658ffb2007-03-16 23:58:11 +00001540 return 0;
1541 }
1542 }
aliguoria1d1bb32008-11-18 20:07:32 +00001543 return -ENOENT;
pbrook6658ffb2007-03-16 23:58:11 +00001544}
1545
aliguoria1d1bb32008-11-18 20:07:32 +00001546/* Remove a specific watchpoint by reference. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001547void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
aliguoria1d1bb32008-11-18 20:07:32 +00001548{
Blue Swirl72cf2d42009-09-12 07:36:22 +00001549 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
edgar_igl7d03f822008-05-17 18:58:29 +00001550
aliguoria1d1bb32008-11-18 20:07:32 +00001551 tlb_flush_page(env, watchpoint->vaddr);
1552
Anthony Liguori7267c092011-08-20 22:09:37 -05001553 g_free(watchpoint);
edgar_igl7d03f822008-05-17 18:58:29 +00001554}
1555
aliguoria1d1bb32008-11-18 20:07:32 +00001556/* Remove all matching watchpoints. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001557void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
aliguoria1d1bb32008-11-18 20:07:32 +00001558{
aliguoric0ce9982008-11-25 22:13:57 +00001559 CPUWatchpoint *wp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001560
Blue Swirl72cf2d42009-09-12 07:36:22 +00001561 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001562 if (wp->flags & mask)
1563 cpu_watchpoint_remove_by_ref(env, wp);
aliguoric0ce9982008-11-25 22:13:57 +00001564 }
aliguoria1d1bb32008-11-18 20:07:32 +00001565}
Paul Brookc527ee82010-03-01 03:31:14 +00001566#endif
aliguoria1d1bb32008-11-18 20:07:32 +00001567
1568/* Add a breakpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001569int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
aliguoria1d1bb32008-11-18 20:07:32 +00001570 CPUBreakpoint **breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001571{
bellard1fddef42005-04-17 19:16:13 +00001572#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001573 CPUBreakpoint *bp;
ths3b46e622007-09-17 08:09:54 +00001574
Anthony Liguori7267c092011-08-20 22:09:37 -05001575 bp = g_malloc(sizeof(*bp));
aliguoria1d1bb32008-11-18 20:07:32 +00001576
1577 bp->pc = pc;
1578 bp->flags = flags;
1579
aliguori2dc9f412008-11-18 20:56:59 +00001580 /* keep all GDB-injected breakpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001581 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001582 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001583 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001584 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001585
1586 breakpoint_invalidate(env, pc);
1587
1588 if (breakpoint)
1589 *breakpoint = bp;
1590 return 0;
1591#else
1592 return -ENOSYS;
1593#endif
1594}
1595
1596/* Remove a specific breakpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001597int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
aliguoria1d1bb32008-11-18 20:07:32 +00001598{
1599#if defined(TARGET_HAS_ICE)
1600 CPUBreakpoint *bp;
1601
Blue Swirl72cf2d42009-09-12 07:36:22 +00001602 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00001603 if (bp->pc == pc && bp->flags == flags) {
1604 cpu_breakpoint_remove_by_ref(env, bp);
bellard4c3a88a2003-07-26 12:06:08 +00001605 return 0;
aliguoria1d1bb32008-11-18 20:07:32 +00001606 }
bellard4c3a88a2003-07-26 12:06:08 +00001607 }
aliguoria1d1bb32008-11-18 20:07:32 +00001608 return -ENOENT;
bellard4c3a88a2003-07-26 12:06:08 +00001609#else
aliguoria1d1bb32008-11-18 20:07:32 +00001610 return -ENOSYS;
bellard4c3a88a2003-07-26 12:06:08 +00001611#endif
1612}
1613
aliguoria1d1bb32008-11-18 20:07:32 +00001614/* Remove a specific breakpoint by reference. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001615void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001616{
bellard1fddef42005-04-17 19:16:13 +00001617#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001618 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
bellardd720b932004-04-25 17:57:43 +00001619
aliguoria1d1bb32008-11-18 20:07:32 +00001620 breakpoint_invalidate(env, breakpoint->pc);
1621
Anthony Liguori7267c092011-08-20 22:09:37 -05001622 g_free(breakpoint);
aliguoria1d1bb32008-11-18 20:07:32 +00001623#endif
1624}
1625
1626/* Remove all matching breakpoints. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001627void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
aliguoria1d1bb32008-11-18 20:07:32 +00001628{
1629#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001630 CPUBreakpoint *bp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001631
Blue Swirl72cf2d42009-09-12 07:36:22 +00001632 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001633 if (bp->flags & mask)
1634 cpu_breakpoint_remove_by_ref(env, bp);
aliguoric0ce9982008-11-25 22:13:57 +00001635 }
bellard4c3a88a2003-07-26 12:06:08 +00001636#endif
1637}
1638
bellardc33a3462003-07-29 20:50:33 +00001639/* enable or disable single step mode. EXCP_DEBUG is returned by the
1640 CPU loop after each instruction */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001641void cpu_single_step(CPUArchState *env, int enabled)
bellardc33a3462003-07-29 20:50:33 +00001642{
bellard1fddef42005-04-17 19:16:13 +00001643#if defined(TARGET_HAS_ICE)
bellardc33a3462003-07-29 20:50:33 +00001644 if (env->singlestep_enabled != enabled) {
1645 env->singlestep_enabled = enabled;
aliguorie22a25c2009-03-12 20:12:48 +00001646 if (kvm_enabled())
1647 kvm_update_guest_debug(env, 0);
1648 else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01001649 /* must flush all the translated code to avoid inconsistencies */
aliguorie22a25c2009-03-12 20:12:48 +00001650 /* XXX: only flush what is necessary */
1651 tb_flush(env);
1652 }
bellardc33a3462003-07-29 20:50:33 +00001653 }
1654#endif
1655}
1656
bellard34865132003-10-05 14:28:56 +00001657/* enable or disable low level logging */
1658void cpu_set_log(int log_flags)
1659{
1660 loglevel = log_flags;
1661 if (loglevel && !logfile) {
pbrook11fcfab2007-07-01 18:21:11 +00001662 logfile = fopen(logfilename, log_append ? "a" : "w");
bellard34865132003-10-05 14:28:56 +00001663 if (!logfile) {
1664 perror(logfilename);
1665 _exit(1);
1666 }
bellard9fa3e852004-01-04 18:06:42 +00001667#if !defined(CONFIG_SOFTMMU)
1668 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1669 {
blueswir1b55266b2008-09-20 08:07:15 +00001670 static char logfile_buf[4096];
bellard9fa3e852004-01-04 18:06:42 +00001671 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1672 }
Stefan Weildaf767b2011-12-03 22:32:37 +01001673#elif defined(_WIN32)
1674 /* Win32 doesn't support line-buffering, so use unbuffered output. */
1675 setvbuf(logfile, NULL, _IONBF, 0);
1676#else
bellard34865132003-10-05 14:28:56 +00001677 setvbuf(logfile, NULL, _IOLBF, 0);
bellard9fa3e852004-01-04 18:06:42 +00001678#endif
pbrooke735b912007-06-30 13:53:24 +00001679 log_append = 1;
1680 }
1681 if (!loglevel && logfile) {
1682 fclose(logfile);
1683 logfile = NULL;
bellard34865132003-10-05 14:28:56 +00001684 }
1685}
1686
1687void cpu_set_log_filename(const char *filename)
1688{
1689 logfilename = strdup(filename);
pbrooke735b912007-06-30 13:53:24 +00001690 if (logfile) {
1691 fclose(logfile);
1692 logfile = NULL;
1693 }
1694 cpu_set_log(loglevel);
bellard34865132003-10-05 14:28:56 +00001695}
bellardc33a3462003-07-29 20:50:33 +00001696
Andreas Färber9349b4f2012-03-14 01:38:32 +01001697static void cpu_unlink_tb(CPUArchState *env)
bellardea041c02003-06-25 16:16:50 +00001698{
pbrookd5975362008-06-07 20:50:51 +00001699 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1700 problem and hope the cpu will stop of its own accord. For userspace
1701 emulation this often isn't actually as bad as it sounds. Often
1702 signals are used primarily to interrupt blocking syscalls. */
aurel323098dba2009-03-07 21:28:24 +00001703 TranslationBlock *tb;
Anthony Liguoric227f092009-10-01 16:12:16 -05001704 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
aurel323098dba2009-03-07 21:28:24 +00001705
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001706 spin_lock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001707 tb = env->current_tb;
1708 /* if the cpu is currently executing code, we must unlink it and
1709 all the potentially executing TB */
Riku Voipiof76cfe52009-12-04 15:16:30 +02001710 if (tb) {
aurel323098dba2009-03-07 21:28:24 +00001711 env->current_tb = NULL;
1712 tb_reset_jump_recursive(tb);
aurel323098dba2009-03-07 21:28:24 +00001713 }
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001714 spin_unlock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001715}
1716
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001717#ifndef CONFIG_USER_ONLY
aurel323098dba2009-03-07 21:28:24 +00001718/* mask must never be zero, except for A20 change call */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001719static void tcg_handle_interrupt(CPUArchState *env, int mask)
aurel323098dba2009-03-07 21:28:24 +00001720{
1721 int old_mask;
1722
1723 old_mask = env->interrupt_request;
1724 env->interrupt_request |= mask;
1725
aliguori8edac962009-04-24 18:03:45 +00001726 /*
1727 * If called from iothread context, wake the target cpu in
1728     * case it's halted.
1729 */
Jan Kiszkab7680cb2011-03-12 17:43:51 +01001730 if (!qemu_cpu_is_self(env)) {
aliguori8edac962009-04-24 18:03:45 +00001731 qemu_cpu_kick(env);
1732 return;
1733 }
aliguori8edac962009-04-24 18:03:45 +00001734
pbrook2e70f6e2008-06-29 01:03:05 +00001735 if (use_icount) {
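        /* make the 32-bit counter go negative so the current TB exits
           at its next icount check */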
pbrook266910c2008-07-09 15:31:50 +00001736 env->icount_decr.u16.high = 0xffff;
pbrook2e70f6e2008-06-29 01:03:05 +00001737 if (!can_do_io(env)
aurel32be214e62009-03-06 21:48:00 +00001738 && (mask & ~old_mask) != 0) {
pbrook2e70f6e2008-06-29 01:03:05 +00001739 cpu_abort(env, "Raised interrupt while not in I/O function");
1740 }
pbrook2e70f6e2008-06-29 01:03:05 +00001741 } else {
aurel323098dba2009-03-07 21:28:24 +00001742 cpu_unlink_tb(env);
bellardea041c02003-06-25 16:16:50 +00001743 }
1744}
1745
Jan Kiszkaec6959d2011-04-13 01:32:56 +02001746CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1747
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001748#else /* CONFIG_USER_ONLY */
1749
Andreas Färber9349b4f2012-03-14 01:38:32 +01001750void cpu_interrupt(CPUArchState *env, int mask)
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001751{
1752 env->interrupt_request |= mask;
1753 cpu_unlink_tb(env);
1754}
1755#endif /* CONFIG_USER_ONLY */
1756
Andreas Färber9349b4f2012-03-14 01:38:32 +01001757void cpu_reset_interrupt(CPUArchState *env, int mask)
bellardb54ad042004-05-20 13:42:52 +00001758{
1759 env->interrupt_request &= ~mask;
1760}
1761
Andreas Färber9349b4f2012-03-14 01:38:32 +01001762void cpu_exit(CPUArchState *env)
aurel323098dba2009-03-07 21:28:24 +00001763{
1764 env->exit_request = 1;
1765 cpu_unlink_tb(env);
1766}
1767
blueswir1c7cd6a32008-10-02 18:27:46 +00001768const CPULogItem cpu_log_items[] = {
ths5fafdf22007-09-16 21:08:06 +00001769 { CPU_LOG_TB_OUT_ASM, "out_asm",
bellardf193c792004-03-21 17:06:25 +00001770 "show generated host assembly code for each compiled TB" },
1771 { CPU_LOG_TB_IN_ASM, "in_asm",
1772 "show target assembly code for each compiled TB" },
ths5fafdf22007-09-16 21:08:06 +00001773 { CPU_LOG_TB_OP, "op",
bellard57fec1f2008-02-01 10:50:11 +00001774 "show micro ops for each compiled TB" },
bellardf193c792004-03-21 17:06:25 +00001775 { CPU_LOG_TB_OP_OPT, "op_opt",
blueswir1e01a1152008-03-14 17:37:11 +00001776 "show micro ops "
1777#ifdef TARGET_I386
1778 "before eflags optimization and "
bellardf193c792004-03-21 17:06:25 +00001779#endif
blueswir1e01a1152008-03-14 17:37:11 +00001780 "after liveness analysis" },
bellardf193c792004-03-21 17:06:25 +00001781 { CPU_LOG_INT, "int",
1782 "show interrupts/exceptions in short format" },
1783 { CPU_LOG_EXEC, "exec",
1784 "show trace before each executed TB (lots of logs)" },
bellard9fddaa02004-05-21 12:59:32 +00001785 { CPU_LOG_TB_CPU, "cpu",
thse91c8a72007-06-03 13:35:16 +00001786 "show CPU state before block translation" },
bellardf193c792004-03-21 17:06:25 +00001787#ifdef TARGET_I386
1788 { CPU_LOG_PCALL, "pcall",
1789 "show protected mode far calls/returns/exceptions" },
aliguorieca1bdf2009-01-26 19:54:31 +00001790 { CPU_LOG_RESET, "cpu_reset",
1791 "show CPU state before CPU resets" },
bellardf193c792004-03-21 17:06:25 +00001792#endif
bellard8e3a9fd2004-10-09 17:32:58 +00001793#ifdef DEBUG_IOPORT
bellardfd872592004-05-12 19:11:15 +00001794 { CPU_LOG_IOPORT, "ioport",
1795 "show all i/o ports accesses" },
bellard8e3a9fd2004-10-09 17:32:58 +00001796#endif
bellardf193c792004-03-21 17:06:25 +00001797 { 0, NULL, NULL },
1798};
1799
1800static int cmp1(const char *s1, int n, const char *s2)
1801{
1802 if (strlen(s2) != n)
1803 return 0;
1804 return memcmp(s1, s2, n) == 0;
1805}
ths3b46e622007-09-17 08:09:54 +00001806
bellardf193c792004-03-21 17:06:25 +00001807/* takes a comma-separated list of log masks; returns 0 on error */
1808int cpu_str_to_log_mask(const char *str)
1809{
blueswir1c7cd6a32008-10-02 18:27:46 +00001810 const CPULogItem *item;
bellardf193c792004-03-21 17:06:25 +00001811 int mask;
1812 const char *p, *p1;
1813
1814 p = str;
1815 mask = 0;
1816 for(;;) {
1817 p1 = strchr(p, ',');
1818 if (!p1)
1819 p1 = p + strlen(p);
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001820        if (cmp1(p, p1 - p, "all")) {
1821 for(item = cpu_log_items; item->mask != 0; item++) {
1822 mask |= item->mask;
1823 }
1824 } else {
1825 for(item = cpu_log_items; item->mask != 0; item++) {
1826 if (cmp1(p, p1 - p, item->name))
1827 goto found;
1828 }
1829 return 0;
bellardf193c792004-03-21 17:06:25 +00001830 }
bellardf193c792004-03-21 17:06:25 +00001831 found:
1832 mask |= item->mask;
1833 if (*p1 != ',')
1834 break;
1835 p = p1 + 1;
1836 }
1837 return mask;
1838}
bellardea041c02003-06-25 16:16:50 +00001839
Andreas Färber9349b4f2012-03-14 01:38:32 +01001840void cpu_abort(CPUArchState *env, const char *fmt, ...)
bellard75012672003-06-21 13:11:07 +00001841{
1842 va_list ap;
pbrook493ae1f2007-11-23 16:53:59 +00001843 va_list ap2;
bellard75012672003-06-21 13:11:07 +00001844
1845 va_start(ap, fmt);
pbrook493ae1f2007-11-23 16:53:59 +00001846 va_copy(ap2, ap);
bellard75012672003-06-21 13:11:07 +00001847 fprintf(stderr, "qemu: fatal: ");
1848 vfprintf(stderr, fmt, ap);
1849 fprintf(stderr, "\n");
1850#ifdef TARGET_I386
bellard7fe48482004-10-09 18:08:01 +00001851 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1852#else
1853 cpu_dump_state(env, stderr, fprintf, 0);
bellard75012672003-06-21 13:11:07 +00001854#endif
aliguori93fcfe32009-01-15 22:34:14 +00001855 if (qemu_log_enabled()) {
1856 qemu_log("qemu: fatal: ");
1857 qemu_log_vprintf(fmt, ap2);
1858 qemu_log("\n");
j_mayerf9373292007-09-29 12:18:20 +00001859#ifdef TARGET_I386
aliguori93fcfe32009-01-15 22:34:14 +00001860 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
j_mayerf9373292007-09-29 12:18:20 +00001861#else
aliguori93fcfe32009-01-15 22:34:14 +00001862 log_cpu_state(env, 0);
j_mayerf9373292007-09-29 12:18:20 +00001863#endif
aliguori31b1a7b2009-01-15 22:35:09 +00001864 qemu_log_flush();
aliguori93fcfe32009-01-15 22:34:14 +00001865 qemu_log_close();
balrog924edca2007-06-10 14:07:13 +00001866 }
pbrook493ae1f2007-11-23 16:53:59 +00001867 va_end(ap2);
j_mayerf9373292007-09-29 12:18:20 +00001868 va_end(ap);
Riku Voipiofd052bf2010-01-25 14:30:49 +02001869#if defined(CONFIG_USER_ONLY)
1870 {
1871 struct sigaction act;
1872 sigfillset(&act.sa_mask);
1873 act.sa_handler = SIG_DFL;
1874 sigaction(SIGABRT, &act, NULL);
1875 }
1876#endif
bellard75012672003-06-21 13:11:07 +00001877 abort();
1878}
1879
Andreas Färber9349b4f2012-03-14 01:38:32 +01001880CPUArchState *cpu_copy(CPUArchState *env)
thsc5be9f02007-02-28 20:20:53 +00001881{
Andreas Färber9349b4f2012-03-14 01:38:32 +01001882 CPUArchState *new_env = cpu_init(env->cpu_model_str);
1883 CPUArchState *next_cpu = new_env->next_cpu;
thsc5be9f02007-02-28 20:20:53 +00001884 int cpu_index = new_env->cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001885#if defined(TARGET_HAS_ICE)
1886 CPUBreakpoint *bp;
1887 CPUWatchpoint *wp;
1888#endif
1889
Andreas Färber9349b4f2012-03-14 01:38:32 +01001890 memcpy(new_env, env, sizeof(CPUArchState));
aliguori5a38f082009-01-15 20:16:51 +00001891
1892 /* Preserve chaining and index. */
thsc5be9f02007-02-28 20:20:53 +00001893 new_env->next_cpu = next_cpu;
1894 new_env->cpu_index = cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001895
1896 /* Clone all break/watchpoints.
1897 Note: Once we support ptrace with hw-debug register access, make sure
1898 BP_CPU break/watchpoints are handled correctly on clone. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00001899    QTAILQ_INIT(&new_env->breakpoints);
1900    QTAILQ_INIT(&new_env->watchpoints);
aliguori5a38f082009-01-15 20:16:51 +00001901#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001902 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001903 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1904 }
Blue Swirl72cf2d42009-09-12 07:36:22 +00001905 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001906 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1907 wp->flags, NULL);
1908 }
1909#endif
1910
thsc5be9f02007-02-28 20:20:53 +00001911 return new_env;
1912}
1913
bellard01243112004-01-04 15:48:17 +00001914#if !defined(CONFIG_USER_ONLY)
1915
Andreas Färber9349b4f2012-03-14 01:38:32 +01001916static inline void tlb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
edgar_igl5c751e92008-05-06 08:44:21 +00001917{
1918 unsigned int i;
1919
1920 /* Discard jump cache entries for any tb which might potentially
1921 overlap the flushed page. */
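    /* a TB can span two pages, so a TB hashed on the preceding page may
       run into the flushed one; clear that bucket as well */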
1922 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1923 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001924 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001925
1926 i = tb_jmp_cache_hash_page(addr);
1927 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001928 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001929}
1930
Igor Kovalenko08738982009-07-12 02:15:40 +04001931static CPUTLBEntry s_cputlb_empty_entry = {
1932 .addr_read = -1,
1933 .addr_write = -1,
1934 .addr_code = -1,
1935 .addend = -1,
1936};
1937
Peter Maydell771124e2012-01-17 13:23:13 +00001938/* NOTE:
1939 * If flush_global is true (the usual case), flush all tlb entries.
1940 * If flush_global is false, flush (at least) all tlb entries not
1941 * marked global.
1942 *
1943 * Since QEMU doesn't currently implement a global/not-global flag
1944 * for tlb entries, at the moment tlb_flush() will also flush all
1945 * tlb entries in the flush_global == false case. This is OK because
1946 * CPU architectures generally permit an implementation to drop
1947 * entries from the TLB at any time, so flushing more entries than
1948 * required is only an efficiency issue, not a correctness issue.
1949 */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001950void tlb_flush(CPUArchState *env, int flush_global)
bellard33417e72003-08-10 21:47:01 +00001951{
bellard33417e72003-08-10 21:47:01 +00001952 int i;
bellard01243112004-01-04 15:48:17 +00001953
bellard9fa3e852004-01-04 18:06:42 +00001954#if defined(DEBUG_TLB)
1955 printf("tlb_flush:\n");
1956#endif
bellard01243112004-01-04 15:48:17 +00001957 /* must reset current TB so that interrupts cannot modify the
1958 links while we are modifying them */
1959 env->current_tb = NULL;
1960
bellard33417e72003-08-10 21:47:01 +00001961 for(i = 0; i < CPU_TLB_SIZE; i++) {
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001962 int mmu_idx;
1963 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
Igor Kovalenko08738982009-07-12 02:15:40 +04001964 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001965 }
bellard33417e72003-08-10 21:47:01 +00001966 }
bellard9fa3e852004-01-04 18:06:42 +00001967
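    /* the PC -> TB lookup cache is now entirely stale */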
bellard8a40a182005-11-20 10:35:40 +00001968 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
bellard9fa3e852004-01-04 18:06:42 +00001969
Paul Brookd4c430a2010-03-17 02:14:28 +00001970 env->tlb_flush_addr = -1;
1971 env->tlb_flush_mask = 0;
bellarde3db7222005-01-26 22:00:47 +00001972 tlb_flush_count++;
bellard33417e72003-08-10 21:47:01 +00001973}
1974
bellard274da6b2004-05-20 21:56:27 +00001975static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
bellard61382a52003-10-27 21:22:23 +00001976{
ths5fafdf22007-09-16 21:08:06 +00001977 if (addr == (tlb_entry->addr_read &
bellard84b7b8e2005-11-28 21:19:04 +00001978 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00001979 addr == (tlb_entry->addr_write &
bellard84b7b8e2005-11-28 21:19:04 +00001980 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00001981 addr == (tlb_entry->addr_code &
bellard84b7b8e2005-11-28 21:19:04 +00001982 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
Igor Kovalenko08738982009-07-12 02:15:40 +04001983 *tlb_entry = s_cputlb_empty_entry;
bellard84b7b8e2005-11-28 21:19:04 +00001984 }
bellard61382a52003-10-27 21:22:23 +00001985}
1986
Andreas Färber9349b4f2012-03-14 01:38:32 +01001987void tlb_flush_page(CPUArchState *env, target_ulong addr)
bellard33417e72003-08-10 21:47:01 +00001988{
bellard8a40a182005-11-20 10:35:40 +00001989 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001990 int mmu_idx;
bellard01243112004-01-04 15:48:17 +00001991
bellard9fa3e852004-01-04 18:06:42 +00001992#if defined(DEBUG_TLB)
bellard108c49b2005-07-24 12:55:09 +00001993 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
bellard9fa3e852004-01-04 18:06:42 +00001994#endif
Paul Brookd4c430a2010-03-17 02:14:28 +00001995 /* Check if we need to flush due to large pages. */
1996 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
1997#if defined(DEBUG_TLB)
1998 printf("tlb_flush_page: forced full flush ("
1999 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
2000 env->tlb_flush_addr, env->tlb_flush_mask);
2001#endif
2002 tlb_flush(env, 1);
2003 return;
2004 }
bellard01243112004-01-04 15:48:17 +00002005 /* must reset current TB so that interrupts cannot modify the
2006 links while we are modifying them */
2007 env->current_tb = NULL;
bellard33417e72003-08-10 21:47:01 +00002008
bellard61382a52003-10-27 21:22:23 +00002009 addr &= TARGET_PAGE_MASK;
bellard33417e72003-08-10 21:47:01 +00002010 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002011 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2012 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
bellard01243112004-01-04 15:48:17 +00002013
edgar_igl5c751e92008-05-06 08:44:21 +00002014 tlb_flush_jmp_cache(env, addr);
bellard9fa3e852004-01-04 18:06:42 +00002015}
2016
bellard9fa3e852004-01-04 18:06:42 +00002017/* update the TLBs so that writes to code in the virtual page 'addr'
2018 can be detected */
Anthony Liguoric227f092009-10-01 16:12:16 -05002019static void tlb_protect_code(ram_addr_t ram_addr)
bellard61382a52003-10-27 21:22:23 +00002020{
ths5fafdf22007-09-16 21:08:06 +00002021 cpu_physical_memory_reset_dirty(ram_addr,
bellard6a00d602005-11-21 23:25:50 +00002022 ram_addr + TARGET_PAGE_SIZE,
2023 CODE_DIRTY_FLAG);
bellard9fa3e852004-01-04 18:06:42 +00002024}
2025
bellard9fa3e852004-01-04 18:06:42 +00002026/* update the TLB so that writes in physical page 'phys_addr' are no longer
bellard3a7d9292005-08-21 09:26:42 +00002027 tested for self modifying code */
Andreas Färber9349b4f2012-03-14 01:38:32 +01002028static void tlb_unprotect_code_phys(CPUArchState *env, ram_addr_t ram_addr,
bellard3a7d9292005-08-21 09:26:42 +00002029 target_ulong vaddr)
bellard9fa3e852004-01-04 18:06:42 +00002030{
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002031 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
bellard1ccde1c2004-02-06 19:46:14 +00002032}
2033
Avi Kivity7859cc62012-03-14 16:19:39 +02002034static bool tlb_is_dirty_ram(CPUTLBEntry *tlbe)
2035{
2036 return (tlbe->addr_write & (TLB_INVALID_MASK|TLB_MMIO|TLB_NOTDIRTY)) == 0;
2037}
2038
ths5fafdf22007-09-16 21:08:06 +00002039static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
bellard1ccde1c2004-02-06 19:46:14 +00002040 unsigned long start, unsigned long length)
2041{
2042 unsigned long addr;
Avi Kivity7859cc62012-03-14 16:19:39 +02002043 if (tlb_is_dirty_ram(tlb_entry)) {
bellard84b7b8e2005-11-28 21:19:04 +00002044 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
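        /* unsigned compare: true iff start <= addr < start + length */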
bellard1ccde1c2004-02-06 19:46:14 +00002045 if ((addr - start) < length) {
Avi Kivity7859cc62012-03-14 16:19:39 +02002046 tlb_entry->addr_write |= TLB_NOTDIRTY;
bellard1ccde1c2004-02-06 19:46:14 +00002047 }
2048 }
2049}
2050
pbrook5579c7f2009-04-11 14:47:08 +00002051/* Note: start and end must be within the same ram block. */
Anthony Liguoric227f092009-10-01 16:12:16 -05002052void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
bellard0a962c02005-02-10 22:00:27 +00002053 int dirty_flags)
bellard1ccde1c2004-02-06 19:46:14 +00002054{
Andreas Färber9349b4f2012-03-14 01:38:32 +01002055 CPUArchState *env;
bellard4f2ac232004-04-26 19:44:02 +00002056 unsigned long length, start1;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002057 int i;
bellard1ccde1c2004-02-06 19:46:14 +00002058
2059 start &= TARGET_PAGE_MASK;
2060 end = TARGET_PAGE_ALIGN(end);
2061
2062 length = end - start;
2063 if (length == 0)
2064 return;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002065 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00002066
bellard1ccde1c2004-02-06 19:46:14 +00002067 /* we modify the TLB cache so that the dirty bit will be set again
2068 when accessing the range */
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02002069 start1 = (unsigned long)qemu_safe_ram_ptr(start);
Stefan Weila57d23e2011-04-30 22:49:26 +02002070 /* Check that we don't span multiple blocks - this breaks the
pbrook5579c7f2009-04-11 14:47:08 +00002071 address comparisons below. */
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02002072 if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
pbrook5579c7f2009-04-11 14:47:08 +00002073 != (end - 1) - start) {
2074 abort();
2075 }
2076
bellard6a00d602005-11-21 23:25:50 +00002077 for(env = first_cpu; env != NULL; env = env->next_cpu) {
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002078 int mmu_idx;
2079 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2080 for(i = 0; i < CPU_TLB_SIZE; i++)
2081 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2082 start1, length);
2083 }
bellard6a00d602005-11-21 23:25:50 +00002084 }
bellard1ccde1c2004-02-06 19:46:14 +00002085}
2086
aliguori74576192008-10-06 14:02:03 +00002087int cpu_physical_memory_set_dirty_tracking(int enable)
2088{
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002089 int ret = 0;
aliguori74576192008-10-06 14:02:03 +00002090 in_migration = enable;
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002091 return ret;
aliguori74576192008-10-06 14:02:03 +00002092}
2093
bellard3a7d9292005-08-21 09:26:42 +00002094static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2095{
Anthony Liguoric227f092009-10-01 16:12:16 -05002096 ram_addr_t ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00002097 void *p;
bellard3a7d9292005-08-21 09:26:42 +00002098
Avi Kivity7859cc62012-03-14 16:19:39 +02002099 if (tlb_is_dirty_ram(tlb_entry)) {
pbrook5579c7f2009-04-11 14:47:08 +00002100 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2101 + tlb_entry->addend);
Marcelo Tosattie8902612010-10-11 15:31:19 -03002102 ram_addr = qemu_ram_addr_from_host_nofail(p);
bellard3a7d9292005-08-21 09:26:42 +00002103 if (!cpu_physical_memory_is_dirty(ram_addr)) {
pbrook0f459d12008-06-09 00:20:13 +00002104 tlb_entry->addr_write |= TLB_NOTDIRTY;
bellard3a7d9292005-08-21 09:26:42 +00002105 }
2106 }
2107}
2108
2109/* update the TLB according to the current state of the dirty bits */
Andreas Färber9349b4f2012-03-14 01:38:32 +01002110void cpu_tlb_update_dirty(CPUArchState *env)
bellard3a7d9292005-08-21 09:26:42 +00002111{
2112 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002113 int mmu_idx;
2114 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2115 for(i = 0; i < CPU_TLB_SIZE; i++)
2116 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2117 }
bellard3a7d9292005-08-21 09:26:42 +00002118}
2119
pbrook0f459d12008-06-09 00:20:13 +00002120static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002121{
pbrook0f459d12008-06-09 00:20:13 +00002122 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2123 tlb_entry->addr_write = vaddr;
bellard1ccde1c2004-02-06 19:46:14 +00002124}
2125
pbrook0f459d12008-06-09 00:20:13 +00002126/* update the TLB corresponding to virtual page vaddr
2127 so that it is no longer dirty */
Andreas Färber9349b4f2012-03-14 01:38:32 +01002128static inline void tlb_set_dirty(CPUArchState *env, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002129{
bellard1ccde1c2004-02-06 19:46:14 +00002130 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002131 int mmu_idx;
bellard1ccde1c2004-02-06 19:46:14 +00002132
pbrook0f459d12008-06-09 00:20:13 +00002133 vaddr &= TARGET_PAGE_MASK;
bellard1ccde1c2004-02-06 19:46:14 +00002134 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002135 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2136 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
bellard9fa3e852004-01-04 18:06:42 +00002137}
2138
Paul Brookd4c430a2010-03-17 02:14:28 +00002139/* Our TLB does not support large pages, so remember the area covered by
2140 large pages and trigger a full TLB flush if these are invalidated. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01002141static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
Paul Brookd4c430a2010-03-17 02:14:28 +00002142 target_ulong size)
2143{
2144 target_ulong mask = ~(size - 1);
2145
2146 if (env->tlb_flush_addr == (target_ulong)-1) {
2147 env->tlb_flush_addr = vaddr & mask;
2148 env->tlb_flush_mask = mask;
2149 return;
2150 }
2151 /* Extend the existing region to include the new page.
2152 This is a compromise between unnecessary flushes and the cost
2153 of maintaining a full variable size TLB. */
2154 mask &= env->tlb_flush_mask;
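    /* widen the mask until the previously recorded address and the new
       page fall inside the same naturally aligned region */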
2155 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2156 mask <<= 1;
2157 }
2158 env->tlb_flush_addr &= mask;
2159 env->tlb_flush_mask = mask;
2160}
2161
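/* ROM is modelled as read-only RAM here, so a plain RAM check covers both */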
Avi Kivity06ef3522012-02-13 16:11:22 +02002162static bool is_ram_rom(MemoryRegionSection *s)
Avi Kivity1d393fa2012-01-01 21:15:42 +02002163{
Avi Kivity06ef3522012-02-13 16:11:22 +02002164 return memory_region_is_ram(s->mr);
Avi Kivity1d393fa2012-01-01 21:15:42 +02002165}
2166
Avi Kivity06ef3522012-02-13 16:11:22 +02002167static bool is_romd(MemoryRegionSection *s)
Avi Kivity75c578d2012-01-02 15:40:52 +02002168{
Avi Kivity06ef3522012-02-13 16:11:22 +02002169 MemoryRegion *mr = s->mr;
Avi Kivity75c578d2012-01-02 15:40:52 +02002170
Avi Kivity75c578d2012-01-02 15:40:52 +02002171 return mr->rom_device && mr->readable;
2172}
2173
Avi Kivity06ef3522012-02-13 16:11:22 +02002174static bool is_ram_rom_romd(MemoryRegionSection *s)
Avi Kivity1d393fa2012-01-01 21:15:42 +02002175{
Avi Kivity06ef3522012-02-13 16:11:22 +02002176 return is_ram_rom(s) || is_romd(s);
Avi Kivity1d393fa2012-01-01 21:15:42 +02002177}
2178
Paul Brookd4c430a2010-03-17 02:14:28 +00002179/* Add a new TLB entry. At most one entry for a given virtual address
2180   is permitted. Only a single TARGET_PAGE_SIZE region is mapped; the
2181 supplied size is only used by tlb_flush_page. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01002182void tlb_set_page(CPUArchState *env, target_ulong vaddr,
Paul Brookd4c430a2010-03-17 02:14:28 +00002183 target_phys_addr_t paddr, int prot,
2184 int mmu_idx, target_ulong size)
bellard9fa3e852004-01-04 18:06:42 +00002185{
Avi Kivityf3705d52012-03-08 16:16:34 +02002186 MemoryRegionSection *section;
bellard9fa3e852004-01-04 18:06:42 +00002187 unsigned int index;
bellard4f2ac232004-04-26 19:44:02 +00002188 target_ulong address;
pbrook0f459d12008-06-09 00:20:13 +00002189 target_ulong code_address;
Paul Brook355b1942010-04-05 00:28:53 +01002190 unsigned long addend;
bellard84b7b8e2005-11-28 21:19:04 +00002191 CPUTLBEntry *te;
aliguoria1d1bb32008-11-18 20:07:32 +00002192 CPUWatchpoint *wp;
Anthony Liguoric227f092009-10-01 16:12:16 -05002193 target_phys_addr_t iotlb;
bellard9fa3e852004-01-04 18:06:42 +00002194
Paul Brookd4c430a2010-03-17 02:14:28 +00002195 assert(size >= TARGET_PAGE_SIZE);
2196 if (size != TARGET_PAGE_SIZE) {
2197 tlb_add_large_page(env, vaddr, size);
2198 }
Avi Kivity06ef3522012-02-13 16:11:22 +02002199 section = phys_page_find(paddr >> TARGET_PAGE_BITS);
bellard9fa3e852004-01-04 18:06:42 +00002200#if defined(DEBUG_TLB)
Stefan Weil7fd3f492010-09-30 22:39:51 +02002201 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
2202 " prot=%x idx=%d pd=0x%08lx\n",
2203 vaddr, paddr, prot, mmu_idx, pd);
bellard9fa3e852004-01-04 18:06:42 +00002204#endif
2205
pbrook0f459d12008-06-09 00:20:13 +00002206 address = vaddr;
Avi Kivityf3705d52012-03-08 16:16:34 +02002207 if (!is_ram_rom_romd(section)) {
pbrook0f459d12008-06-09 00:20:13 +00002208 /* IO memory case (romd handled later) */
2209 address |= TLB_MMIO;
2210 }
Avi Kivityf3705d52012-03-08 16:16:34 +02002211 if (is_ram_rom_romd(section)) {
2212 addend = (unsigned long)memory_region_get_ram_ptr(section->mr)
2213 + section_addr(section, paddr);
Avi Kivity06ef3522012-02-13 16:11:22 +02002214 } else {
2215 addend = 0;
2216 }
Avi Kivityf3705d52012-03-08 16:16:34 +02002217 if (is_ram_rom(section)) {
pbrook0f459d12008-06-09 00:20:13 +00002218 /* Normal RAM. */
Avi Kivityf3705d52012-03-08 16:16:34 +02002219 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
2220 + section_addr(section, paddr);
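        /* phys_section_notdirty and phys_section_rom are special section
           indices, kept in the low bits of the iotlb entry, that route
           writes through the slow path for dirty tracking and write
           protection */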
2221 if (!section->readonly)
Avi Kivityaa102232012-03-08 17:06:55 +02002222 iotlb |= phys_section_notdirty;
pbrook0f459d12008-06-09 00:20:13 +00002223 else
Avi Kivityaa102232012-03-08 17:06:55 +02002224 iotlb |= phys_section_rom;
pbrook0f459d12008-06-09 00:20:13 +00002225 } else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002226 /* IO handlers are currently passed a physical address.
pbrook0f459d12008-06-09 00:20:13 +00002227 It would be nice to pass an offset from the base address
2228 of that region. This would avoid having to special case RAM,
2229 and avoid full address decoding in every device.
2230       Instead, the section index is stored in the low bits of the
2231       iotlb entry and the page-aligned part holds the address. */
Avi Kivityaa102232012-03-08 17:06:55 +02002232 iotlb = section - phys_sections;
Avi Kivityf3705d52012-03-08 16:16:34 +02002233 iotlb += section_addr(section, paddr);
pbrook0f459d12008-06-09 00:20:13 +00002234 }
pbrook6658ffb2007-03-16 23:58:11 +00002235
pbrook0f459d12008-06-09 00:20:13 +00002236 code_address = address;
2237 /* Make accesses to pages with watchpoints go via the
2238 watchpoint trap routines. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00002239 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00002240 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
Jun Koibf298f82010-05-06 14:36:59 +09002241 /* Avoid trapping reads of pages with a write breakpoint. */
2242 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
Avi Kivityaa102232012-03-08 17:06:55 +02002243 iotlb = phys_section_watch + paddr;
Jun Koibf298f82010-05-06 14:36:59 +09002244 address |= TLB_MMIO;
2245 break;
2246 }
pbrook6658ffb2007-03-16 23:58:11 +00002247 }
pbrook0f459d12008-06-09 00:20:13 +00002248 }
balrogd79acba2007-06-26 20:01:13 +00002249
pbrook0f459d12008-06-09 00:20:13 +00002250 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
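    /* the iotlb value is stored relative to vaddr so the slow path can
       rebuild it by adding the faulting virtual address back in */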
2251 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2252 te = &env->tlb_table[mmu_idx][index];
2253 te->addend = addend - vaddr;
2254 if (prot & PAGE_READ) {
2255 te->addr_read = address;
2256 } else {
2257 te->addr_read = -1;
2258 }
edgar_igl5c751e92008-05-06 08:44:21 +00002259
pbrook0f459d12008-06-09 00:20:13 +00002260 if (prot & PAGE_EXEC) {
2261 te->addr_code = code_address;
2262 } else {
2263 te->addr_code = -1;
2264 }
2265 if (prot & PAGE_WRITE) {
Avi Kivityf3705d52012-03-08 16:16:34 +02002266 if ((memory_region_is_ram(section->mr) && section->readonly)
2267 || is_romd(section)) {
pbrook0f459d12008-06-09 00:20:13 +00002268 /* Write access calls the I/O callback. */
2269 te->addr_write = address | TLB_MMIO;
Avi Kivityf3705d52012-03-08 16:16:34 +02002270 } else if (memory_region_is_ram(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002271 && !cpu_physical_memory_is_dirty(
Avi Kivityf3705d52012-03-08 16:16:34 +02002272 section->mr->ram_addr
2273 + section_addr(section, paddr))) {
pbrook0f459d12008-06-09 00:20:13 +00002274 te->addr_write = address | TLB_NOTDIRTY;
bellard84b7b8e2005-11-28 21:19:04 +00002275 } else {
pbrook0f459d12008-06-09 00:20:13 +00002276 te->addr_write = address;
bellard9fa3e852004-01-04 18:06:42 +00002277 }
pbrook0f459d12008-06-09 00:20:13 +00002278 } else {
2279 te->addr_write = -1;
bellard9fa3e852004-01-04 18:06:42 +00002280 }
bellard9fa3e852004-01-04 18:06:42 +00002281}
2282
bellard01243112004-01-04 15:48:17 +00002283#else
2284
Andreas Färber9349b4f2012-03-14 01:38:32 +01002285void tlb_flush(CPUArchState *env, int flush_global)
bellard01243112004-01-04 15:48:17 +00002286{
2287}
2288
Andreas Färber9349b4f2012-03-14 01:38:32 +01002289void tlb_flush_page(CPUArchState *env, target_ulong addr)
bellard01243112004-01-04 15:48:17 +00002290{
2291}
2292
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002293/*
2294 * Walks guest process memory "regions" one by one
2295 * and calls callback function 'fn' for each region.
2296 */
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002297
2298struct walk_memory_regions_data
bellard9fa3e852004-01-04 18:06:42 +00002299{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002300 walk_memory_regions_fn fn;
2301 void *priv;
2302 unsigned long start;
2303 int prot;
2304};
bellard9fa3e852004-01-04 18:06:42 +00002305
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002306static int walk_memory_regions_end(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00002307 abi_ulong end, int new_prot)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002308{
2309 if (data->start != -1ul) {
2310 int rc = data->fn(data->priv, data->start, end, data->prot);
2311 if (rc != 0) {
2312 return rc;
bellard9fa3e852004-01-04 18:06:42 +00002313 }
bellard33417e72003-08-10 21:47:01 +00002314 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002315
2316 data->start = (new_prot ? end : -1ul);
2317 data->prot = new_prot;
2318
2319 return 0;
2320}
2321
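/* recursively walk one level of the page table, emitting a callback each
   time the protection flags change across the address space */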
2322static int walk_memory_regions_1(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00002323 abi_ulong base, int level, void **lp)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002324{
Paul Brookb480d9b2010-03-12 23:23:29 +00002325 abi_ulong pa;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002326 int i, rc;
2327
2328 if (*lp == NULL) {
2329 return walk_memory_regions_end(data, base, 0);
2330 }
2331
2332 if (level == 0) {
2333 PageDesc *pd = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00002334 for (i = 0; i < L2_SIZE; ++i) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002335 int prot = pd[i].flags;
2336
2337 pa = base | (i << TARGET_PAGE_BITS);
2338 if (prot != data->prot) {
2339 rc = walk_memory_regions_end(data, pa, prot);
2340 if (rc != 0) {
2341 return rc;
2342 }
2343 }
2344 }
2345 } else {
2346 void **pp = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00002347 for (i = 0; i < L2_SIZE; ++i) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002348 pa = base | ((abi_ulong)i <<
2349 (TARGET_PAGE_BITS + L2_BITS * level));
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002350 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2351 if (rc != 0) {
2352 return rc;
2353 }
2354 }
2355 }
2356
2357 return 0;
2358}
2359
2360int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2361{
2362 struct walk_memory_regions_data data;
2363 unsigned long i;
2364
2365 data.fn = fn;
2366 data.priv = priv;
2367 data.start = -1ul;
2368 data.prot = 0;
2369
2370 for (i = 0; i < V_L1_SIZE; i++) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002371 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002372 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2373 if (rc != 0) {
2374 return rc;
2375 }
2376 }
2377
2378 return walk_memory_regions_end(&data, 0, 0);
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002379}
2380
Paul Brookb480d9b2010-03-12 23:23:29 +00002381static int dump_region(void *priv, abi_ulong start,
2382 abi_ulong end, unsigned long prot)
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002383{
2384 FILE *f = (FILE *)priv;
2385
Paul Brookb480d9b2010-03-12 23:23:29 +00002386 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2387 " "TARGET_ABI_FMT_lx" %c%c%c\n",
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002388 start, end, end - start,
2389 ((prot & PAGE_READ) ? 'r' : '-'),
2390 ((prot & PAGE_WRITE) ? 'w' : '-'),
2391 ((prot & PAGE_EXEC) ? 'x' : '-'));
2392
2393 return (0);
2394}
2395
2396/* dump memory mappings */
2397void page_dump(FILE *f)
2398{
2399 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2400 "start", "end", "size", "prot");
2401 walk_memory_regions(f, dump_region);
bellard33417e72003-08-10 21:47:01 +00002402}
2403
pbrook53a59602006-03-25 19:31:22 +00002404int page_get_flags(target_ulong address)
bellard33417e72003-08-10 21:47:01 +00002405{
bellard9fa3e852004-01-04 18:06:42 +00002406 PageDesc *p;
2407
2408 p = page_find(address >> TARGET_PAGE_BITS);
bellard33417e72003-08-10 21:47:01 +00002409 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00002410 return 0;
2411 return p->flags;
bellard33417e72003-08-10 21:47:01 +00002412}
2413
Richard Henderson376a7902010-03-10 15:57:04 -08002414/* Modify the flags of a page and invalidate the code if necessary.
2415 The flag PAGE_WRITE_ORG is positioned automatically depending
2416 on PAGE_WRITE. The mmap_lock should already be held. */
pbrook53a59602006-03-25 19:31:22 +00002417void page_set_flags(target_ulong start, target_ulong end, int flags)
bellard9fa3e852004-01-04 18:06:42 +00002418{
Richard Henderson376a7902010-03-10 15:57:04 -08002419 target_ulong addr, len;
bellard9fa3e852004-01-04 18:06:42 +00002420
Richard Henderson376a7902010-03-10 15:57:04 -08002421 /* This function should never be called with addresses outside the
2422 guest address space. If this assert fires, it probably indicates
2423 a missing call to h2g_valid. */
Paul Brookb480d9b2010-03-12 23:23:29 +00002424#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2425 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002426#endif
2427 assert(start < end);
2428
bellard9fa3e852004-01-04 18:06:42 +00002429 start = start & TARGET_PAGE_MASK;
2430 end = TARGET_PAGE_ALIGN(end);
Richard Henderson376a7902010-03-10 15:57:04 -08002431
2432 if (flags & PAGE_WRITE) {
bellard9fa3e852004-01-04 18:06:42 +00002433 flags |= PAGE_WRITE_ORG;
Richard Henderson376a7902010-03-10 15:57:04 -08002434 }
2435
2436 for (addr = start, len = end - start;
2437 len != 0;
2438 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2439 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2440
2441 /* If the write protection bit is set, then we invalidate
2442 the code inside. */
ths5fafdf22007-09-16 21:08:06 +00002443 if (!(p->flags & PAGE_WRITE) &&
bellard9fa3e852004-01-04 18:06:42 +00002444 (flags & PAGE_WRITE) &&
2445 p->first_tb) {
bellardd720b932004-04-25 17:57:43 +00002446 tb_invalidate_phys_page(addr, 0, NULL);
bellard9fa3e852004-01-04 18:06:42 +00002447 }
2448 p->flags = flags;
2449 }
bellard9fa3e852004-01-04 18:06:42 +00002450}
2451
ths3d97b402007-11-02 19:02:07 +00002452int page_check_range(target_ulong start, target_ulong len, int flags)
2453{
2454 PageDesc *p;
2455 target_ulong end;
2456 target_ulong addr;
2457
Richard Henderson376a7902010-03-10 15:57:04 -08002458 /* This function should never be called with addresses outside the
2459 guest address space. If this assert fires, it probably indicates
2460 a missing call to h2g_valid. */
Blue Swirl338e9e62010-03-13 09:48:08 +00002461#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2462 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002463#endif
2464
Richard Henderson3e0650a2010-03-29 10:54:42 -07002465 if (len == 0) {
2466 return 0;
2467 }
Richard Henderson376a7902010-03-10 15:57:04 -08002468 if (start + len - 1 < start) {
2469 /* We've wrapped around. */
balrog55f280c2008-10-28 10:24:11 +00002470 return -1;
Richard Henderson376a7902010-03-10 15:57:04 -08002471 }
balrog55f280c2008-10-28 10:24:11 +00002472
ths3d97b402007-11-02 19:02:07 +00002473 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2474 start = start & TARGET_PAGE_MASK;
2475
Richard Henderson376a7902010-03-10 15:57:04 -08002476 for (addr = start, len = end - start;
2477 len != 0;
2478 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
ths3d97b402007-11-02 19:02:07 +00002479 p = page_find(addr >> TARGET_PAGE_BITS);
 2480     if (!p)
 2481         return -1;
 2482     if (!(p->flags & PAGE_VALID))
 2483         return -1;
2484
bellarddae32702007-11-14 10:51:00 +00002485 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
ths3d97b402007-11-02 19:02:07 +00002486 return -1;
bellarddae32702007-11-14 10:51:00 +00002487 if (flags & PAGE_WRITE) {
2488 if (!(p->flags & PAGE_WRITE_ORG))
2489 return -1;
2490 /* unprotect the page if it was put read-only because it
2491 contains translated code */
2492 if (!(p->flags & PAGE_WRITE)) {
2493 if (!page_unprotect(addr, 0, NULL))
2494 return -1;
2495 }
2496 return 0;
2497 }
ths3d97b402007-11-02 19:02:07 +00002498 }
2499 return 0;
2500}
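
/* Illustrative sketch: validating a guest buffer before copying into it,
 * roughly the shape of an access_ok()-style check. The helper name is an
 * assumption for the example; 0 from page_check_range() means every page
 * in the range carries the requested permissions. */
#if 0
static int example_guest_buffer_writable(target_ulong addr, target_ulong len)
{
    return page_check_range(addr, len, PAGE_WRITE) == 0;
}
#endif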
2501
bellard9fa3e852004-01-04 18:06:42 +00002502/* called from signal handler: invalidate the code and unprotect the
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002503 page. Return TRUE if the fault was successfully handled. */
pbrook53a59602006-03-25 19:31:22 +00002504int page_unprotect(target_ulong address, unsigned long pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00002505{
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002506 unsigned int prot;
2507 PageDesc *p;
pbrook53a59602006-03-25 19:31:22 +00002508 target_ulong host_start, host_end, addr;
bellard9fa3e852004-01-04 18:06:42 +00002509
pbrookc8a706f2008-06-02 16:16:42 +00002510 /* Technically this isn't safe inside a signal handler. However we
2511 know this only ever happens in a synchronous SEGV handler, so in
2512 practice it seems to be ok. */
2513 mmap_lock();
2514
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002515 p = page_find(address >> TARGET_PAGE_BITS);
2516 if (!p) {
pbrookc8a706f2008-06-02 16:16:42 +00002517 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002518 return 0;
pbrookc8a706f2008-06-02 16:16:42 +00002519 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002520
bellard9fa3e852004-01-04 18:06:42 +00002521 /* if the page was really writable, then we change its
2522 protection back to writable */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002523 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2524 host_start = address & qemu_host_page_mask;
2525 host_end = host_start + qemu_host_page_size;
2526
2527 prot = 0;
 2528         for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
2529 p = page_find(addr >> TARGET_PAGE_BITS);
2530 p->flags |= PAGE_WRITE;
2531 prot |= p->flags;
2532
bellard9fa3e852004-01-04 18:06:42 +00002533 /* and since the content will be modified, we must invalidate
2534 the corresponding translated code. */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002535 tb_invalidate_phys_page(addr, pc, puc);
bellard9fa3e852004-01-04 18:06:42 +00002536#ifdef DEBUG_TB_CHECK
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002537 tb_invalidate_check(addr);
bellard9fa3e852004-01-04 18:06:42 +00002538#endif
bellard9fa3e852004-01-04 18:06:42 +00002539 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002540 mprotect((void *)g2h(host_start), qemu_host_page_size,
2541 prot & PAGE_BITS);
2542
2543 mmap_unlock();
2544 return 1;
bellard9fa3e852004-01-04 18:06:42 +00002545 }
pbrookc8a706f2008-06-02 16:16:42 +00002546 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002547 return 0;
2548}
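
/* Illustrative sketch: the shape of the host SEGV path that ends up in
 * page_unprotect() (the real caller is the signal handling code in the
 * user-mode emulation). The helper name is an assumption for the
 * example; h2g() converts a host address back to a guest address. */
#if 0
static int example_handle_write_fault(unsigned long host_addr,
                                      unsigned long pc, void *puc)
{
    /* If the page was read-only only because it holds translated code,
     * page_unprotect() invalidates those TBs and restores PROT_WRITE,
     * so the faulting store can simply be restarted. */
    if (page_unprotect(h2g(host_addr), pc, puc)) {
        return 1;   /* handled, retry the access */
    }
    return 0;       /* genuine fault, deliver the signal to the guest */
}
#endif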
2549
Andreas Färber9349b4f2012-03-14 01:38:32 +01002550static inline void tlb_set_dirty(CPUArchState *env,
bellard6a00d602005-11-21 23:25:50 +00002551 unsigned long addr, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002552{
2553}
bellard9fa3e852004-01-04 18:06:42 +00002554#endif /* defined(CONFIG_USER_ONLY) */
2555
pbrooke2eef172008-06-08 01:09:01 +00002556#if !defined(CONFIG_USER_ONLY)
pbrook8da3ff12008-12-01 18:59:50 +00002557
Paul Brookc04b2b72010-03-01 03:31:14 +00002558#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2559typedef struct subpage_t {
Avi Kivity70c68e42012-01-02 12:32:48 +02002560 MemoryRegion iomem;
Paul Brookc04b2b72010-03-01 03:31:14 +00002561 target_phys_addr_t base;
Avi Kivity5312bd82012-02-12 18:32:55 +02002562 uint16_t sub_section[TARGET_PAGE_SIZE];
Paul Brookc04b2b72010-03-01 03:31:14 +00002563} subpage_t;
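
/* Worked example (illustrative, assuming 4K target pages): SUBPAGE_IDX()
 * keeps only the offset within a page, so SUBPAGE_IDX(0x1234) == 0x234.
 * sub_section[] maps each byte offset of the page to a phys_sections[]
 * index, which is how several regions can share one target page. */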
2564
Anthony Liguoric227f092009-10-01 16:12:16 -05002565static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002566 uint16_t section);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002567static subpage_t *subpage_init(target_phys_addr_t base);
Avi Kivity5312bd82012-02-12 18:32:55 +02002568static void destroy_page_desc(uint16_t section_index)
Avi Kivity54688b12012-02-09 17:34:32 +02002569{
Avi Kivity5312bd82012-02-12 18:32:55 +02002570 MemoryRegionSection *section = &phys_sections[section_index];
2571 MemoryRegion *mr = section->mr;
Avi Kivity54688b12012-02-09 17:34:32 +02002572
2573 if (mr->subpage) {
2574 subpage_t *subpage = container_of(mr, subpage_t, iomem);
2575 memory_region_destroy(&subpage->iomem);
2576 g_free(subpage);
2577 }
2578}
2579
Avi Kivity4346ae32012-02-10 17:00:01 +02002580static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
Avi Kivity54688b12012-02-09 17:34:32 +02002581{
2582 unsigned i;
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002583 PhysPageEntry *p;
Avi Kivity54688b12012-02-09 17:34:32 +02002584
Avi Kivityc19e8802012-02-13 20:25:31 +02002585 if (lp->ptr == PHYS_MAP_NODE_NIL) {
Avi Kivity54688b12012-02-09 17:34:32 +02002586 return;
2587 }
2588
Avi Kivityc19e8802012-02-13 20:25:31 +02002589 p = phys_map_nodes[lp->ptr];
Avi Kivity4346ae32012-02-10 17:00:01 +02002590 for (i = 0; i < L2_SIZE; ++i) {
Avi Kivity07f07b32012-02-13 20:45:32 +02002591 if (!p[i].is_leaf) {
Avi Kivity54688b12012-02-09 17:34:32 +02002592 destroy_l2_mapping(&p[i], level - 1);
Avi Kivity4346ae32012-02-10 17:00:01 +02002593 } else {
Avi Kivityc19e8802012-02-13 20:25:31 +02002594 destroy_page_desc(p[i].ptr);
Avi Kivity54688b12012-02-09 17:34:32 +02002595 }
Avi Kivity54688b12012-02-09 17:34:32 +02002596 }
Avi Kivity07f07b32012-02-13 20:45:32 +02002597 lp->is_leaf = 0;
Avi Kivityc19e8802012-02-13 20:25:31 +02002598 lp->ptr = PHYS_MAP_NODE_NIL;
Avi Kivity54688b12012-02-09 17:34:32 +02002599}
2600
2601static void destroy_all_mappings(void)
2602{
Avi Kivity3eef53d2012-02-10 14:57:31 +02002603 destroy_l2_mapping(&phys_map, P_L2_LEVELS - 1);
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002604 phys_map_nodes_reset();
Avi Kivity54688b12012-02-09 17:34:32 +02002605}
2606
Avi Kivity5312bd82012-02-12 18:32:55 +02002607static uint16_t phys_section_add(MemoryRegionSection *section)
2608{
2609 if (phys_sections_nb == phys_sections_nb_alloc) {
2610 phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
2611 phys_sections = g_renew(MemoryRegionSection, phys_sections,
2612 phys_sections_nb_alloc);
2613 }
2614 phys_sections[phys_sections_nb] = *section;
2615 return phys_sections_nb++;
2616}
2617
2618static void phys_sections_clear(void)
2619{
2620 phys_sections_nb = 0;
2621}
2622
Michael S. Tsirkin8f2498f2009-09-29 18:53:16 +02002623/* register physical memory.
 2624 For RAM, 'size' must be a multiple of the target page size.
 2625 Sections whose start or end is not aligned to a target page
 2626 boundary are routed through a subpage, which dispatches accesses
 2627 within that page to the registered sub-sections; the page-aligned
 2628 middle of a section is entered into the physical page map
 2629 directly (see cpu_register_physical_memory_log() below). */
Avi Kivity0f0cb162012-02-13 17:14:32 +02002631static void register_subpage(MemoryRegionSection *section)
2632{
2633 subpage_t *subpage;
2634 target_phys_addr_t base = section->offset_within_address_space
2635 & TARGET_PAGE_MASK;
Avi Kivityf3705d52012-03-08 16:16:34 +02002636 MemoryRegionSection *existing = phys_page_find(base >> TARGET_PAGE_BITS);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002637 MemoryRegionSection subsection = {
2638 .offset_within_address_space = base,
2639 .size = TARGET_PAGE_SIZE,
2640 };
Avi Kivity0f0cb162012-02-13 17:14:32 +02002641 target_phys_addr_t start, end;
2642
Avi Kivityf3705d52012-03-08 16:16:34 +02002643 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002644
Avi Kivityf3705d52012-03-08 16:16:34 +02002645 if (!(existing->mr->subpage)) {
Avi Kivity0f0cb162012-02-13 17:14:32 +02002646 subpage = subpage_init(base);
2647 subsection.mr = &subpage->iomem;
Avi Kivity29990972012-02-13 20:21:20 +02002648 phys_page_set(base >> TARGET_PAGE_BITS, 1,
2649 phys_section_add(&subsection));
Avi Kivity0f0cb162012-02-13 17:14:32 +02002650 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02002651 subpage = container_of(existing->mr, subpage_t, iomem);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002652 }
2653 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
2654 end = start + section->size;
2655 subpage_register(subpage, start, end, phys_section_add(section));
2656}
2657
2658
2659static void register_multipage(MemoryRegionSection *section)
bellard33417e72003-08-10 21:47:01 +00002660{
Avi Kivitydd811242012-01-02 12:17:03 +02002661 target_phys_addr_t start_addr = section->offset_within_address_space;
2662 ram_addr_t size = section->size;
Avi Kivity29990972012-02-13 20:21:20 +02002663 target_phys_addr_t addr;
Avi Kivity5312bd82012-02-12 18:32:55 +02002664 uint16_t section_index = phys_section_add(section);
Avi Kivitydd811242012-01-02 12:17:03 +02002665
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002666 assert(size);
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002667
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002668 addr = start_addr;
Avi Kivity29990972012-02-13 20:21:20 +02002669 phys_page_set(addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
2670 section_index);
bellard33417e72003-08-10 21:47:01 +00002671}
2672
Avi Kivity0f0cb162012-02-13 17:14:32 +02002673void cpu_register_physical_memory_log(MemoryRegionSection *section,
2674 bool readonly)
2675{
2676 MemoryRegionSection now = *section, remain = *section;
2677
2678 if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
2679 || (now.size < TARGET_PAGE_SIZE)) {
2680 now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
2681 - now.offset_within_address_space,
2682 now.size);
2683 register_subpage(&now);
2684 remain.size -= now.size;
2685 remain.offset_within_address_space += now.size;
2686 remain.offset_within_region += now.size;
2687 }
2688 now = remain;
2689 now.size &= TARGET_PAGE_MASK;
2690 if (now.size) {
2691 register_multipage(&now);
2692 remain.size -= now.size;
2693 remain.offset_within_address_space += now.size;
2694 remain.offset_within_region += now.size;
2695 }
2696 now = remain;
2697 if (now.size) {
2698 register_subpage(&now);
2699 }
2700}
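
/* Worked example (illustrative, assuming 4K target pages): registering a
 * section covering guest-physical [0x1800, 0x5400) is split as
 *   subpage head : 0x1800-0x1fff (register_subpage)
 *   multipage run: 0x2000-0x4fff (register_multipage, three whole pages)
 *   subpage tail : 0x5000-0x53ff (register_subpage)
 * Only the unaligned head and tail pay the subpage dispatch cost. */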
2701
2702
Anthony Liguoric227f092009-10-01 16:12:16 -05002703void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002704{
2705 if (kvm_enabled())
2706 kvm_coalesce_mmio_region(addr, size);
2707}
2708
Anthony Liguoric227f092009-10-01 16:12:16 -05002709void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002710{
2711 if (kvm_enabled())
2712 kvm_uncoalesce_mmio_region(addr, size);
2713}
2714
Sheng Yang62a27442010-01-26 19:21:16 +08002715void qemu_flush_coalesced_mmio_buffer(void)
2716{
2717 if (kvm_enabled())
2718 kvm_flush_coalesced_mmio_buffer();
2719}
2720
Marcelo Tosattic9027602010-03-01 20:25:08 -03002721#if defined(__linux__) && !defined(TARGET_S390X)
2722
2723#include <sys/vfs.h>
2724
2725#define HUGETLBFS_MAGIC 0x958458f6
2726
2727static long gethugepagesize(const char *path)
2728{
2729 struct statfs fs;
2730 int ret;
2731
2732 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002733 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002734 } while (ret != 0 && errno == EINTR);
2735
2736 if (ret != 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002737 perror(path);
2738 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002739 }
2740
2741 if (fs.f_type != HUGETLBFS_MAGIC)
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002742 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002743
2744 return fs.f_bsize;
2745}
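
/* Illustrative usage sketch: probing a hugetlbfs mount before backing
 * guest RAM with it. The mount point is an assumption for the example;
 * gethugepagesize() returns 0 on failure and the filesystem block size
 * (the huge page size) on success. */
#if 0
static void example_probe_hugepages(void)
{
    long hpagesize = gethugepagesize("/dev/hugepages");
    if (hpagesize > 0) {
        fprintf(stderr, "huge page size: %ld bytes\n", hpagesize);
    }
}
#endif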
2746
Alex Williamson04b16652010-07-02 11:13:17 -06002747static void *file_ram_alloc(RAMBlock *block,
2748 ram_addr_t memory,
2749 const char *path)
Marcelo Tosattic9027602010-03-01 20:25:08 -03002750{
2751 char *filename;
2752 void *area;
2753 int fd;
2754#ifdef MAP_POPULATE
2755 int flags;
2756#endif
2757 unsigned long hpagesize;
2758
2759 hpagesize = gethugepagesize(path);
2760 if (!hpagesize) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002761 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002762 }
2763
2764 if (memory < hpagesize) {
2765 return NULL;
2766 }
2767
2768 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2769 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2770 return NULL;
2771 }
2772
2773 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002774 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002775 }
2776
2777 fd = mkstemp(filename);
2778 if (fd < 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002779 perror("unable to create backing store for hugepages");
2780 free(filename);
2781 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002782 }
2783 unlink(filename);
2784 free(filename);
2785
2786 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2787
2788 /*
2789 * ftruncate is not supported by hugetlbfs in older
2790 * hosts, so don't bother bailing out on errors.
2791 * If anything goes wrong with it under other filesystems,
2792 * mmap will fail.
2793 */
2794 if (ftruncate(fd, memory))
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002795 perror("ftruncate");
Marcelo Tosattic9027602010-03-01 20:25:08 -03002796
2797#ifdef MAP_POPULATE
 2798     /* NB: MAP_POPULATE won't exhaustively allocate all physical pages
 2799      * when MAP_PRIVATE is requested. For mem_prealloc we mmap as
 2800      * MAP_SHARED to sidestep this quirk.
 2801      */
2802 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2803 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2804#else
2805 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2806#endif
2807 if (area == MAP_FAILED) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002808 perror("file_ram_alloc: can't mmap RAM pages");
2809 close(fd);
 2810         return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002811 }
Alex Williamson04b16652010-07-02 11:13:17 -06002812 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002813 return area;
2814}
2815#endif
2816
Alex Williamsond17b5282010-06-25 11:08:38 -06002817static ram_addr_t find_ram_offset(ram_addr_t size)
2818{
Alex Williamson04b16652010-07-02 11:13:17 -06002819 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06002820 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002821
2822 if (QLIST_EMPTY(&ram_list.blocks))
2823 return 0;
2824
2825 QLIST_FOREACH(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002826 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002827
2828 end = block->offset + block->length;
2829
2830 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2831 if (next_block->offset >= end) {
2832 next = MIN(next, next_block->offset);
2833 }
2834 }
2835 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06002836 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06002837 mingap = next - end;
2838 }
2839 }
Alex Williamson3e837b22011-10-31 08:54:09 -06002840
2841 if (offset == RAM_ADDR_MAX) {
2842 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2843 (uint64_t)size);
2844 abort();
2845 }
2846
Alex Williamson04b16652010-07-02 11:13:17 -06002847 return offset;
2848}
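
/* Worked example (illustrative): with existing blocks [0, 0x1000) and
 * [0x3000, 0x4000), a request for 0x1000 bytes sees two candidate gaps:
 * [0x1000, 0x3000) of size 0x2000, and the unbounded space above 0x4000.
 * Both fit, but the loop keeps the smallest fitting gap (best fit), so
 * the new block is placed at offset 0x1000. */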
2849
2850static ram_addr_t last_ram_offset(void)
2851{
Alex Williamsond17b5282010-06-25 11:08:38 -06002852 RAMBlock *block;
2853 ram_addr_t last = 0;
2854
2855 QLIST_FOREACH(block, &ram_list.blocks, next)
2856 last = MAX(last, block->offset + block->length);
2857
2858 return last;
2859}
2860
Avi Kivityc5705a72011-12-20 15:59:12 +02002861void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
Cam Macdonell84b89d72010-07-26 18:10:57 -06002862{
2863 RAMBlock *new_block, *block;
2864
Avi Kivityc5705a72011-12-20 15:59:12 +02002865 new_block = NULL;
2866 QLIST_FOREACH(block, &ram_list.blocks, next) {
2867 if (block->offset == addr) {
2868 new_block = block;
2869 break;
2870 }
2871 }
2872 assert(new_block);
2873 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002874
2875 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2876 char *id = dev->parent_bus->info->get_dev_path(dev);
2877 if (id) {
2878 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05002879 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002880 }
2881 }
2882 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2883
2884 QLIST_FOREACH(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02002885 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06002886 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2887 new_block->idstr);
2888 abort();
2889 }
2890 }
Avi Kivityc5705a72011-12-20 15:59:12 +02002891}
2892
2893ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2894 MemoryRegion *mr)
2895{
2896 RAMBlock *new_block;
2897
2898 size = TARGET_PAGE_ALIGN(size);
2899 new_block = g_malloc0(sizeof(*new_block));
Cam Macdonell84b89d72010-07-26 18:10:57 -06002900
Avi Kivity7c637362011-12-21 13:09:49 +02002901 new_block->mr = mr;
Jun Nakajima432d2682010-08-31 16:41:25 +01002902 new_block->offset = find_ram_offset(size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002903 if (host) {
2904 new_block->host = host;
Huang Yingcd19cfa2011-03-02 08:56:19 +01002905 new_block->flags |= RAM_PREALLOC_MASK;
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002906 } else {
2907 if (mem_path) {
2908#if defined (__linux__) && !defined(TARGET_S390X)
2909 new_block->host = file_ram_alloc(new_block, size, mem_path);
2910 if (!new_block->host) {
2911 new_block->host = qemu_vmalloc(size);
Andreas Färbere78815a2010-09-25 11:26:05 +00002912 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002913 }
2914#else
2915 fprintf(stderr, "-mem-path option unsupported\n");
2916 exit(1);
2917#endif
2918 } else {
2919#if defined(TARGET_S390X) && defined(CONFIG_KVM)
Christian Borntraegerff836782011-05-10 14:49:10 +02002920 /* S390 KVM requires the topmost vma of the RAM to be smaller than
 2921 a system-defined value, which is at least 256GB. Larger systems
 2922 have larger values. We put the guest between the end of the data
2923 segment (system break) and this value. We use 32GB as a base to
2924 have enough room for the system break to grow. */
2925 new_block->host = mmap((void*)0x800000000, size,
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002926 PROT_EXEC|PROT_READ|PROT_WRITE,
Christian Borntraegerff836782011-05-10 14:49:10 +02002927 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
Alexander Graffb8b2732011-05-20 17:33:28 +02002928 if (new_block->host == MAP_FAILED) {
2929 fprintf(stderr, "Allocating RAM failed\n");
2930 abort();
2931 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002932#else
Jan Kiszka868bb332011-06-21 22:59:09 +02002933 if (xen_enabled()) {
Avi Kivityfce537d2011-12-18 15:48:55 +02002934 xen_ram_alloc(new_block->offset, size, mr);
Jun Nakajima432d2682010-08-31 16:41:25 +01002935 } else {
2936 new_block->host = qemu_vmalloc(size);
2937 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002938#endif
Andreas Färbere78815a2010-09-25 11:26:05 +00002939 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002940 }
2941 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06002942 new_block->length = size;
2943
2944 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2945
Anthony Liguori7267c092011-08-20 22:09:37 -05002946 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
Cam Macdonell84b89d72010-07-26 18:10:57 -06002947 last_ram_offset() >> TARGET_PAGE_BITS);
2948 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2949 0xff, size >> TARGET_PAGE_BITS);
2950
2951 if (kvm_enabled())
2952 kvm_setup_guest_memory(new_block->host, size);
2953
2954 return new_block->offset;
2955}
2956
Avi Kivityc5705a72011-12-20 15:59:12 +02002957ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
pbrook94a6b542009-04-11 17:15:54 +00002958{
Avi Kivityc5705a72011-12-20 15:59:12 +02002959 return qemu_ram_alloc_from_ptr(size, NULL, mr);
pbrook94a6b542009-04-11 17:15:54 +00002960}
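
/* Illustrative sketch: how a device model could obtain and name a RAM
 * block. Device code normally goes through memory_region_init_ram()
 * instead; the direct calls, the helper name and the 8 MB size are
 * assumptions for the example. */
#if 0
static void example_alloc_vram(MemoryRegion *mr)
{
    ram_addr_t offset = qemu_ram_alloc(8 * 1024 * 1024, mr);
    qemu_ram_set_idstr(offset, "example.vram", NULL);
}
#endif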
bellarde9a1ab12007-02-08 23:08:38 +00002961
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002962void qemu_ram_free_from_ptr(ram_addr_t addr)
2963{
2964 RAMBlock *block;
2965
2966 QLIST_FOREACH(block, &ram_list.blocks, next) {
2967 if (addr == block->offset) {
2968 QLIST_REMOVE(block, next);
Anthony Liguori7267c092011-08-20 22:09:37 -05002969 g_free(block);
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002970 return;
2971 }
2972 }
2973}
2974
Anthony Liguoric227f092009-10-01 16:12:16 -05002975void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00002976{
Alex Williamson04b16652010-07-02 11:13:17 -06002977 RAMBlock *block;
2978
2979 QLIST_FOREACH(block, &ram_list.blocks, next) {
2980 if (addr == block->offset) {
2981 QLIST_REMOVE(block, next);
Huang Yingcd19cfa2011-03-02 08:56:19 +01002982 if (block->flags & RAM_PREALLOC_MASK) {
2983 ;
2984 } else if (mem_path) {
Alex Williamson04b16652010-07-02 11:13:17 -06002985#if defined (__linux__) && !defined(TARGET_S390X)
2986 if (block->fd) {
2987 munmap(block->host, block->length);
2988 close(block->fd);
2989 } else {
2990 qemu_vfree(block->host);
2991 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01002992#else
2993 abort();
Alex Williamson04b16652010-07-02 11:13:17 -06002994#endif
2995 } else {
2996#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2997 munmap(block->host, block->length);
2998#else
Jan Kiszka868bb332011-06-21 22:59:09 +02002999 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003000 xen_invalidate_map_cache_entry(block->host);
Jun Nakajima432d2682010-08-31 16:41:25 +01003001 } else {
3002 qemu_vfree(block->host);
3003 }
Alex Williamson04b16652010-07-02 11:13:17 -06003004#endif
3005 }
Anthony Liguori7267c092011-08-20 22:09:37 -05003006 g_free(block);
Alex Williamson04b16652010-07-02 11:13:17 -06003007 return;
3008 }
3009 }
3010
bellarde9a1ab12007-02-08 23:08:38 +00003011}
3012
Huang Yingcd19cfa2011-03-02 08:56:19 +01003013#ifndef _WIN32
3014void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
3015{
3016 RAMBlock *block;
3017 ram_addr_t offset;
3018 int flags;
3019 void *area, *vaddr;
3020
3021 QLIST_FOREACH(block, &ram_list.blocks, next) {
3022 offset = addr - block->offset;
3023 if (offset < block->length) {
3024 vaddr = block->host + offset;
3025 if (block->flags & RAM_PREALLOC_MASK) {
3026 ;
3027 } else {
3028 flags = MAP_FIXED;
3029 munmap(vaddr, length);
3030 if (mem_path) {
3031#if defined(__linux__) && !defined(TARGET_S390X)
3032 if (block->fd) {
3033#ifdef MAP_POPULATE
3034 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
3035 MAP_PRIVATE;
3036#else
3037 flags |= MAP_PRIVATE;
3038#endif
3039 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3040 flags, block->fd, offset);
3041 } else {
3042 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3043 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3044 flags, -1, 0);
3045 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01003046#else
3047 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01003048#endif
3049 } else {
3050#if defined(TARGET_S390X) && defined(CONFIG_KVM)
3051 flags |= MAP_SHARED | MAP_ANONYMOUS;
3052 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
3053 flags, -1, 0);
3054#else
3055 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3056 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3057 flags, -1, 0);
3058#endif
3059 }
3060 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00003061 fprintf(stderr, "Could not remap addr: "
3062 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01003063 length, addr);
3064 exit(1);
3065 }
3066 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
3067 }
3068 return;
3069 }
3070 }
3071}
3072#endif /* !_WIN32 */
3073
pbrookdc828ca2009-04-09 22:21:07 +00003074/* Return a host pointer to ram allocated with qemu_ram_alloc.
pbrook5579c7f2009-04-11 14:47:08 +00003075 With the exception of the softmmu code in this file, this should
3076 only be used for local memory (e.g. video ram) that the device owns,
3077 and knows it isn't going to access beyond the end of the block.
3078
3079 It should not be used for general purpose DMA.
3080 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
3081 */
Anthony Liguoric227f092009-10-01 16:12:16 -05003082void *qemu_get_ram_ptr(ram_addr_t addr)
pbrookdc828ca2009-04-09 22:21:07 +00003083{
pbrook94a6b542009-04-11 17:15:54 +00003084 RAMBlock *block;
3085
Alex Williamsonf471a172010-06-11 11:11:42 -06003086 QLIST_FOREACH(block, &ram_list.blocks, next) {
3087 if (addr - block->offset < block->length) {
Vincent Palatin7d82af32011-03-10 15:47:46 -05003088 /* Move this entry to the start of the list. */
3089 if (block != QLIST_FIRST(&ram_list.blocks)) {
3090 QLIST_REMOVE(block, next);
3091 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
3092 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003093 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003094 /* We need to check if the requested address is in the RAM
3095 * because we don't want to map the entire memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003096 * In that case just map until the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01003097 */
3098 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003099 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01003100 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003101 block->host =
3102 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01003103 }
3104 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003105 return block->host + (addr - block->offset);
3106 }
pbrook94a6b542009-04-11 17:15:54 +00003107 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003108
3109 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3110 abort();
3111
3112 return NULL;
pbrookdc828ca2009-04-09 22:21:07 +00003113}
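
/* Illustrative sketch: per the comment above, only a device that owns the
 * block (e.g. for video RAM) should cache the host pointer like this;
 * general-purpose DMA must go through cpu_physical_memory_map()/_rw().
 * The helper name is an assumption for the example. */
#if 0
static uint8_t *example_map_vram(ram_addr_t vram_offset)
{
    return qemu_get_ram_ptr(vram_offset);
}
#endif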
3114
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02003115/* Return a host pointer to ram allocated with qemu_ram_alloc.
3116 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3117 */
3118void *qemu_safe_ram_ptr(ram_addr_t addr)
3119{
3120 RAMBlock *block;
3121
3122 QLIST_FOREACH(block, &ram_list.blocks, next) {
3123 if (addr - block->offset < block->length) {
Jan Kiszka868bb332011-06-21 22:59:09 +02003124 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003125 /* We need to check if the requested address is in the RAM
3126 * because we don't want to map the entire memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003127 * In that case just map until the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01003128 */
3129 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003130 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01003131 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003132 block->host =
3133 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01003134 }
3135 }
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02003136 return block->host + (addr - block->offset);
3137 }
3138 }
3139
3140 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3141 abort();
3142
3143 return NULL;
3144}
3145
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003146/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
3147 * but takes a size argument */
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003148void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003149{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003150 if (*size == 0) {
3151 return NULL;
3152 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003153 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003154 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02003155 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003156 RAMBlock *block;
3157
3158 QLIST_FOREACH(block, &ram_list.blocks, next) {
3159 if (addr - block->offset < block->length) {
3160 if (addr - block->offset + *size > block->length)
3161 *size = block->length - addr + block->offset;
3162 return block->host + (addr - block->offset);
3163 }
3164 }
3165
3166 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3167 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003168 }
3169}
3170
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003171void qemu_put_ram_ptr(void *addr)
3172{
3173 trace_qemu_put_ram_ptr(addr);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003174}
3175
Marcelo Tosattie8902612010-10-11 15:31:19 -03003176int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00003177{
pbrook94a6b542009-04-11 17:15:54 +00003178 RAMBlock *block;
3179 uint8_t *host = ptr;
3180
Jan Kiszka868bb332011-06-21 22:59:09 +02003181 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003182 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003183 return 0;
3184 }
3185
Alex Williamsonf471a172010-06-11 11:11:42 -06003186 QLIST_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003187 /* This case happens when the block is not mapped. */
3188 if (block->host == NULL) {
3189 continue;
3190 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003191 if (host - block->host < block->length) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03003192 *ram_addr = block->offset + (host - block->host);
3193 return 0;
Alex Williamsonf471a172010-06-11 11:11:42 -06003194 }
pbrook94a6b542009-04-11 17:15:54 +00003195 }
Jun Nakajima432d2682010-08-31 16:41:25 +01003196
Marcelo Tosattie8902612010-10-11 15:31:19 -03003197 return -1;
3198}
Alex Williamsonf471a172010-06-11 11:11:42 -06003199
Marcelo Tosattie8902612010-10-11 15:31:19 -03003200/* Some of the softmmu routines need to translate from a host pointer
3201 (typically a TLB entry) back to a ram offset. */
3202ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3203{
3204 ram_addr_t ram_addr;
Alex Williamsonf471a172010-06-11 11:11:42 -06003205
Marcelo Tosattie8902612010-10-11 15:31:19 -03003206 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3207 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3208 abort();
3209 }
3210 return ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00003211}
3212
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003213static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
3214 unsigned size)
bellard33417e72003-08-10 21:47:01 +00003215{
pbrook67d3b952006-12-18 05:03:52 +00003216#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00003217 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
pbrook67d3b952006-12-18 05:03:52 +00003218#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003219#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003220 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00003221#endif
3222 return 0;
3223}
3224
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003225static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
3226 uint64_t val, unsigned size)
blueswir1e18231a2008-10-06 18:46:28 +00003227{
3228#ifdef DEBUG_UNASSIGNED
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003229 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
blueswir1e18231a2008-10-06 18:46:28 +00003230#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003231#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003232 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00003233#endif
3234}
3235
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003236static const MemoryRegionOps unassigned_mem_ops = {
3237 .read = unassigned_mem_read,
3238 .write = unassigned_mem_write,
3239 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00003240};
3241
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003242static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
3243 unsigned size)
3244{
3245 abort();
3246}
3247
3248static void error_mem_write(void *opaque, target_phys_addr_t addr,
3249 uint64_t value, unsigned size)
3250{
3251 abort();
3252}
3253
3254static const MemoryRegionOps error_mem_ops = {
3255 .read = error_mem_read,
3256 .write = error_mem_write,
3257 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00003258};
3259
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003260static const MemoryRegionOps rom_mem_ops = {
3261 .read = error_mem_read,
3262 .write = unassigned_mem_write,
3263 .endianness = DEVICE_NATIVE_ENDIAN,
3264};
3265
3266static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
3267 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00003268{
bellard3a7d9292005-08-21 09:26:42 +00003269 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003270 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003271 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3272#if !defined(CONFIG_USER_ONLY)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003273 tb_invalidate_phys_page_fast(ram_addr, size);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003274 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003275#endif
3276 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003277 switch (size) {
3278 case 1:
3279 stb_p(qemu_get_ram_ptr(ram_addr), val);
3280 break;
3281 case 2:
3282 stw_p(qemu_get_ram_ptr(ram_addr), val);
3283 break;
3284 case 4:
3285 stl_p(qemu_get_ram_ptr(ram_addr), val);
3286 break;
3287 default:
3288 abort();
3289 }
bellardf23db162005-08-21 19:12:28 +00003290 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003291 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00003292 /* we remove the notdirty callback only if the code has been
3293 flushed */
3294 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00003295 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00003296}
3297
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003298static const MemoryRegionOps notdirty_mem_ops = {
3299 .read = error_mem_read,
3300 .write = notdirty_mem_write,
3301 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00003302};
3303
pbrook0f459d12008-06-09 00:20:13 +00003304/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00003305static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00003306{
Andreas Färber9349b4f2012-03-14 01:38:32 +01003307 CPUArchState *env = cpu_single_env;
aliguori06d55cc2008-11-18 20:24:06 +00003308 target_ulong pc, cs_base;
3309 TranslationBlock *tb;
pbrook0f459d12008-06-09 00:20:13 +00003310 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00003311 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00003312 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00003313
aliguori06d55cc2008-11-18 20:24:06 +00003314 if (env->watchpoint_hit) {
3315 /* We re-entered the check after replacing the TB. Now raise
 3316 * the debug interrupt so that it will trigger after the
3317 * current instruction. */
3318 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3319 return;
3320 }
pbrook2e70f6e2008-06-29 01:03:05 +00003321 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003322 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00003323 if ((vaddr == (wp->vaddr & len_mask) ||
3324 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00003325 wp->flags |= BP_WATCHPOINT_HIT;
3326 if (!env->watchpoint_hit) {
3327 env->watchpoint_hit = wp;
3328 tb = tb_find_pc(env->mem_io_pc);
3329 if (!tb) {
3330 cpu_abort(env, "check_watchpoint: could not find TB for "
3331 "pc=%p", (void *)env->mem_io_pc);
3332 }
Stefan Weil618ba8e2011-04-18 06:39:53 +00003333 cpu_restore_state(tb, env, env->mem_io_pc);
aliguori6e140f22008-11-18 20:37:55 +00003334 tb_phys_invalidate(tb, -1);
3335 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3336 env->exception_index = EXCP_DEBUG;
Max Filippov488d6572012-01-29 02:24:39 +04003337 cpu_loop_exit(env);
aliguori6e140f22008-11-18 20:37:55 +00003338 } else {
3339 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3340 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
Max Filippov488d6572012-01-29 02:24:39 +04003341 cpu_resume_from_signal(env, NULL);
aliguori6e140f22008-11-18 20:37:55 +00003342 }
aliguori06d55cc2008-11-18 20:24:06 +00003343 }
aliguori6e140f22008-11-18 20:37:55 +00003344 } else {
3345 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00003346 }
3347 }
3348}
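
/* Worked example (illustrative): assuming a 4-byte watchpoint inserted at
 * vaddr 0x1004 (so wp->len_mask == ~3), a one-byte access at 0x1006
 * matches via the second test, because (0x1006 & ~3) == 0x1004. Whether
 * it fires then depends on BP_MEM_READ/BP_MEM_WRITE in wp->flags. */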
3349
pbrook6658ffb2007-03-16 23:58:11 +00003350/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3351 so these check for a hit then pass through to the normal out-of-line
3352 phys routines. */
Avi Kivity1ec9b902012-01-02 12:47:48 +02003353static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
3354 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00003355{
Avi Kivity1ec9b902012-01-02 12:47:48 +02003356 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
3357 switch (size) {
3358 case 1: return ldub_phys(addr);
3359 case 2: return lduw_phys(addr);
3360 case 4: return ldl_phys(addr);
3361 default: abort();
3362 }
pbrook6658ffb2007-03-16 23:58:11 +00003363}
3364
Avi Kivity1ec9b902012-01-02 12:47:48 +02003365static void watch_mem_write(void *opaque, target_phys_addr_t addr,
3366 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00003367{
Avi Kivity1ec9b902012-01-02 12:47:48 +02003368 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
3369 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04003370 case 1:
3371 stb_phys(addr, val);
3372 break;
3373 case 2:
3374 stw_phys(addr, val);
3375 break;
3376 case 4:
3377 stl_phys(addr, val);
3378 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02003379 default: abort();
3380 }
pbrook6658ffb2007-03-16 23:58:11 +00003381}
3382
Avi Kivity1ec9b902012-01-02 12:47:48 +02003383static const MemoryRegionOps watch_mem_ops = {
3384 .read = watch_mem_read,
3385 .write = watch_mem_write,
3386 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00003387};
pbrook6658ffb2007-03-16 23:58:11 +00003388
Avi Kivity70c68e42012-01-02 12:32:48 +02003389static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
3390 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003391{
Avi Kivity70c68e42012-01-02 12:32:48 +02003392 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003393 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02003394 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00003395#if defined(DEBUG_SUBPAGE)
3396 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3397 mmio, len, addr, idx);
3398#endif
blueswir1db7b5422007-05-26 17:36:03 +00003399
Avi Kivity5312bd82012-02-12 18:32:55 +02003400 section = &phys_sections[mmio->sub_section[idx]];
3401 addr += mmio->base;
3402 addr -= section->offset_within_address_space;
3403 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02003404 return io_mem_read(section->mr, addr, len);
blueswir1db7b5422007-05-26 17:36:03 +00003405}
3406
Avi Kivity70c68e42012-01-02 12:32:48 +02003407static void subpage_write(void *opaque, target_phys_addr_t addr,
3408 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003409{
Avi Kivity70c68e42012-01-02 12:32:48 +02003410 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003411 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02003412 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00003413#if defined(DEBUG_SUBPAGE)
Avi Kivity70c68e42012-01-02 12:32:48 +02003414 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
3415 " idx %d value %"PRIx64"\n",
Richard Hendersonf6405242010-04-22 16:47:31 -07003416 __func__, mmio, len, addr, idx, value);
blueswir1db7b5422007-05-26 17:36:03 +00003417#endif
Richard Hendersonf6405242010-04-22 16:47:31 -07003418
Avi Kivity5312bd82012-02-12 18:32:55 +02003419 section = &phys_sections[mmio->sub_section[idx]];
3420 addr += mmio->base;
3421 addr -= section->offset_within_address_space;
3422 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02003423 io_mem_write(section->mr, addr, value, len);
blueswir1db7b5422007-05-26 17:36:03 +00003424}
3425
Avi Kivity70c68e42012-01-02 12:32:48 +02003426static const MemoryRegionOps subpage_ops = {
3427 .read = subpage_read,
3428 .write = subpage_write,
3429 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00003430};
3431
Avi Kivityde712f92012-01-02 12:41:07 +02003432static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
3433 unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01003434{
3435 ram_addr_t raddr = addr;
3436 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02003437 switch (size) {
3438 case 1: return ldub_p(ptr);
3439 case 2: return lduw_p(ptr);
3440 case 4: return ldl_p(ptr);
3441 default: abort();
3442 }
Andreas Färber56384e82011-11-30 16:26:21 +01003443}
3444
Avi Kivityde712f92012-01-02 12:41:07 +02003445static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
3446 uint64_t value, unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01003447{
3448 ram_addr_t raddr = addr;
3449 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02003450 switch (size) {
3451 case 1: return stb_p(ptr, value);
3452 case 2: return stw_p(ptr, value);
3453 case 4: return stl_p(ptr, value);
3454 default: abort();
3455 }
Andreas Färber56384e82011-11-30 16:26:21 +01003456}
3457
Avi Kivityde712f92012-01-02 12:41:07 +02003458static const MemoryRegionOps subpage_ram_ops = {
3459 .read = subpage_ram_read,
3460 .write = subpage_ram_write,
3461 .endianness = DEVICE_NATIVE_ENDIAN,
Andreas Färber56384e82011-11-30 16:26:21 +01003462};
3463
Anthony Liguoric227f092009-10-01 16:12:16 -05003464static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02003465 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00003466{
3467 int idx, eidx;
3468
3469 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3470 return -1;
3471 idx = SUBPAGE_IDX(start);
3472 eidx = SUBPAGE_IDX(end);
3473#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00003474    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", __func__,
blueswir1db7b5422007-05-26 17:36:03 +00003475           mmio, start, end, idx, eidx, section);
3476#endif
Avi Kivity5312bd82012-02-12 18:32:55 +02003477 if (memory_region_is_ram(phys_sections[section].mr)) {
3478 MemoryRegionSection new_section = phys_sections[section];
3479 new_section.mr = &io_mem_subpage_ram;
3480 section = phys_section_add(&new_section);
Andreas Färber56384e82011-11-30 16:26:21 +01003481 }
blueswir1db7b5422007-05-26 17:36:03 +00003482 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02003483 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00003484 }
3485
3486 return 0;
3487}
3488
Avi Kivity0f0cb162012-02-13 17:14:32 +02003489static subpage_t *subpage_init(target_phys_addr_t base)
blueswir1db7b5422007-05-26 17:36:03 +00003490{
Anthony Liguoric227f092009-10-01 16:12:16 -05003491 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00003492
Anthony Liguori7267c092011-08-20 22:09:37 -05003493 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00003494
3495 mmio->base = base;
Avi Kivity70c68e42012-01-02 12:32:48 +02003496 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
3497 "subpage", TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02003498 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00003499#if defined(DEBUG_SUBPAGE)
aliguori1eec6142009-02-05 22:06:18 +00003500    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
 3501           mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00003502#endif
Avi Kivity0f0cb162012-02-13 17:14:32 +02003503 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
blueswir1db7b5422007-05-26 17:36:03 +00003504
3505 return mmio;
3506}
3507
Avi Kivity5312bd82012-02-12 18:32:55 +02003508static uint16_t dummy_section(MemoryRegion *mr)
3509{
3510 MemoryRegionSection section = {
3511 .mr = mr,
3512 .offset_within_address_space = 0,
3513 .offset_within_region = 0,
3514 .size = UINT64_MAX,
3515 };
3516
3517 return phys_section_add(&section);
3518}
3519
Avi Kivity37ec01d2012-03-08 18:08:35 +02003520MemoryRegion *iotlb_to_region(target_phys_addr_t index)
Avi Kivityaa102232012-03-08 17:06:55 +02003521{
Avi Kivity37ec01d2012-03-08 18:08:35 +02003522 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02003523}
3524
Avi Kivitye9179ce2009-06-14 11:38:52 +03003525static void io_mem_init(void)
3526{
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003527 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003528 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
3529 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
3530 "unassigned", UINT64_MAX);
3531 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
3532 "notdirty", UINT64_MAX);
Avi Kivityde712f92012-01-02 12:41:07 +02003533 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
3534 "subpage-ram", UINT64_MAX);
Avi Kivity1ec9b902012-01-02 12:47:48 +02003535 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
3536 "watch", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003537}
3538
Avi Kivity50c1e142012-02-08 21:36:02 +02003539static void core_begin(MemoryListener *listener)
3540{
Avi Kivity54688b12012-02-09 17:34:32 +02003541 destroy_all_mappings();
Avi Kivity5312bd82012-02-12 18:32:55 +02003542 phys_sections_clear();
Avi Kivityc19e8802012-02-13 20:25:31 +02003543 phys_map.ptr = PHYS_MAP_NODE_NIL;
Avi Kivity5312bd82012-02-12 18:32:55 +02003544 phys_section_unassigned = dummy_section(&io_mem_unassigned);
Avi Kivityaa102232012-03-08 17:06:55 +02003545 phys_section_notdirty = dummy_section(&io_mem_notdirty);
3546 phys_section_rom = dummy_section(&io_mem_rom);
3547 phys_section_watch = dummy_section(&io_mem_watch);
Avi Kivity50c1e142012-02-08 21:36:02 +02003548}
3549
3550static void core_commit(MemoryListener *listener)
3551{
Andreas Färber9349b4f2012-03-14 01:38:32 +01003552 CPUArchState *env;
Avi Kivity117712c2012-02-12 21:23:17 +02003553
3554 /* since each CPU stores ram addresses in its TLB cache, we must
3555 reset the modified entries */
3556 /* XXX: slow ! */
3557 for(env = first_cpu; env != NULL; env = env->next_cpu) {
3558 tlb_flush(env, 1);
3559 }
Avi Kivity50c1e142012-02-08 21:36:02 +02003560}
3561
Avi Kivity93632742012-02-08 16:54:16 +02003562static void core_region_add(MemoryListener *listener,
3563 MemoryRegionSection *section)
3564{
Avi Kivity4855d412012-02-08 21:16:05 +02003565 cpu_register_physical_memory_log(section, section->readonly);
Avi Kivity93632742012-02-08 16:54:16 +02003566}
3567
3568static void core_region_del(MemoryListener *listener,
3569 MemoryRegionSection *section)
3570{
Avi Kivity93632742012-02-08 16:54:16 +02003571}
3572
Avi Kivity50c1e142012-02-08 21:36:02 +02003573static void core_region_nop(MemoryListener *listener,
3574 MemoryRegionSection *section)
3575{
Avi Kivity54688b12012-02-09 17:34:32 +02003576 cpu_register_physical_memory_log(section, section->readonly);
Avi Kivity50c1e142012-02-08 21:36:02 +02003577}
3578
Avi Kivity93632742012-02-08 16:54:16 +02003579static void core_log_start(MemoryListener *listener,
3580 MemoryRegionSection *section)
3581{
3582}
3583
3584static void core_log_stop(MemoryListener *listener,
3585 MemoryRegionSection *section)
3586{
3587}
3588
3589static void core_log_sync(MemoryListener *listener,
3590 MemoryRegionSection *section)
3591{
3592}
3593
3594static void core_log_global_start(MemoryListener *listener)
3595{
3596 cpu_physical_memory_set_dirty_tracking(1);
3597}
3598
3599static void core_log_global_stop(MemoryListener *listener)
3600{
3601 cpu_physical_memory_set_dirty_tracking(0);
3602}
3603
3604static void core_eventfd_add(MemoryListener *listener,
3605 MemoryRegionSection *section,
3606 bool match_data, uint64_t data, int fd)
3607{
3608}
3609
3610static void core_eventfd_del(MemoryListener *listener,
3611 MemoryRegionSection *section,
3612 bool match_data, uint64_t data, int fd)
3613{
3614}
3615
Avi Kivity50c1e142012-02-08 21:36:02 +02003616static void io_begin(MemoryListener *listener)
3617{
3618}
3619
3620static void io_commit(MemoryListener *listener)
3621{
3622}
3623
Avi Kivity4855d412012-02-08 21:16:05 +02003624static void io_region_add(MemoryListener *listener,
3625 MemoryRegionSection *section)
3626{
Avi Kivitya2d33522012-03-05 17:40:12 +02003627 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
3628
3629 mrio->mr = section->mr;
3630 mrio->offset = section->offset_within_region;
3631 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
Avi Kivity4855d412012-02-08 21:16:05 +02003632 section->offset_within_address_space, section->size);
Avi Kivitya2d33522012-03-05 17:40:12 +02003633 ioport_register(&mrio->iorange);
Avi Kivity4855d412012-02-08 21:16:05 +02003634}
3635
3636static void io_region_del(MemoryListener *listener,
3637 MemoryRegionSection *section)
3638{
3639 isa_unassign_ioport(section->offset_within_address_space, section->size);
3640}
3641
Avi Kivity50c1e142012-02-08 21:36:02 +02003642static void io_region_nop(MemoryListener *listener,
3643 MemoryRegionSection *section)
3644{
3645}
3646
Avi Kivity4855d412012-02-08 21:16:05 +02003647static void io_log_start(MemoryListener *listener,
3648 MemoryRegionSection *section)
3649{
3650}
3651
3652static void io_log_stop(MemoryListener *listener,
3653 MemoryRegionSection *section)
3654{
3655}
3656
3657static void io_log_sync(MemoryListener *listener,
3658 MemoryRegionSection *section)
3659{
3660}
3661
3662static void io_log_global_start(MemoryListener *listener)
3663{
3664}
3665
3666static void io_log_global_stop(MemoryListener *listener)
3667{
3668}
3669
3670static void io_eventfd_add(MemoryListener *listener,
3671 MemoryRegionSection *section,
3672 bool match_data, uint64_t data, int fd)
3673{
3674}
3675
3676static void io_eventfd_del(MemoryListener *listener,
3677 MemoryRegionSection *section,
3678 bool match_data, uint64_t data, int fd)
3679{
3680}
3681
Avi Kivity93632742012-02-08 16:54:16 +02003682static MemoryListener core_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02003683 .begin = core_begin,
3684 .commit = core_commit,
Avi Kivity93632742012-02-08 16:54:16 +02003685 .region_add = core_region_add,
3686 .region_del = core_region_del,
Avi Kivity50c1e142012-02-08 21:36:02 +02003687 .region_nop = core_region_nop,
Avi Kivity93632742012-02-08 16:54:16 +02003688 .log_start = core_log_start,
3689 .log_stop = core_log_stop,
3690 .log_sync = core_log_sync,
3691 .log_global_start = core_log_global_start,
3692 .log_global_stop = core_log_global_stop,
3693 .eventfd_add = core_eventfd_add,
3694 .eventfd_del = core_eventfd_del,
3695 .priority = 0,
3696};
3697
Avi Kivity4855d412012-02-08 21:16:05 +02003698static MemoryListener io_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02003699 .begin = io_begin,
3700 .commit = io_commit,
Avi Kivity4855d412012-02-08 21:16:05 +02003701 .region_add = io_region_add,
3702 .region_del = io_region_del,
Avi Kivity50c1e142012-02-08 21:36:02 +02003703 .region_nop = io_region_nop,
Avi Kivity4855d412012-02-08 21:16:05 +02003704 .log_start = io_log_start,
3705 .log_stop = io_log_stop,
3706 .log_sync = io_log_sync,
3707 .log_global_start = io_log_global_start,
3708 .log_global_stop = io_log_global_stop,
3709 .eventfd_add = io_eventfd_add,
3710 .eventfd_del = io_eventfd_del,
3711 .priority = 0,
3712};
3713
Avi Kivity62152b82011-07-26 14:26:14 +03003714static void memory_map_init(void)
3715{
Anthony Liguori7267c092011-08-20 22:09:37 -05003716 system_memory = g_malloc(sizeof(*system_memory));
Avi Kivity8417ceb2011-08-03 11:56:14 +03003717 memory_region_init(system_memory, "system", INT64_MAX);
Avi Kivity62152b82011-07-26 14:26:14 +03003718 set_system_memory_map(system_memory);
Avi Kivity309cb472011-08-08 16:09:03 +03003719
Anthony Liguori7267c092011-08-20 22:09:37 -05003720 system_io = g_malloc(sizeof(*system_io));
Avi Kivity309cb472011-08-08 16:09:03 +03003721 memory_region_init(system_io, "io", 65536);
3722 set_system_io_map(system_io);
Avi Kivity93632742012-02-08 16:54:16 +02003723
Avi Kivity4855d412012-02-08 21:16:05 +02003724 memory_listener_register(&core_memory_listener, system_memory);
3725 memory_listener_register(&io_memory_listener, system_io);
Avi Kivity62152b82011-07-26 14:26:14 +03003726}
3727
3728MemoryRegion *get_system_memory(void)
3729{
3730 return system_memory;
3731}
3732
Avi Kivity309cb472011-08-08 16:09:03 +03003733MemoryRegion *get_system_io(void)
3734{
3735 return system_io;
3736}
3737
pbrooke2eef172008-06-08 01:09:01 +00003738#endif /* !defined(CONFIG_USER_ONLY) */
3739
bellard13eb76e2004-01-24 15:23:36 +00003740/* physical memory access (slow version, mainly for debug) */
3741#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +01003742int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00003743 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003744{
3745 int l, flags;
3746 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00003747 void * p;
bellard13eb76e2004-01-24 15:23:36 +00003748
3749 while (len > 0) {
3750 page = addr & TARGET_PAGE_MASK;
3751 l = (page + TARGET_PAGE_SIZE) - addr;
3752 if (l > len)
3753 l = len;
3754 flags = page_get_flags(page);
3755 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00003756 return -1;
bellard13eb76e2004-01-24 15:23:36 +00003757 if (is_write) {
3758 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00003759 return -1;
bellard579a97f2007-11-11 14:26:47 +00003760 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003761 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00003762 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003763 memcpy(p, buf, l);
3764 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00003765 } else {
3766 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00003767 return -1;
bellard579a97f2007-11-11 14:26:47 +00003768 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003769 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00003770 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003771 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00003772 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00003773 }
3774 len -= l;
3775 buf += l;
3776 addr += l;
3777 }
Paul Brooka68fe892010-03-01 00:08:59 +00003778 return 0;
bellard13eb76e2004-01-24 15:23:36 +00003779}
bellard8df1cd02005-01-28 22:37:22 +00003780
bellard13eb76e2004-01-24 15:23:36 +00003781#else
Anthony Liguoric227f092009-10-01 16:12:16 -05003782void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
bellard13eb76e2004-01-24 15:23:36 +00003783 int len, int is_write)
3784{
Avi Kivity37ec01d2012-03-08 18:08:35 +02003785 int l;
bellard13eb76e2004-01-24 15:23:36 +00003786 uint8_t *ptr;
3787 uint32_t val;
Anthony Liguoric227f092009-10-01 16:12:16 -05003788 target_phys_addr_t page;
Avi Kivityf3705d52012-03-08 16:16:34 +02003789 MemoryRegionSection *section;
ths3b46e622007-09-17 08:09:54 +00003790
bellard13eb76e2004-01-24 15:23:36 +00003791 while (len > 0) {
3792 page = addr & TARGET_PAGE_MASK;
3793 l = (page + TARGET_PAGE_SIZE) - addr;
3794 if (l > len)
3795 l = len;
Avi Kivity06ef3522012-02-13 16:11:22 +02003796 section = phys_page_find(page >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003797
bellard13eb76e2004-01-24 15:23:36 +00003798 if (is_write) {
Avi Kivityf3705d52012-03-08 16:16:34 +02003799 if (!memory_region_is_ram(section->mr)) {
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003800 target_phys_addr_t addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02003801 addr1 = section_addr(section, addr);
bellard6a00d602005-11-21 23:25:50 +00003802 /* XXX: could force cpu_single_env to NULL to avoid
3803 potential bugs */
aurel326c2934d2009-02-18 21:37:17 +00003804 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003805 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003806 val = ldl_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003807 io_mem_write(section->mr, addr1, val, 4);
bellard13eb76e2004-01-24 15:23:36 +00003808 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003809 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003810 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003811 val = lduw_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003812 io_mem_write(section->mr, addr1, val, 2);
bellard13eb76e2004-01-24 15:23:36 +00003813 l = 2;
3814 } else {
bellard1c213d12005-09-03 10:49:04 +00003815 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003816 val = ldub_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003817 io_mem_write(section->mr, addr1, val, 1);
bellard13eb76e2004-01-24 15:23:36 +00003818 l = 1;
3819 }
Avi Kivityf3705d52012-03-08 16:16:34 +02003820 } else if (!section->readonly) {
Anthony PERARD8ca56922011-07-15 04:32:53 +00003821 ram_addr_t addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02003822 addr1 = memory_region_get_ram_addr(section->mr)
3823 + section_addr(section, addr);
bellard13eb76e2004-01-24 15:23:36 +00003824 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003825 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00003826 memcpy(ptr, buf, l);
bellard3a7d9292005-08-21 09:26:42 +00003827 if (!cpu_physical_memory_is_dirty(addr1)) {
3828 /* invalidate code */
3829 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3830 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003831 cpu_physical_memory_set_dirty_flags(
3832 addr1, (0xff & ~CODE_DIRTY_FLAG));
bellard3a7d9292005-08-21 09:26:42 +00003833 }
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003834 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00003835 }
3836 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02003837 if (!is_ram_rom_romd(section)) {
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003838 target_phys_addr_t addr1;
bellard13eb76e2004-01-24 15:23:36 +00003839 /* I/O case */
Avi Kivityf3705d52012-03-08 16:16:34 +02003840 addr1 = section_addr(section, addr);
aurel326c2934d2009-02-18 21:37:17 +00003841 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003842 /* 32 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02003843 val = io_mem_read(section->mr, addr1, 4);
bellardc27004e2005-01-03 23:35:10 +00003844 stl_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003845 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003846 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003847 /* 16 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02003848 val = io_mem_read(section->mr, addr1, 2);
bellardc27004e2005-01-03 23:35:10 +00003849 stw_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003850 l = 2;
3851 } else {
bellard1c213d12005-09-03 10:49:04 +00003852 /* 8 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02003853 val = io_mem_read(section->mr, addr1, 1);
bellardc27004e2005-01-03 23:35:10 +00003854 stb_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003855 l = 1;
3856 }
3857 } else {
3858 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02003859 ptr = qemu_get_ram_ptr(section->mr->ram_addr)
3860 + section_addr(section, addr);
3861 memcpy(buf, ptr, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003862 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00003863 }
3864 }
3865 len -= l;
3866 buf += l;
3867 addr += l;
3868 }
3869}
bellard8df1cd02005-01-28 22:37:22 +00003870
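/* Callers normally go through the cpu_physical_memory_read() and
 * cpu_physical_memory_write() wrappers, or the typed ld*_phys/st*_phys
 * helpers below, rather than calling the function above directly.  A
 * small sketch dumping guest-physical memory ('pa' is hypothetical):
 *
 *     uint8_t dump[64];
 *
 *     cpu_physical_memory_read(pa, dump, sizeof(dump));
 */
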
bellardd0ecd2a2006-04-23 17:14:48 +00003871/* used for ROM loading : can write in RAM and ROM */
Anthony Liguoric227f092009-10-01 16:12:16 -05003872void cpu_physical_memory_write_rom(target_phys_addr_t addr,
bellardd0ecd2a2006-04-23 17:14:48 +00003873 const uint8_t *buf, int len)
3874{
3875 int l;
3876 uint8_t *ptr;
Anthony Liguoric227f092009-10-01 16:12:16 -05003877 target_phys_addr_t page;
Avi Kivityf3705d52012-03-08 16:16:34 +02003878 MemoryRegionSection *section;
ths3b46e622007-09-17 08:09:54 +00003879
bellardd0ecd2a2006-04-23 17:14:48 +00003880 while (len > 0) {
3881 page = addr & TARGET_PAGE_MASK;
3882 l = (page + TARGET_PAGE_SIZE) - addr;
3883 if (l > len)
3884 l = len;
Avi Kivity06ef3522012-02-13 16:11:22 +02003885 section = phys_page_find(page >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003886
Avi Kivityf3705d52012-03-08 16:16:34 +02003887 if (!is_ram_rom_romd(section)) {
bellardd0ecd2a2006-04-23 17:14:48 +00003888 /* do nothing */
3889 } else {
3890 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02003891 addr1 = memory_region_get_ram_addr(section->mr)
3892 + section_addr(section, addr);
bellardd0ecd2a2006-04-23 17:14:48 +00003893 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003894 ptr = qemu_get_ram_ptr(addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00003895 memcpy(ptr, buf, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003896 qemu_put_ram_ptr(ptr);
bellardd0ecd2a2006-04-23 17:14:48 +00003897 }
3898 len -= l;
3899 buf += l;
3900 addr += l;
3901 }
3902}
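
/* A hedged sketch of the intended use: loading a firmware image into a
 * region the guest sees as ROM ('blob', 'blob_size' and the load address
 * are illustrative):
 *
 *     cpu_physical_memory_write_rom(0xfffc0000, blob, blob_size);
 *
 * A plain cpu_physical_memory_write() to the same address would be
 * silently dropped once the region is marked read-only, as the RAM
 * branch in cpu_physical_memory_rw() shows.
 */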
3903
aliguori6d16c2f2009-01-22 16:59:11 +00003904typedef struct {
3905 void *buffer;
Anthony Liguoric227f092009-10-01 16:12:16 -05003906 target_phys_addr_t addr;
3907 target_phys_addr_t len;
aliguori6d16c2f2009-01-22 16:59:11 +00003908} BounceBuffer;
3909
3910static BounceBuffer bounce;
3911
aliguoriba223c22009-01-22 16:59:16 +00003912typedef struct MapClient {
3913 void *opaque;
3914 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00003915 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00003916} MapClient;
3917
Blue Swirl72cf2d42009-09-12 07:36:22 +00003918static QLIST_HEAD(map_client_list, MapClient) map_client_list
3919 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003920
3921void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3922{
Anthony Liguori7267c092011-08-20 22:09:37 -05003923 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00003924
3925 client->opaque = opaque;
3926 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003927 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00003928 return client;
3929}
3930
3931void cpu_unregister_map_client(void *_client)
3932{
3933 MapClient *client = (MapClient *)_client;
3934
Blue Swirl72cf2d42009-09-12 07:36:22 +00003935 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05003936 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00003937}
3938
3939static void cpu_notify_map_clients(void)
3940{
3941 MapClient *client;
3942
Blue Swirl72cf2d42009-09-12 07:36:22 +00003943 while (!QLIST_EMPTY(&map_client_list)) {
3944 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003945 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09003946 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00003947 }
3948}
3949
aliguori6d16c2f2009-01-22 16:59:11 +00003950/* Map a physical memory region into a host virtual address.
3951 * May map a subset of the requested range, given by and returned in *plen.
3952 * May return NULL if resources needed to perform the mapping are exhausted.
3953 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00003954 * Use cpu_register_map_client() to know when retrying the map operation is
3955 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00003956 */
Anthony Liguoric227f092009-10-01 16:12:16 -05003957void *cpu_physical_memory_map(target_phys_addr_t addr,
3958 target_phys_addr_t *plen,
aliguori6d16c2f2009-01-22 16:59:11 +00003959 int is_write)
3960{
Anthony Liguoric227f092009-10-01 16:12:16 -05003961 target_phys_addr_t len = *plen;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003962 target_phys_addr_t todo = 0;
aliguori6d16c2f2009-01-22 16:59:11 +00003963 int l;
Anthony Liguoric227f092009-10-01 16:12:16 -05003964 target_phys_addr_t page;
Avi Kivityf3705d52012-03-08 16:16:34 +02003965 MemoryRegionSection *section;
Anthony PERARDf15fbc42011-07-20 08:17:42 +00003966 ram_addr_t raddr = RAM_ADDR_MAX;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003967 ram_addr_t rlen;
3968 void *ret;
aliguori6d16c2f2009-01-22 16:59:11 +00003969
3970 while (len > 0) {
3971 page = addr & TARGET_PAGE_MASK;
3972 l = (page + TARGET_PAGE_SIZE) - addr;
3973 if (l > len)
3974 l = len;
Avi Kivity06ef3522012-02-13 16:11:22 +02003975 section = phys_page_find(page >> TARGET_PAGE_BITS);
aliguori6d16c2f2009-01-22 16:59:11 +00003976
Avi Kivityf3705d52012-03-08 16:16:34 +02003977 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003978 if (todo || bounce.buffer) {
aliguori6d16c2f2009-01-22 16:59:11 +00003979 break;
3980 }
3981 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3982 bounce.addr = addr;
3983 bounce.len = l;
3984 if (!is_write) {
Stefan Weil54f7b4a2011-04-10 18:23:39 +02003985 cpu_physical_memory_read(addr, bounce.buffer, l);
aliguori6d16c2f2009-01-22 16:59:11 +00003986 }
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003987
3988 *plen = l;
3989 return bounce.buffer;
aliguori6d16c2f2009-01-22 16:59:11 +00003990 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003991 if (!todo) {
Avi Kivityf3705d52012-03-08 16:16:34 +02003992 raddr = memory_region_get_ram_addr(section->mr)
3993 + section_addr(section, addr);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003994 }
aliguori6d16c2f2009-01-22 16:59:11 +00003995
3996 len -= l;
3997 addr += l;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003998 todo += l;
aliguori6d16c2f2009-01-22 16:59:11 +00003999 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01004000 rlen = todo;
4001 ret = qemu_ram_ptr_length(raddr, &rlen);
4002 *plen = rlen;
4003 return ret;
aliguori6d16c2f2009-01-22 16:59:11 +00004004}
4005
4006/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
4007 * Will also mark the memory as dirty if is_write == 1. access_len gives
4008 * the amount of memory that was actually read or written by the caller.
4009 */
Anthony Liguoric227f092009-10-01 16:12:16 -05004010void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
4011 int is_write, target_phys_addr_t access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00004012{
4013 if (buffer != bounce.buffer) {
4014 if (is_write) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03004015 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00004016 while (access_len) {
4017 unsigned l;
4018 l = TARGET_PAGE_SIZE;
4019 if (l > access_len)
4020 l = access_len;
4021 if (!cpu_physical_memory_is_dirty(addr1)) {
4022 /* invalidate code */
4023 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
4024 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09004025 cpu_physical_memory_set_dirty_flags(
4026 addr1, (0xff & ~CODE_DIRTY_FLAG));
aliguori6d16c2f2009-01-22 16:59:11 +00004027 }
4028 addr1 += l;
4029 access_len -= l;
4030 }
4031 }
Jan Kiszka868bb332011-06-21 22:59:09 +02004032 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02004033 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01004034 }
aliguori6d16c2f2009-01-22 16:59:11 +00004035 return;
4036 }
4037 if (is_write) {
4038 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
4039 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00004040 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00004041 bounce.buffer = NULL;
aliguoriba223c22009-01-22 16:59:16 +00004042 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00004043}
bellardd0ecd2a2006-04-23 17:14:48 +00004044
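/* A hedged sketch of the map/unmap pairing for a device reading guest
 * memory ('pa' is hypothetical).  *plen may come back smaller than
 * requested, so real callers loop over the remainder:
 *
 *     target_phys_addr_t plen = 4096;
 *     void *host = cpu_physical_memory_map(pa, &plen, 0);
 *
 *     if (host) {
 *         ... consume up to plen bytes at host ...
 *         cpu_physical_memory_unmap(host, plen, 0, plen);
 *     }
 */
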
bellard8df1cd02005-01-28 22:37:22 +00004045/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004046static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
4047 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00004048{
bellard8df1cd02005-01-28 22:37:22 +00004049 uint8_t *ptr;
4050 uint32_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02004051 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00004052
Avi Kivity06ef3522012-02-13 16:11:22 +02004053 section = phys_page_find(addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00004054
Avi Kivityf3705d52012-03-08 16:16:34 +02004055 if (!is_ram_rom_romd(section)) {
bellard8df1cd02005-01-28 22:37:22 +00004056 /* I/O case */
Avi Kivityf3705d52012-03-08 16:16:34 +02004057 addr = section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02004058 val = io_mem_read(section->mr, addr, 4);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004059#if defined(TARGET_WORDS_BIGENDIAN)
4060 if (endian == DEVICE_LITTLE_ENDIAN) {
4061 val = bswap32(val);
4062 }
4063#else
4064 if (endian == DEVICE_BIG_ENDIAN) {
4065 val = bswap32(val);
4066 }
4067#endif
bellard8df1cd02005-01-28 22:37:22 +00004068 } else {
4069 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02004070 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02004071 & TARGET_PAGE_MASK)
Avi Kivityf3705d52012-03-08 16:16:34 +02004072 + section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004073 switch (endian) {
4074 case DEVICE_LITTLE_ENDIAN:
4075 val = ldl_le_p(ptr);
4076 break;
4077 case DEVICE_BIG_ENDIAN:
4078 val = ldl_be_p(ptr);
4079 break;
4080 default:
4081 val = ldl_p(ptr);
4082 break;
4083 }
bellard8df1cd02005-01-28 22:37:22 +00004084 }
4085 return val;
4086}
4087
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004088uint32_t ldl_phys(target_phys_addr_t addr)
4089{
4090 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4091}
4092
4093uint32_t ldl_le_phys(target_phys_addr_t addr)
4094{
4095 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4096}
4097
4098uint32_t ldl_be_phys(target_phys_addr_t addr)
4099{
4100 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
4101}
4102
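/* A hedged sketch of how device code chooses among these variants: a
 * little-endian register is read with the _le_ accessor regardless of
 * the target's own byte order ('bar_pa' is hypothetical; stl_le_phys()
 * is defined further below):
 *
 *     uint32_t reg = ldl_le_phys(bar_pa);
 *
 *     stl_le_phys(bar_pa + 4, reg | 1);
 */
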
bellard84b7b8e2005-11-28 21:19:04 +00004103/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004104static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
4105 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00004106{
bellard84b7b8e2005-11-28 21:19:04 +00004107 uint8_t *ptr;
4108 uint64_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02004109 MemoryRegionSection *section;
bellard84b7b8e2005-11-28 21:19:04 +00004110
Avi Kivity06ef3522012-02-13 16:11:22 +02004111 section = phys_page_find(addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00004112
Avi Kivityf3705d52012-03-08 16:16:34 +02004113 if (!is_ram_rom_romd(section)) {
bellard84b7b8e2005-11-28 21:19:04 +00004114 /* I/O case */
Avi Kivityf3705d52012-03-08 16:16:34 +02004115 addr = section_addr(section, addr);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004116
4117 /* XXX This is broken when device endian != cpu endian.
4118 Fix and add "endian" variable check */
bellard84b7b8e2005-11-28 21:19:04 +00004119#ifdef TARGET_WORDS_BIGENDIAN
Avi Kivity37ec01d2012-03-08 18:08:35 +02004120 val = io_mem_read(section->mr, addr, 4) << 32;
4121 val |= io_mem_read(section->mr, addr + 4, 4);
bellard84b7b8e2005-11-28 21:19:04 +00004122#else
Avi Kivity37ec01d2012-03-08 18:08:35 +02004123 val = io_mem_read(section->mr, addr, 4);
4124 val |= io_mem_read(section->mr, addr + 4, 4) << 32;
bellard84b7b8e2005-11-28 21:19:04 +00004125#endif
4126 } else {
4127 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02004128 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02004129 & TARGET_PAGE_MASK)
Avi Kivityf3705d52012-03-08 16:16:34 +02004130 + section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004131 switch (endian) {
4132 case DEVICE_LITTLE_ENDIAN:
4133 val = ldq_le_p(ptr);
4134 break;
4135 case DEVICE_BIG_ENDIAN:
4136 val = ldq_be_p(ptr);
4137 break;
4138 default:
4139 val = ldq_p(ptr);
4140 break;
4141 }
bellard84b7b8e2005-11-28 21:19:04 +00004142 }
4143 return val;
4144}
4145
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004146uint64_t ldq_phys(target_phys_addr_t addr)
4147{
4148 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4149}
4150
4151uint64_t ldq_le_phys(target_phys_addr_t addr)
4152{
4153 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4154}
4155
4156uint64_t ldq_be_phys(target_phys_addr_t addr)
4157{
4158 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
4159}
4160
bellardaab33092005-10-30 20:48:42 +00004161/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05004162uint32_t ldub_phys(target_phys_addr_t addr)
bellardaab33092005-10-30 20:48:42 +00004163{
4164 uint8_t val;
4165 cpu_physical_memory_read(addr, &val, 1);
4166 return val;
4167}
4168
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004169/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004170static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
4171 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00004172{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004173 uint8_t *ptr;
4174 uint64_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02004175 MemoryRegionSection *section;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004176
Avi Kivity06ef3522012-02-13 16:11:22 +02004177 section = phys_page_find(addr >> TARGET_PAGE_BITS);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004178
Avi Kivityf3705d52012-03-08 16:16:34 +02004179 if (!is_ram_rom_romd(section)) {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004180 /* I/O case */
Avi Kivityf3705d52012-03-08 16:16:34 +02004181 addr = section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02004182 val = io_mem_read(section->mr, addr, 2);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004183#if defined(TARGET_WORDS_BIGENDIAN)
4184 if (endian == DEVICE_LITTLE_ENDIAN) {
4185 val = bswap16(val);
4186 }
4187#else
4188 if (endian == DEVICE_BIG_ENDIAN) {
4189 val = bswap16(val);
4190 }
4191#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004192 } else {
4193 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02004194 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02004195 & TARGET_PAGE_MASK)
Avi Kivityf3705d52012-03-08 16:16:34 +02004196 + section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004197 switch (endian) {
4198 case DEVICE_LITTLE_ENDIAN:
4199 val = lduw_le_p(ptr);
4200 break;
4201 case DEVICE_BIG_ENDIAN:
4202 val = lduw_be_p(ptr);
4203 break;
4204 default:
4205 val = lduw_p(ptr);
4206 break;
4207 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004208 }
4209 return val;
bellardaab33092005-10-30 20:48:42 +00004210}
4211
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004212uint32_t lduw_phys(target_phys_addr_t addr)
4213{
4214 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4215}
4216
4217uint32_t lduw_le_phys(target_phys_addr_t addr)
4218{
4219 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4220}
4221
4222uint32_t lduw_be_phys(target_phys_addr_t addr)
4223{
4224 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
4225}
4226
bellard8df1cd02005-01-28 22:37:22 +00004227/* warning: addr must be aligned. The ram page is not marked as dirty
4228 and the code inside is not invalidated. It is useful if the dirty
4229 bits are used to track modified PTEs */
Anthony Liguoric227f092009-10-01 16:12:16 -05004230void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
bellard8df1cd02005-01-28 22:37:22 +00004231{
bellard8df1cd02005-01-28 22:37:22 +00004232 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02004233 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00004234
Avi Kivity06ef3522012-02-13 16:11:22 +02004235 section = phys_page_find(addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00004236
Avi Kivityf3705d52012-03-08 16:16:34 +02004237 if (!memory_region_is_ram(section->mr) || section->readonly) {
Avi Kivityf3705d52012-03-08 16:16:34 +02004238 addr = section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02004239 if (memory_region_is_ram(section->mr)) {
4240 section = &phys_sections[phys_section_rom];
4241 }
4242 io_mem_write(section->mr, addr, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00004243 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02004244 unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02004245 & TARGET_PAGE_MASK)
Avi Kivityf3705d52012-03-08 16:16:34 +02004246 + section_addr(section, addr);
pbrook5579c7f2009-04-11 14:47:08 +00004247 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00004248 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00004249
4250 if (unlikely(in_migration)) {
4251 if (!cpu_physical_memory_is_dirty(addr1)) {
4252 /* invalidate code */
4253 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4254 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09004255 cpu_physical_memory_set_dirty_flags(
4256 addr1, (0xff & ~CODE_DIRTY_FLAG));
aliguori74576192008-10-06 14:02:03 +00004257 }
4258 }
bellard8df1cd02005-01-28 22:37:22 +00004259 }
4260}
4261
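/* A hedged sketch of the PTE use case described above: target MMU code
 * sets the dirty flag in a guest page-table entry without flagging the
 * page as modified ('pte_pa' and PTE_DIRTY are hypothetical):
 *
 *     uint32_t pte = ldl_phys(pte_pa);
 *
 *     if (!(pte & PTE_DIRTY)) {
 *         stl_phys_notdirty(pte_pa, pte | PTE_DIRTY);
 *     }
 */
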
Anthony Liguoric227f092009-10-01 16:12:16 -05004262void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
j_mayerbc98a7e2007-04-04 07:55:12 +00004263{
j_mayerbc98a7e2007-04-04 07:55:12 +00004264 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02004265 MemoryRegionSection *section;
j_mayerbc98a7e2007-04-04 07:55:12 +00004266
Avi Kivity06ef3522012-02-13 16:11:22 +02004267 section = phys_page_find(addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00004268
Avi Kivityf3705d52012-03-08 16:16:34 +02004269 if (!memory_region_is_ram(section->mr) || section->readonly) {
Avi Kivityf3705d52012-03-08 16:16:34 +02004270 addr = section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02004271 if (memory_region_is_ram(section->mr)) {
4272 section = &phys_sections[phys_section_rom];
4273 }
j_mayerbc98a7e2007-04-04 07:55:12 +00004274#ifdef TARGET_WORDS_BIGENDIAN
Avi Kivity37ec01d2012-03-08 18:08:35 +02004275 io_mem_write(section->mr, addr, val >> 32, 4);
4276 io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
j_mayerbc98a7e2007-04-04 07:55:12 +00004277#else
Avi Kivity37ec01d2012-03-08 18:08:35 +02004278 io_mem_write(section->mr, addr, (uint32_t)val, 4);
4279 io_mem_write(section->mr, addr + 4, val >> 32, 4);
j_mayerbc98a7e2007-04-04 07:55:12 +00004280#endif
4281 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02004282 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02004283 & TARGET_PAGE_MASK)
Avi Kivityf3705d52012-03-08 16:16:34 +02004284 + section_addr(section, addr));
j_mayerbc98a7e2007-04-04 07:55:12 +00004285 stq_p(ptr, val);
4286 }
4287}
4288
bellard8df1cd02005-01-28 22:37:22 +00004289/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004290static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
4291 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00004292{
bellard8df1cd02005-01-28 22:37:22 +00004293 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02004294 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00004295
Avi Kivity06ef3522012-02-13 16:11:22 +02004296 section = phys_page_find(addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00004297
Avi Kivityf3705d52012-03-08 16:16:34 +02004298 if (!memory_region_is_ram(section->mr) || section->readonly) {
Avi Kivityf3705d52012-03-08 16:16:34 +02004299 addr = section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02004300 if (memory_region_is_ram(section->mr)) {
4301 section = &phys_sections[phys_section_rom];
4302 }
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004303#if defined(TARGET_WORDS_BIGENDIAN)
4304 if (endian == DEVICE_LITTLE_ENDIAN) {
4305 val = bswap32(val);
4306 }
4307#else
4308 if (endian == DEVICE_BIG_ENDIAN) {
4309 val = bswap32(val);
4310 }
4311#endif
Avi Kivity37ec01d2012-03-08 18:08:35 +02004312 io_mem_write(section->mr, addr, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00004313 } else {
4314 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02004315 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
4316 + section_addr(section, addr);
bellard8df1cd02005-01-28 22:37:22 +00004317 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00004318 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004319 switch (endian) {
4320 case DEVICE_LITTLE_ENDIAN:
4321 stl_le_p(ptr, val);
4322 break;
4323 case DEVICE_BIG_ENDIAN:
4324 stl_be_p(ptr, val);
4325 break;
4326 default:
4327 stl_p(ptr, val);
4328 break;
4329 }
bellard3a7d9292005-08-21 09:26:42 +00004330 if (!cpu_physical_memory_is_dirty(addr1)) {
4331 /* invalidate code */
4332 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4333 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09004334 cpu_physical_memory_set_dirty_flags(addr1,
4335 (0xff & ~CODE_DIRTY_FLAG));
bellard3a7d9292005-08-21 09:26:42 +00004336 }
bellard8df1cd02005-01-28 22:37:22 +00004337 }
4338}
4339
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004340void stl_phys(target_phys_addr_t addr, uint32_t val)
4341{
4342 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4343}
4344
4345void stl_le_phys(target_phys_addr_t addr, uint32_t val)
4346{
4347 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4348}
4349
4350void stl_be_phys(target_phys_addr_t addr, uint32_t val)
4351{
4352 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4353}
4354
bellardaab33092005-10-30 20:48:42 +00004355/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05004356void stb_phys(target_phys_addr_t addr, uint32_t val)
bellardaab33092005-10-30 20:48:42 +00004357{
4358 uint8_t v = val;
4359 cpu_physical_memory_write(addr, &v, 1);
4360}
4361
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004362/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004363static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
4364 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00004365{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004366 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02004367 MemoryRegionSection *section;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004368
Avi Kivity06ef3522012-02-13 16:11:22 +02004369 section = phys_page_find(addr >> TARGET_PAGE_BITS);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004370
Avi Kivityf3705d52012-03-08 16:16:34 +02004371 if (!memory_region_is_ram(section->mr) || section->readonly) {
Avi Kivityf3705d52012-03-08 16:16:34 +02004372 addr = section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02004373 if (memory_region_is_ram(section->mr)) {
4374 section = &phys_sections[phys_section_rom];
4375 }
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004376#if defined(TARGET_WORDS_BIGENDIAN)
4377 if (endian == DEVICE_LITTLE_ENDIAN) {
4378 val = bswap16(val);
4379 }
4380#else
4381 if (endian == DEVICE_BIG_ENDIAN) {
4382 val = bswap16(val);
4383 }
4384#endif
Avi Kivity37ec01d2012-03-08 18:08:35 +02004385 io_mem_write(section->mr, addr, val, 2);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004386 } else {
4387 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02004388 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
4389 + section_addr(section, addr);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004390 /* RAM case */
4391 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004392 switch (endian) {
4393 case DEVICE_LITTLE_ENDIAN:
4394 stw_le_p(ptr, val);
4395 break;
4396 case DEVICE_BIG_ENDIAN:
4397 stw_be_p(ptr, val);
4398 break;
4399 default:
4400 stw_p(ptr, val);
4401 break;
4402 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004403 if (!cpu_physical_memory_is_dirty(addr1)) {
4404 /* invalidate code */
4405 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4406 /* set dirty bit */
4407 cpu_physical_memory_set_dirty_flags(addr1,
4408 (0xff & ~CODE_DIRTY_FLAG));
4409 }
4410 }
bellardaab33092005-10-30 20:48:42 +00004411}
4412
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004413void stw_phys(target_phys_addr_t addr, uint32_t val)
4414{
4415 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4416}
4417
4418void stw_le_phys(target_phys_addr_t addr, uint32_t val)
4419{
4420 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4421}
4422
4423void stw_be_phys(target_phys_addr_t addr, uint32_t val)
4424{
4425 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4426}
4427
bellardaab33092005-10-30 20:48:42 +00004428/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05004429void stq_phys(target_phys_addr_t addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00004430{
4431 val = tswap64(val);
Stefan Weil71d2b722011-03-26 21:06:56 +01004432 cpu_physical_memory_write(addr, &val, 8);
bellardaab33092005-10-30 20:48:42 +00004433}
4434
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004435void stq_le_phys(target_phys_addr_t addr, uint64_t val)
4436{
4437 val = cpu_to_le64(val);
4438 cpu_physical_memory_write(addr, &val, 8);
4439}
4440
4441void stq_be_phys(target_phys_addr_t addr, uint64_t val)
4442{
4443 val = cpu_to_be64(val);
4444 cpu_physical_memory_write(addr, &val, 8);
4445}
4446
aliguori5e2972f2009-03-28 17:51:36 +00004447/* virtual memory access for debug (includes writing to ROM) */
Andreas Färber9349b4f2012-03-14 01:38:32 +01004448int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00004449 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00004450{
4451 int l;
Anthony Liguoric227f092009-10-01 16:12:16 -05004452 target_phys_addr_t phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00004453 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00004454
4455 while (len > 0) {
4456 page = addr & TARGET_PAGE_MASK;
4457 phys_addr = cpu_get_phys_page_debug(env, page);
4458 /* if no physical page mapped, return an error */
4459 if (phys_addr == -1)
4460 return -1;
4461 l = (page + TARGET_PAGE_SIZE) - addr;
4462 if (l > len)
4463 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00004464 phys_addr += (addr & ~TARGET_PAGE_MASK);
aliguori5e2972f2009-03-28 17:51:36 +00004465 if (is_write)
4466 cpu_physical_memory_write_rom(phys_addr, buf, l);
4467 else
aliguori5e2972f2009-03-28 17:51:36 +00004468 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
bellard13eb76e2004-01-24 15:23:36 +00004469 len -= l;
4470 buf += l;
4471 addr += l;
4472 }
4473 return 0;
4474}
Paul Brooka68fe892010-03-01 00:08:59 +00004475#endif
bellard13eb76e2004-01-24 15:23:36 +00004476
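/* A hedged sketch of the typical caller, a debugger stub peeking at a
 * guest virtual address ('env' and 'vaddr' are hypothetical; the walk
 * above already copes with page crossings and unmapped pages):
 *
 *     uint32_t word;
 *
 *     if (cpu_memory_rw_debug(env, vaddr, (uint8_t *)&word,
 *                             sizeof(word), 0) < 0) {
 *         ... vaddr is not mapped ...
 *     }
 */
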
pbrook2e70f6e2008-06-29 01:03:05 +00004477/* in deterministic execution mode, instructions doing device I/Os
4478 must be at the end of the TB */
Andreas Färber9349b4f2012-03-14 01:38:32 +01004479void cpu_io_recompile(CPUArchState *env, void *retaddr)
pbrook2e70f6e2008-06-29 01:03:05 +00004480{
4481 TranslationBlock *tb;
4482 uint32_t n, cflags;
4483 target_ulong pc, cs_base;
4484 uint64_t flags;
4485
4486 tb = tb_find_pc((unsigned long)retaddr);
4487 if (!tb) {
4488 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
4489 retaddr);
4490 }
4491 n = env->icount_decr.u16.low + tb->icount;
Stefan Weil618ba8e2011-04-18 06:39:53 +00004492 cpu_restore_state(tb, env, (unsigned long)retaddr);
pbrook2e70f6e2008-06-29 01:03:05 +00004493 /* Calculate how many instructions had been executed before the fault
thsbf20dc02008-06-30 17:22:19 +00004494 occurred. */
pbrook2e70f6e2008-06-29 01:03:05 +00004495 n = n - env->icount_decr.u16.low;
4496 /* Generate a new TB ending on the I/O insn. */
4497 n++;
4498 /* On MIPS and SH, delay slot instructions can only be restarted if
4499 they were already the first instruction in the TB. If this is not
thsbf20dc02008-06-30 17:22:19 +00004500 the first instruction in a TB then re-execute the preceding
pbrook2e70f6e2008-06-29 01:03:05 +00004501 branch. */
4502#if defined(TARGET_MIPS)
4503 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4504 env->active_tc.PC -= 4;
4505 env->icount_decr.u16.low++;
4506 env->hflags &= ~MIPS_HFLAG_BMASK;
4507 }
4508#elif defined(TARGET_SH4)
4509 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4510 && n > 1) {
4511 env->pc -= 2;
4512 env->icount_decr.u16.low++;
4513 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4514 }
4515#endif
4516 /* This should never happen. */
4517 if (n > CF_COUNT_MASK)
4518 cpu_abort(env, "TB too big during recompile");
4519
4520 cflags = n | CF_LAST_IO;
4521 pc = tb->pc;
4522 cs_base = tb->cs_base;
4523 flags = tb->flags;
4524 tb_phys_invalidate(tb, -1);
4525 /* FIXME: In theory this could raise an exception. In practice
4526 we have already translated the block once so it's probably ok. */
4527 tb_gen_code(env, pc, cs_base, flags, cflags);
thsbf20dc02008-06-30 17:22:19 +00004528 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
pbrook2e70f6e2008-06-29 01:03:05 +00004529 the first in the TB) then we end up generating a whole new TB and
4530 repeating the fault, which is horribly inefficient.
4531 Better would be to execute just this insn uncached, or generate a
4532 second new TB. */
4533 cpu_resume_from_signal(env, NULL);
4534}
4535
Paul Brookb3755a92010-03-12 16:54:58 +00004536#if !defined(CONFIG_USER_ONLY)
4537
Stefan Weil055403b2010-10-22 23:03:32 +02004538void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
bellarde3db7222005-01-26 22:00:47 +00004539{
4540 int i, target_code_size, max_target_code_size;
4541 int direct_jmp_count, direct_jmp2_count, cross_page;
4542 TranslationBlock *tb;
ths3b46e622007-09-17 08:09:54 +00004543
bellarde3db7222005-01-26 22:00:47 +00004544 target_code_size = 0;
4545 max_target_code_size = 0;
4546 cross_page = 0;
4547 direct_jmp_count = 0;
4548 direct_jmp2_count = 0;
4549 for(i = 0; i < nb_tbs; i++) {
4550 tb = &tbs[i];
4551 target_code_size += tb->size;
4552 if (tb->size > max_target_code_size)
4553 max_target_code_size = tb->size;
4554 if (tb->page_addr[1] != -1)
4555 cross_page++;
4556 if (tb->tb_next_offset[0] != 0xffff) {
4557 direct_jmp_count++;
4558 if (tb->tb_next_offset[1] != 0xffff) {
4559 direct_jmp2_count++;
4560 }
4561 }
4562 }
4563 /* XXX: avoid using doubles ? */
bellard57fec1f2008-02-01 10:50:11 +00004564 cpu_fprintf(f, "Translation buffer state:\n");
Stefan Weil055403b2010-10-22 23:03:32 +02004565 cpu_fprintf(f, "gen code size %td/%ld\n",
bellard26a5f132008-05-28 12:30:31 +00004566 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4567 cpu_fprintf(f, "TB count %d/%d\n",
4568 nb_tbs, code_gen_max_blocks);
ths5fafdf22007-09-16 21:08:06 +00004569 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
bellarde3db7222005-01-26 22:00:47 +00004570 nb_tbs ? target_code_size / nb_tbs : 0,
4571 max_target_code_size);
Stefan Weil055403b2010-10-22 23:03:32 +02004572 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
bellarde3db7222005-01-26 22:00:47 +00004573 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4574 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
ths5fafdf22007-09-16 21:08:06 +00004575 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4576 cross_page,
bellarde3db7222005-01-26 22:00:47 +00004577 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4578 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
ths5fafdf22007-09-16 21:08:06 +00004579 direct_jmp_count,
bellarde3db7222005-01-26 22:00:47 +00004580 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4581 direct_jmp2_count,
4582 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
bellard57fec1f2008-02-01 10:50:11 +00004583 cpu_fprintf(f, "\nStatistics:\n");
bellarde3db7222005-01-26 22:00:47 +00004584 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4585 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4586 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
bellardb67d9a52008-05-23 09:57:34 +00004587 tcg_dump_info(f, cpu_fprintf);
bellarde3db7222005-01-26 22:00:47 +00004588}
4589
Avi Kivityd39e8222012-01-01 23:35:10 +02004590/* NOTE: this function can trigger an exception */
4591/* NOTE2: the returned address is not exactly the physical address: it
4592 is the offset relative to phys_ram_base */
Andreas Färber9349b4f2012-03-14 01:38:32 +01004593tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
Avi Kivityd39e8222012-01-01 23:35:10 +02004594{
4595 int mmu_idx, page_index, pd;
4596 void *p;
Avi Kivity37ec01d2012-03-08 18:08:35 +02004597 MemoryRegion *mr;
Avi Kivityd39e8222012-01-01 23:35:10 +02004598
4599 page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
4600 mmu_idx = cpu_mmu_index(env1);
4601 if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
4602 (addr & TARGET_PAGE_MASK))) {
Blue Swirle141ab52011-09-18 14:55:46 +00004603#ifdef CONFIG_TCG_PASS_AREG0
4604 cpu_ldub_code(env1, addr);
4605#else
Avi Kivityd39e8222012-01-01 23:35:10 +02004606 ldub_code(addr);
Blue Swirle141ab52011-09-18 14:55:46 +00004607#endif
Avi Kivityd39e8222012-01-01 23:35:10 +02004608 }
Avi Kivityce5d64c2012-03-08 18:50:18 +02004609 pd = env1->iotlb[mmu_idx][page_index] & ~TARGET_PAGE_MASK;
Avi Kivity37ec01d2012-03-08 18:08:35 +02004610 mr = iotlb_to_region(pd);
4611 if (mr != &io_mem_ram && mr != &io_mem_rom
4612 && mr != &io_mem_notdirty && !mr->rom_device) {
Avi Kivityd39e8222012-01-01 23:35:10 +02004613#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC)
4614 cpu_unassigned_access(env1, addr, 0, 1, 0, 4);
4615#else
4616 cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
4617#endif
4618 }
4619 p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
4620 return qemu_ram_addr_from_host_nofail(p);
4621}
4622
Benjamin Herrenschmidt82afa582012-01-10 01:35:11 +00004623/*
4624 * A helper function for the _utterly broken_ virtio device model to find out if
4625 * it's running on a big endian machine. Don't do this at home kids!
4626 */
4627bool virtio_is_big_endian(void);
4628bool virtio_is_big_endian(void)
4629{
4630#if defined(TARGET_WORDS_BIGENDIAN)
4631 return true;
4632#else
4633 return false;
4634#endif
4635}
4636
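/* Instantiate the code-fetch ("_cmmu") variants of the soft-MMU load
 * helpers: GETPC() is stubbed out, 'env' is pinned to cpu_single_env,
 * and softmmu_template.h is expanded once per access size (SHIFT is
 * the log2 of the size in bytes). */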
bellard61382a52003-10-27 21:22:23 +00004637#define MMUSUFFIX _cmmu
Blue Swirl39171492011-09-21 18:13:16 +00004638#undef GETPC
bellard61382a52003-10-27 21:22:23 +00004639#define GETPC() NULL
4640#define env cpu_single_env
bellardb769d8f2004-10-03 15:07:13 +00004641#define SOFTMMU_CODE_ACCESS
bellard61382a52003-10-27 21:22:23 +00004642
4643#define SHIFT 0
4644#include "softmmu_template.h"
4645
4646#define SHIFT 1
4647#include "softmmu_template.h"
4648
4649#define SHIFT 2
4650#include "softmmu_template.h"
4651
4652#define SHIFT 3
4653#include "softmmu_template.h"
4654
4655#undef env
4656
4657#endif