/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump.  ARM and Sparc64
   have limited branch ranges (possibly also PPC), so place it in a
   section close to the code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *, cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self-modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

#define P_L2_LEVELS \
    (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

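/* Worked example of the macros above (illustrative numbers, not a fixed
   configuration): with 42-bit guest virtual addresses and 12-bit pages,
   L1_MAP_ADDR_SPACE_BITS = 42 leaves 30 index bits above the page
   offset.  30 % 10 == 0 is below 4, so V_L1_BITS becomes 10: a
   1024-entry l1_map with two further 10-bit levels, walked by
   page_find_alloc() below. */
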
#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageEntry PhysPageEntry;

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
    /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};

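/* Illustrative encoding (values made up): { .is_leaf = 1, .ptr = 5 }
   denotes phys_sections[5], while { .is_leaf = 0, .ptr = 5 } points at
   the interior node phys_map_nodes[5].  Packing both cases into 16 bits
   is what keeps the physical map compact. */
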
/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to MemoryRegionSections.  */
static PhysPageEntry phys_map = { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };

static void io_mem_init(void);
static void memory_map_init(void);

static MemoryRegion io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always assume that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}

static void phys_page_set_level(PhysPageEntry *lp, target_phys_addr_t *index,
                                target_phys_addr_t *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    target_phys_addr_t step = (target_phys_addr_t)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(target_phys_addr_t index, target_phys_addr_t nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

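/* Usage sketch (hypothetical values): registering a 4 MB section at
   guest physical address 0x400000 with 4 KB pages comes down to
       phys_page_set(0x400000 >> TARGET_PAGE_BITS, 1024, section_index);
   Since that range covers one whole aligned level-1 step (1024 pages),
   phys_page_set_level() stores a single interior leaf instead of 1024
   bottom-level entries. */
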
static MemoryRegionSection *phys_page_find(target_phys_addr_t index)
{
    PhysPageEntry lp = phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}

static target_phys_addr_t section_addr(MemoryRegionSection *section,
                                       target_phys_addr_t addr)
{
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    return addr;
}

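/* Example of the arithmetic above (made-up layout): for a section with
   offset_within_address_space = 0x8000 and offset_within_region =
   0x1000, section_addr(section, 0x9000) returns
   0x9000 - 0x8000 + 0x1000 = 0x2000, i.e. the address relative to the
   start of the backing MemoryRegion. */
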
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUArchState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while (0)
#define mmap_unlock() do { } while (0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode; this will change once a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        /* Map the buffer below 2G, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Keep the buffer no bigger than 16MB to branch between blocks */
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        /* Map the buffer below 2G, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

632 (in bytes) allocated to the translation buffer. Zero means default
633 size. */
Jan Kiszkad5ab9712011-08-02 16:10:21 +0200634void tcg_exec_init(unsigned long tb_size)
bellard26a5f132008-05-28 12:30:31 +0000635{
bellard26a5f132008-05-28 12:30:31 +0000636 cpu_gen_init();
637 code_gen_alloc(tb_size);
638 code_gen_ptr = code_gen_buffer;
bellard43694152008-05-29 09:35:57 +0000639 page_init();
Richard Henderson9002ec72010-05-06 08:50:41 -0700640#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
641 /* There's no guest base to take into account, so go ahead and
642 initialize the prologue now. */
643 tcg_prologue_init(&tcg_ctx);
644#endif
bellard26a5f132008-05-28 12:30:31 +0000645}
646
bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUArchState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUArchState),
        VMSTATE_UINT32(interrupt_request, CPUArchState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUArchState *qemu_get_cpu(int cpu)
{
    CPUArchState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUArchState *env)
{
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUArchState *env1)
{
    CPUArchState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));
    }

    memset(tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof(void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for (;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

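/* The page and jump lists below use tagged pointers: the low two bits
   of each link record which of the TB's (up to two) pages the link
   belongs to, so n1 = (long)tb1 & 3 recovers the slot and masking with
   ~3 recovers the real pointer.  In the jump lists, the tag value 2
   marks the 'jmp_first' list head. */
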
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for (;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n,
                      (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUArchState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for (;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

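/* Worked example: set_bits(tab, 3, 7) marks bits 3..9.  The head byte
   gets mask 0xff << 3 = 0xf8 (bits 3-7) and the tail byte gets
   ~(0xff << 2) = 0x03 (bits 8-9); the 0xff loop in the middle only runs
   for spans covering whole bytes. */
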
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

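/* The resulting bitmap has one bit per byte of the page: a set bit
   means that byte lies inside some TB's code.
   tb_invalidate_phys_page_fast() consults it so that small writes that
   miss all translated code can skip the full invalidation path. */
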
TranslationBlock *tb_gen_code(CPUArchState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr +
                             code_gen_size + CODE_GEN_ALIGN - 1) &
                            ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

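/* Note that a TB whose code crosses a page boundary gets a valid
   phys_page2 above and is linked into both pages' TB lists, so a write
   to either page invalidates it. */
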
/* invalidate all TBs which intersect with the target physical page
   starting in range [start, end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUArchState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all
       the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

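/* Example of the bitmap check (illustrative numbers): for a 4-byte
   write at page offset 0x10, offset >> 3 selects bitmap byte 2 and
   (offset & 7) == 0, so b & ((1 << 4) - 1) tests bits 16..19, exactly
   the four written bytes.  Only if one of them is code does the slow
   invalidation path run. */
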
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUArchState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
             addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done. */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

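/* Hedged caller sketch (not part of the original file): a TB whose guest
   code crosses a page boundary is linked under both physical pages, so
   that invalidating either page also invalidates the block. The locals
   below are hypothetical. */
#if 0
    if (tb_crosses_page_boundary) {
        tb_link_page(tb, phys_pc, second_page & TARGET_PAGE_MASK);
    } else {
        tb_link_page(tb, phys_pc, (tb_page_addr_t)-1);
    }
#endif
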
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
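
/* Hedged usage sketch (not in the original file): the typical caller is a
   fault handler that maps a faulting host PC back to the translated block
   containing it, so the guest state at that point can be reconstructed.
   The function name and the way the host PC is obtained are hypothetical;
   error handling is elided. */
#if 0
static void example_handle_fault(CPUArchState *env, unsigned long host_pc)
{
    TranslationBlock *tb = tb_find_pc(host_pc);

    if (tb) {
        /* host_pc lies inside tb's generated code; recover the guest
           state at that instruction. */
        cpu_restore_state(tb, env, host_pc);
    }
}
#endif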

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for (;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for (;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    ram_addr_t ram_addr;
    MemoryRegionSection *section;

    addr = cpu_get_phys_page_debug(env, pc);
    section = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!(memory_region_is_ram(section->mr)
          || (section->mr->rom_device && section->mr->readable))) {
        return;
    }
    ram_addr = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
        + section_addr(section, addr);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint. */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
        len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

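/* Hedged usage example (not part of the original file): install a 4-byte
   write watchpoint at a guest virtual address, as a debugger stub might.
   The address value is purely illustrative. */
#if 0
    CPUWatchpoint *wp;

    if (cpu_watchpoint_insert(env, 0x1000, 4,
                              BP_GDB | BP_MEM_WRITE, &wp) == 0) {
        /* ... run the guest; a hit sets BP_WATCHPOINT_HIT ... */
        cpu_watchpoint_remove_by_ref(env, wp);
    }
#endif
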
/* Remove a specific watchpoint. */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference. */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints. */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint. */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

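/* Hedged usage example (not in the original file): set a GDB-style
   breakpoint at a guest PC and drop it again; the PC value is
   illustrative only. */
#if 0
    CPUBreakpoint *bp;

    if (cpu_breakpoint_insert(env, 0x8000, BP_GDB, &bp) == 0) {
        /* ... resume the guest, wait for EXCP_DEBUG ... */
        cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
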
/* Remove a specific breakpoint. */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference. */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

/* enable or disable low-level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif defined(_WIN32)
        /* Win32 doesn't support line-buffering, so use unbuffered output. */
        setvbuf(logfile, NULL, _IONBF, 0);
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

static void cpu_unlink_tb(CPUArchState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}

#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUArchState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
        return;
    }

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu_unlink_tb(env);
    }
}

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUArchState *env, int mask)
{
    env->interrupt_request |= mask;
    cpu_unlink_tb(env);
}
#endif /* CONFIG_USER_ONLY */

void cpu_reset_interrupt(CPUArchState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUArchState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}

const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for (;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for (item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for (item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

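/* Hedged usage sketch (not in the original file): wiring the mask parser
   to the logging switches, roughly what a "-d in_asm,cpu" command-line
   option ends up doing. The option string and log path are illustrative. */
#if 0
    int mask = cpu_str_to_log_mask("in_asm,cpu");

    if (mask == 0) {
        fprintf(stderr, "unknown log item\n");
    } else {
        cpu_set_log_filename("/tmp/qemu.log");
        cpu_set_log(mask);  /* enables CPU_LOG_TB_IN_ASM | CPU_LOG_TB_CPU */
    }
#endif
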
void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       The memcpy above copied the source CPU's list heads, which would
       alias the source lists, so reinitialize the copy's lists before
       repopulating them from the source.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&new_env->breakpoints);
    QTAILQ_INIT(&new_env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page. */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

static CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read  = -1,
    .addr_write = -1,
    .addr_code  = -1,
    .addend     = -1,
};

/* NOTE:
 * If flush_global is true (the usual case), flush all tlb entries.
 * If flush_global is false, flush (at least) all tlb entries not
 * marked global.
 *
 * Since QEMU doesn't currently implement a global/not-global flag
 * for tlb entries, at the moment tlb_flush() will also flush all
 * tlb entries in the flush_global == false case. This is OK because
 * CPU architectures generally permit an implementation to drop
 * entries from the TLB at any time, so flushing more entries than
 * required is only an efficiency issue, not a correctness issue.
 */
void tlb_flush(CPUArchState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for (i = 0; i < CPU_TLB_SIZE; i++) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
        }
    }

    memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));

    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
    tlb_flush_count++;
}

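/* Hedged usage note (not in the original file): targets typically call
   tlb_flush() when a change invalidates all cached translations at once,
   for example turning paging on or off or rewriting the page-table base.
   A minimal sketch of such a caller: */
#if 0
    /* hypothetical target hook, run after the translation base register
       is rewritten */
    tlb_flush(env, 1);
#endif
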
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        *tlb_entry = s_cputlb_empty_entry;
    }
}

void tlb_flush_page(CPUArchState *env, target_ulong addr)
{
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* Check if we need to flush due to large pages. */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        tlb_flush(env, 1);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

    tlb_flush_jmp_cache(env, addr);
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUArchState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}

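/* Added explanatory note (not in the original file): these helpers form a
   round trip. Clearing CODE_DIRTY_FLAG makes the next guest store to the
   page take the slow TLB_NOTDIRTY path, which gives the write handler a
   chance to invalidate any translated code on the page before marking it
   dirty again, after which stores return to the fast path. */
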
/* Note: start and end must be within the same ram block. */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUArchState *env;
    unsigned long length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
        != (end - 1) - start) {
        abort();
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for (i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
        }
    }
}

int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;
    void *p;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
            + tlb_entry->addend);
        ram_addr = qemu_ram_addr_from_host_nofail(p);
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUArchState *env)
{
    int i;
    int mmu_idx;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        for (i = 0; i < CPU_TLB_SIZE; i++)
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
    }
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUArchState *env, target_ulong vaddr)
{
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated. */
static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB. */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}

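/* Worked example (added, not in the original file): a first 2 MiB page at
   0x40000000 records addr = 0x40000000 and mask = 0xffe00000. Adding a
   second 2 MiB page at 0x40400000 widens the mask until
   (0x40000000 ^ 0x40400000) & mask == 0, i.e. to 0xff800000, so the
   tracked region grows to 0x40000000..0x407fffff and a tlb_flush_page()
   anywhere in that window forces a full flush. */
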
static bool is_ram_rom(MemoryRegionSection *s)
{
    return memory_region_is_ram(s->mr);
}

static bool is_romd(MemoryRegionSection *s)
{
    MemoryRegion *mr = s->mr;

    return mr->rom_device && mr->readable;
}

static bool is_ram_rom_romd(MemoryRegionSection *s)
{
    return is_ram_rom(s) || is_romd(s);
}

/* Add a new TLB entry. At most one entry for a given virtual address
   is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
   supplied size is only used by tlb_flush_page. */
void tlb_set_page(CPUArchState *env, target_ulong vaddr,
                  target_phys_addr_t paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    unsigned long addend;
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    target_phys_addr_t iotlb;

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }
    section = phys_page_find(paddr >> TARGET_PAGE_BITS);
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
           " prot=%x idx=%d\n",
           vaddr, paddr, prot, mmu_idx);
#endif

    address = vaddr;
    if (!is_ram_rom_romd(section)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    if (is_ram_rom_romd(section)) {
        addend = (unsigned long)memory_region_get_ram_ptr(section->mr)
            + section_addr(section, paddr);
    } else {
        addend = 0;
    }
    if (is_ram_rom(section)) {
        /* Normal RAM. */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + section_addr(section, paddr);
        if (!section->readonly)
            iotlb |= phys_section_notdirty;
        else
            iotlb |= phys_section_rom;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = section - phys_sections;
        iotlb += section_addr(section, paddr);
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                address |= TLB_MMIO;
                break;
            }
        }
    }

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section->mr) && section->readonly)
            || is_romd(section)) {
            /* Write access calls the I/O callback. */
            te->addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section->mr)
                   && !cpu_physical_memory_is_dirty(
                       section->mr->ram_addr
                       + section_addr(section, paddr))) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}

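/* Hedged illustration (not in the original file): how the softmmu fast
   path consumes the entry built above. On a hit, addend turns a guest
   virtual address directly into a host pointer. The function name is
   hypothetical and refill handling is elided; the comparison follows the
   same masking idiom as tlb_flush_entry() above. */
#if 0
static inline void *example_tlb_translate_read(CPUArchState *env,
                                               int mmu_idx,
                                               target_ulong vaddr)
{
    unsigned int index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    CPUTLBEntry *te = &env->tlb_table[mmu_idx][index];

    if ((te->addr_read & (TARGET_PAGE_MASK | TLB_INVALID_MASK))
        == (vaddr & TARGET_PAGE_MASK)) {
        return (void *)(unsigned long)(vaddr + te->addend);
    }
    return NULL; /* miss: the real code would refill via tlb_fill() */
}
#endif
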
#else

void tlb_flush(CPUArchState *env, int flush_global)
{
}

void tlb_flush_page(CPUArchState *env, target_ulong addr)
{
}

/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */

struct walk_memory_regions_data
{
    walk_memory_regions_fn fn;
    void *priv;
    unsigned long start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   abi_ulong end, int new_prot)
{
    if (data->start != -1ul) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1ul);
    data->prot = new_prot;

    return 0;
}

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                         (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    unsigned long i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}

static int dump_region(void *priv, abi_ulong start,
                       abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
                   " "TARGET_ABI_FMT_lx" %c%c%c\n",
                   start, end, end - start,
                   ((prot & PAGE_READ) ? 'r' : '-'),
                   ((prot & PAGE_WRITE) ? 'w' : '-'),
                   ((prot & PAGE_EXEC) ? 'x' : '-'));

    return (0);
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
                   "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside. */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}

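/* Hedged usage example (not in the original file): roughly what the
   user-mode mmap emulation does after a successful guest mmap() of an
   anonymous read/write region; the bounds are illustrative. */
#if 0
    page_set_flags(guest_start, guest_start + guest_len,
                   PAGE_VALID | PAGE_READ | PAGE_WRITE);
#endif
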
ths3d97b402007-11-02 19:02:07 +00002447int page_check_range(target_ulong start, target_ulong len, int flags)
2448{
2449 PageDesc *p;
2450 target_ulong end;
2451 target_ulong addr;
2452
Richard Henderson376a7902010-03-10 15:57:04 -08002453 /* This function should never be called with addresses outside the
2454 guest address space. If this assert fires, it probably indicates
2455 a missing call to h2g_valid. */
Blue Swirl338e9e62010-03-13 09:48:08 +00002456#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2457 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002458#endif
2459
Richard Henderson3e0650a2010-03-29 10:54:42 -07002460 if (len == 0) {
2461 return 0;
2462 }
Richard Henderson376a7902010-03-10 15:57:04 -08002463 if (start + len - 1 < start) {
2464 /* We've wrapped around. */
balrog55f280c2008-10-28 10:24:11 +00002465 return -1;
Richard Henderson376a7902010-03-10 15:57:04 -08002466 }
balrog55f280c2008-10-28 10:24:11 +00002467
ths3d97b402007-11-02 19:02:07 +00002468    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2469 start = start & TARGET_PAGE_MASK;
2470
Richard Henderson376a7902010-03-10 15:57:04 -08002471 for (addr = start, len = end - start;
2472 len != 0;
2473 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
ths3d97b402007-11-02 19:02:07 +00002474 p = page_find(addr >> TARGET_PAGE_BITS);
2475        if (!p)
2476            return -1;
2477        if (!(p->flags & PAGE_VALID))
2478            return -1;
2479
bellarddae32702007-11-14 10:51:00 +00002480 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
ths3d97b402007-11-02 19:02:07 +00002481 return -1;
bellarddae32702007-11-14 10:51:00 +00002482 if (flags & PAGE_WRITE) {
2483 if (!(p->flags & PAGE_WRITE_ORG))
2484 return -1;
2485 /* unprotect the page if it was put read-only because it
2486 contains translated code */
2487 if (!(p->flags & PAGE_WRITE)) {
2488 if (!page_unprotect(addr, 0, NULL))
2489 return -1;
2490 }
2491 return 0;
2492 }
ths3d97b402007-11-02 19:02:07 +00002493 }
2494 return 0;
2495}
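
/* Illustrative sketch (assumption): a typical caller, e.g. a syscall
   emulation path, validating a guest buffer before writing to it. The
   helper name is an example only; the guard macro is never defined. */
#ifdef EXEC_EXAMPLE_SKETCHES
static int example_guest_buffer_writable(target_ulong guest_addr,
                                         target_ulong len)
{
    /* Returns 0 when every page in the range is valid and writable
       (unprotecting on demand), -1 otherwise. */
    return page_check_range(guest_addr, len, PAGE_WRITE);
}
#endif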
2496
bellard9fa3e852004-01-04 18:06:42 +00002497/* called from signal handler: invalidate the code and unprotect the
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002498 page. Return TRUE if the fault was successfully handled. */
pbrook53a59602006-03-25 19:31:22 +00002499int page_unprotect(target_ulong address, unsigned long pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00002500{
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002501 unsigned int prot;
2502 PageDesc *p;
pbrook53a59602006-03-25 19:31:22 +00002503 target_ulong host_start, host_end, addr;
bellard9fa3e852004-01-04 18:06:42 +00002504
pbrookc8a706f2008-06-02 16:16:42 +00002505 /* Technically this isn't safe inside a signal handler. However we
2506 know this only ever happens in a synchronous SEGV handler, so in
2507 practice it seems to be ok. */
2508 mmap_lock();
2509
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002510 p = page_find(address >> TARGET_PAGE_BITS);
2511 if (!p) {
pbrookc8a706f2008-06-02 16:16:42 +00002512 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002513 return 0;
pbrookc8a706f2008-06-02 16:16:42 +00002514 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002515
bellard9fa3e852004-01-04 18:06:42 +00002516 /* if the page was really writable, then we change its
2517 protection back to writable */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002518 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2519 host_start = address & qemu_host_page_mask;
2520 host_end = host_start + qemu_host_page_size;
2521
2522 prot = 0;
2523 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2524 p = page_find(addr >> TARGET_PAGE_BITS);
2525 p->flags |= PAGE_WRITE;
2526 prot |= p->flags;
2527
bellard9fa3e852004-01-04 18:06:42 +00002528 /* and since the content will be modified, we must invalidate
2529 the corresponding translated code. */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002530 tb_invalidate_phys_page(addr, pc, puc);
bellard9fa3e852004-01-04 18:06:42 +00002531#ifdef DEBUG_TB_CHECK
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002532 tb_invalidate_check(addr);
bellard9fa3e852004-01-04 18:06:42 +00002533#endif
bellard9fa3e852004-01-04 18:06:42 +00002534 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002535 mprotect((void *)g2h(host_start), qemu_host_page_size,
2536 prot & PAGE_BITS);
2537
2538 mmap_unlock();
2539 return 1;
bellard9fa3e852004-01-04 18:06:42 +00002540 }
pbrookc8a706f2008-06-02 16:16:42 +00002541 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002542 return 0;
2543}
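
/* Illustrative sketch (assumption): the shape of the host SEGV path
   that ends up in page_unprotect(). The handler name and the h2g()
   conversion shown here are examples; the real signal plumbing lives
   outside this file. */
#ifdef EXEC_EXAMPLE_SKETCHES
static int example_handle_host_segv(unsigned long host_addr,
                                    unsigned long pc, void *puc)
{
    /* Non-zero means the fault was a write to a page protected for
       translated code and has been handled; restart the access. */
    return page_unprotect(h2g(host_addr), pc, puc);
}
#endif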
2544
Andreas Färber9349b4f2012-03-14 01:38:32 +01002545static inline void tlb_set_dirty(CPUArchState *env,
bellard6a00d602005-11-21 23:25:50 +00002546 unsigned long addr, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002547{
2548}
bellard9fa3e852004-01-04 18:06:42 +00002549#endif /* defined(CONFIG_USER_ONLY) */
2550
pbrooke2eef172008-06-08 01:09:01 +00002551#if !defined(CONFIG_USER_ONLY)
pbrook8da3ff12008-12-01 18:59:50 +00002552
Paul Brookc04b2b72010-03-01 03:31:14 +00002553#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2554typedef struct subpage_t {
Avi Kivity70c68e42012-01-02 12:32:48 +02002555 MemoryRegion iomem;
Paul Brookc04b2b72010-03-01 03:31:14 +00002556 target_phys_addr_t base;
Avi Kivity5312bd82012-02-12 18:32:55 +02002557 uint16_t sub_section[TARGET_PAGE_SIZE];
Paul Brookc04b2b72010-03-01 03:31:14 +00002558} subpage_t;
2559
Anthony Liguoric227f092009-10-01 16:12:16 -05002560static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002561 uint16_t section);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002562static subpage_t *subpage_init(target_phys_addr_t base);
Avi Kivity5312bd82012-02-12 18:32:55 +02002563static void destroy_page_desc(uint16_t section_index)
Avi Kivity54688b12012-02-09 17:34:32 +02002564{
Avi Kivity5312bd82012-02-12 18:32:55 +02002565 MemoryRegionSection *section = &phys_sections[section_index];
2566 MemoryRegion *mr = section->mr;
Avi Kivity54688b12012-02-09 17:34:32 +02002567
2568 if (mr->subpage) {
2569 subpage_t *subpage = container_of(mr, subpage_t, iomem);
2570 memory_region_destroy(&subpage->iomem);
2571 g_free(subpage);
2572 }
2573}
2574
Avi Kivity4346ae32012-02-10 17:00:01 +02002575static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
Avi Kivity54688b12012-02-09 17:34:32 +02002576{
2577 unsigned i;
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002578 PhysPageEntry *p;
Avi Kivity54688b12012-02-09 17:34:32 +02002579
Avi Kivityc19e8802012-02-13 20:25:31 +02002580 if (lp->ptr == PHYS_MAP_NODE_NIL) {
Avi Kivity54688b12012-02-09 17:34:32 +02002581 return;
2582 }
2583
Avi Kivityc19e8802012-02-13 20:25:31 +02002584 p = phys_map_nodes[lp->ptr];
Avi Kivity4346ae32012-02-10 17:00:01 +02002585 for (i = 0; i < L2_SIZE; ++i) {
Avi Kivity07f07b32012-02-13 20:45:32 +02002586 if (!p[i].is_leaf) {
Avi Kivity54688b12012-02-09 17:34:32 +02002587 destroy_l2_mapping(&p[i], level - 1);
Avi Kivity4346ae32012-02-10 17:00:01 +02002588 } else {
Avi Kivityc19e8802012-02-13 20:25:31 +02002589 destroy_page_desc(p[i].ptr);
Avi Kivity54688b12012-02-09 17:34:32 +02002590 }
Avi Kivity54688b12012-02-09 17:34:32 +02002591 }
Avi Kivity07f07b32012-02-13 20:45:32 +02002592 lp->is_leaf = 0;
Avi Kivityc19e8802012-02-13 20:25:31 +02002593 lp->ptr = PHYS_MAP_NODE_NIL;
Avi Kivity54688b12012-02-09 17:34:32 +02002594}
2595
2596static void destroy_all_mappings(void)
2597{
Avi Kivity3eef53d2012-02-10 14:57:31 +02002598 destroy_l2_mapping(&phys_map, P_L2_LEVELS - 1);
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002599 phys_map_nodes_reset();
Avi Kivity54688b12012-02-09 17:34:32 +02002600}
2601
Avi Kivity5312bd82012-02-12 18:32:55 +02002602static uint16_t phys_section_add(MemoryRegionSection *section)
2603{
2604 if (phys_sections_nb == phys_sections_nb_alloc) {
2605 phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
2606 phys_sections = g_renew(MemoryRegionSection, phys_sections,
2607 phys_sections_nb_alloc);
2608 }
2609 phys_sections[phys_sections_nb] = *section;
2610 return phys_sections_nb++;
2611}
2612
2613static void phys_sections_clear(void)
2614{
2615 phys_sections_nb = 0;
2616}
2617
Michael S. Tsirkin8f2498f2009-09-29 18:53:16 +02002618/* register physical memory.
2619   For RAM, 'size' must be a multiple of the target page size.
2620   Sections that do not start or end on a page boundary are
pbrook8da3ff12008-12-01 18:59:50 +00002621   registered through subpages; runs of whole pages are registered
2622   directly. The address passed to a region's callbacks is the
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002623   offset from the start of that MemoryRegion: the section's
pbrook8da3ff12008-12-01 18:59:50 +00002624   offset_within_region plus the offset of the access within the
2625   section. */
Avi Kivity0f0cb162012-02-13 17:14:32 +02002626static void register_subpage(MemoryRegionSection *section)
2627{
2628 subpage_t *subpage;
2629 target_phys_addr_t base = section->offset_within_address_space
2630 & TARGET_PAGE_MASK;
Avi Kivityf3705d52012-03-08 16:16:34 +02002631 MemoryRegionSection *existing = phys_page_find(base >> TARGET_PAGE_BITS);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002632 MemoryRegionSection subsection = {
2633 .offset_within_address_space = base,
2634 .size = TARGET_PAGE_SIZE,
2635 };
Avi Kivity0f0cb162012-02-13 17:14:32 +02002636 target_phys_addr_t start, end;
2637
Avi Kivityf3705d52012-03-08 16:16:34 +02002638 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002639
Avi Kivityf3705d52012-03-08 16:16:34 +02002640 if (!(existing->mr->subpage)) {
Avi Kivity0f0cb162012-02-13 17:14:32 +02002641 subpage = subpage_init(base);
2642 subsection.mr = &subpage->iomem;
Avi Kivity29990972012-02-13 20:21:20 +02002643 phys_page_set(base >> TARGET_PAGE_BITS, 1,
2644 phys_section_add(&subsection));
Avi Kivity0f0cb162012-02-13 17:14:32 +02002645 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02002646 subpage = container_of(existing->mr, subpage_t, iomem);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002647 }
2648 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
2649 end = start + section->size;
2650 subpage_register(subpage, start, end, phys_section_add(section));
2651}
2652
2653
2654static void register_multipage(MemoryRegionSection *section)
bellard33417e72003-08-10 21:47:01 +00002655{
Avi Kivitydd811242012-01-02 12:17:03 +02002656 target_phys_addr_t start_addr = section->offset_within_address_space;
2657 ram_addr_t size = section->size;
Avi Kivity29990972012-02-13 20:21:20 +02002658 target_phys_addr_t addr;
Avi Kivity5312bd82012-02-12 18:32:55 +02002659 uint16_t section_index = phys_section_add(section);
Avi Kivitydd811242012-01-02 12:17:03 +02002660
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002661 assert(size);
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002662
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002663 addr = start_addr;
Avi Kivity29990972012-02-13 20:21:20 +02002664 phys_page_set(addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
2665 section_index);
bellard33417e72003-08-10 21:47:01 +00002666}
2667
Avi Kivity0f0cb162012-02-13 17:14:32 +02002668void cpu_register_physical_memory_log(MemoryRegionSection *section,
2669 bool readonly)
2670{
2671 MemoryRegionSection now = *section, remain = *section;
2672
2673 if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
2674 || (now.size < TARGET_PAGE_SIZE)) {
2675 now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
2676 - now.offset_within_address_space,
2677 now.size);
2678 register_subpage(&now);
2679 remain.size -= now.size;
2680 remain.offset_within_address_space += now.size;
2681 remain.offset_within_region += now.size;
2682 }
2683 now = remain;
2684 now.size &= TARGET_PAGE_MASK;
2685 if (now.size) {
2686 register_multipage(&now);
2687 remain.size -= now.size;
2688 remain.offset_within_address_space += now.size;
2689 remain.offset_within_region += now.size;
2690 }
2691 now = remain;
2692 if (now.size) {
2693 register_subpage(&now);
2694 }
2695}
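
/* Worked example (illustrative): with 4K target pages, a section
   covering guest physical 0x1800..0x3200 is split three ways:
   - head subpage 0x1800..0x2000 (start not page-aligned),
   - multipage 0x2000..0x3000 (whole pages, registered directly),
   - tail subpage 0x3000..0x3200 (less than a full page left).
   The two partial pages share their page's subpage_t with whatever
   else happens to be mapped on those pages. */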
2696
2697
Anthony Liguoric227f092009-10-01 16:12:16 -05002698void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002699{
2700 if (kvm_enabled())
2701 kvm_coalesce_mmio_region(addr, size);
2702}
2703
Anthony Liguoric227f092009-10-01 16:12:16 -05002704void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002705{
2706 if (kvm_enabled())
2707 kvm_uncoalesce_mmio_region(addr, size);
2708}
2709
Sheng Yang62a27442010-01-26 19:21:16 +08002710void qemu_flush_coalesced_mmio_buffer(void)
2711{
2712 if (kvm_enabled())
2713 kvm_flush_coalesced_mmio_buffer();
2714}
2715
Marcelo Tosattic9027602010-03-01 20:25:08 -03002716#if defined(__linux__) && !defined(TARGET_S390X)
2717
2718#include <sys/vfs.h>
2719
2720#define HUGETLBFS_MAGIC 0x958458f6
2721
2722static long gethugepagesize(const char *path)
2723{
2724 struct statfs fs;
2725 int ret;
2726
2727 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002728 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002729 } while (ret != 0 && errno == EINTR);
2730
2731 if (ret != 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002732 perror(path);
2733 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002734 }
2735
2736 if (fs.f_type != HUGETLBFS_MAGIC)
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002737 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002738
2739 return fs.f_bsize;
2740}
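
/* Illustrative sketch, not from the original code: probing a
   hugetlbfs mount before handing it to -mem-path. The helper name and
   message are examples only; the guard macro is never defined. */
#ifdef EXEC_EXAMPLE_SKETCHES
static int example_check_mem_path(const char *path)
{
    long hpagesize = gethugepagesize(path); /* 0 on statfs failure */

    if (!hpagesize) {
        return -1;
    }
    /* 2 MiB is the typical huge page size on x86-64 hosts. */
    fprintf(stderr, "huge page size on %s: %ld bytes\n", path, hpagesize);
    return 0;
}
#endif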
2741
Alex Williamson04b16652010-07-02 11:13:17 -06002742static void *file_ram_alloc(RAMBlock *block,
2743 ram_addr_t memory,
2744 const char *path)
Marcelo Tosattic9027602010-03-01 20:25:08 -03002745{
2746 char *filename;
2747 void *area;
2748 int fd;
2749#ifdef MAP_POPULATE
2750 int flags;
2751#endif
2752 unsigned long hpagesize;
2753
2754 hpagesize = gethugepagesize(path);
2755 if (!hpagesize) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002756 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002757 }
2758
2759 if (memory < hpagesize) {
2760 return NULL;
2761 }
2762
2763 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2764 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2765 return NULL;
2766 }
2767
2768 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002769 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002770 }
2771
2772 fd = mkstemp(filename);
2773 if (fd < 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002774 perror("unable to create backing store for hugepages");
2775 free(filename);
2776 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002777 }
2778 unlink(filename);
2779 free(filename);
2780
2781 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2782
2783 /*
2784 * ftruncate is not supported by hugetlbfs in older
2785 * hosts, so don't bother bailing out on errors.
2786 * If anything goes wrong with it under other filesystems,
2787 * mmap will fail.
2788 */
2789 if (ftruncate(fd, memory))
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002790 perror("ftruncate");
Marcelo Tosattic9027602010-03-01 20:25:08 -03002791
2792#ifdef MAP_POPULATE
2793 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2794 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2795 * to sidestep this quirk.
2796 */
2797 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2798 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2799#else
2800 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2801#endif
2802 if (area == MAP_FAILED) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002803 perror("file_ram_alloc: can't mmap RAM pages");
2804 close(fd);
2805 return (NULL);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002806 }
Alex Williamson04b16652010-07-02 11:13:17 -06002807 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002808 return area;
2809}
2810#endif
2811
Alex Williamsond17b5282010-06-25 11:08:38 -06002812static ram_addr_t find_ram_offset(ram_addr_t size)
2813{
Alex Williamson04b16652010-07-02 11:13:17 -06002814 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06002815 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002816
2817 if (QLIST_EMPTY(&ram_list.blocks))
2818 return 0;
2819
2820 QLIST_FOREACH(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002821 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002822
2823 end = block->offset + block->length;
2824
2825 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2826 if (next_block->offset >= end) {
2827 next = MIN(next, next_block->offset);
2828 }
2829 }
2830 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06002831 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06002832 mingap = next - end;
2833 }
2834 }
Alex Williamson3e837b22011-10-31 08:54:09 -06002835
2836 if (offset == RAM_ADDR_MAX) {
2837 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2838 (uint64_t)size);
2839 abort();
2840 }
2841
Alex Williamson04b16652010-07-02 11:13:17 -06002842 return offset;
2843}
2844
2845static ram_addr_t last_ram_offset(void)
2846{
Alex Williamsond17b5282010-06-25 11:08:38 -06002847 RAMBlock *block;
2848 ram_addr_t last = 0;
2849
2850 QLIST_FOREACH(block, &ram_list.blocks, next)
2851 last = MAX(last, block->offset + block->length);
2852
2853 return last;
2854}
2855
Avi Kivityc5705a72011-12-20 15:59:12 +02002856void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
Cam Macdonell84b89d72010-07-26 18:10:57 -06002857{
2858 RAMBlock *new_block, *block;
2859
Avi Kivityc5705a72011-12-20 15:59:12 +02002860 new_block = NULL;
2861 QLIST_FOREACH(block, &ram_list.blocks, next) {
2862 if (block->offset == addr) {
2863 new_block = block;
2864 break;
2865 }
2866 }
2867 assert(new_block);
2868 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002869
2870 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2871 char *id = dev->parent_bus->info->get_dev_path(dev);
2872 if (id) {
2873 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05002874 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002875 }
2876 }
2877 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2878
2879 QLIST_FOREACH(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02002880 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06002881 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2882 new_block->idstr);
2883 abort();
2884 }
2885 }
Avi Kivityc5705a72011-12-20 15:59:12 +02002886}
2887
2888ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2889 MemoryRegion *mr)
2890{
2891 RAMBlock *new_block;
2892
2893 size = TARGET_PAGE_ALIGN(size);
2894 new_block = g_malloc0(sizeof(*new_block));
Cam Macdonell84b89d72010-07-26 18:10:57 -06002895
Avi Kivity7c637362011-12-21 13:09:49 +02002896 new_block->mr = mr;
Jun Nakajima432d2682010-08-31 16:41:25 +01002897 new_block->offset = find_ram_offset(size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002898 if (host) {
2899 new_block->host = host;
Huang Yingcd19cfa2011-03-02 08:56:19 +01002900 new_block->flags |= RAM_PREALLOC_MASK;
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002901 } else {
2902 if (mem_path) {
2903#if defined (__linux__) && !defined(TARGET_S390X)
2904 new_block->host = file_ram_alloc(new_block, size, mem_path);
2905 if (!new_block->host) {
2906 new_block->host = qemu_vmalloc(size);
Andreas Färbere78815a2010-09-25 11:26:05 +00002907 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002908 }
2909#else
2910 fprintf(stderr, "-mem-path option unsupported\n");
2911 exit(1);
2912#endif
2913 } else {
2914#if defined(TARGET_S390X) && defined(CONFIG_KVM)
Christian Borntraegerff836782011-05-10 14:49:10 +02002915 /* S390 KVM requires the topmost vma of the RAM to be smaller than
2916   a system-defined value, which is at least 256GB. Larger systems
2917 have larger values. We put the guest between the end of data
2918 segment (system break) and this value. We use 32GB as a base to
2919 have enough room for the system break to grow. */
2920 new_block->host = mmap((void*)0x800000000, size,
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002921 PROT_EXEC|PROT_READ|PROT_WRITE,
Christian Borntraegerff836782011-05-10 14:49:10 +02002922 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
Alexander Graffb8b2732011-05-20 17:33:28 +02002923 if (new_block->host == MAP_FAILED) {
2924 fprintf(stderr, "Allocating RAM failed\n");
2925 abort();
2926 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002927#else
Jan Kiszka868bb332011-06-21 22:59:09 +02002928 if (xen_enabled()) {
Avi Kivityfce537d2011-12-18 15:48:55 +02002929 xen_ram_alloc(new_block->offset, size, mr);
Jun Nakajima432d2682010-08-31 16:41:25 +01002930 } else {
2931 new_block->host = qemu_vmalloc(size);
2932 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002933#endif
Andreas Färbere78815a2010-09-25 11:26:05 +00002934 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002935 }
2936 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06002937 new_block->length = size;
2938
2939 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2940
Anthony Liguori7267c092011-08-20 22:09:37 -05002941 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
Cam Macdonell84b89d72010-07-26 18:10:57 -06002942 last_ram_offset() >> TARGET_PAGE_BITS);
2943 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2944 0xff, size >> TARGET_PAGE_BITS);
2945
2946 if (kvm_enabled())
2947 kvm_setup_guest_memory(new_block->host, size);
2948
2949 return new_block->offset;
2950}
2951
Avi Kivityc5705a72011-12-20 15:59:12 +02002952ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
pbrook94a6b542009-04-11 17:15:54 +00002953{
Avi Kivityc5705a72011-12-20 15:59:12 +02002954 return qemu_ram_alloc_from_ptr(size, NULL, mr);
pbrook94a6b542009-04-11 17:15:54 +00002955}
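
/* Illustrative sketch (assumption): how board or device code might
   obtain guest RAM from this allocator. The MemoryRegion is assumed
   to be initialized by the caller; names are examples only. */
#ifdef EXEC_EXAMPLE_SKETCHES
static ram_addr_t example_alloc_vram(MemoryRegion *mr, ram_addr_t size)
{
    /* Allocates host memory (hugetlbfs-backed with -mem-path), picks
       an offset in the ram_addr_t space via find_ram_offset() and
       marks the whole block dirty so migration will send it. */
    ram_addr_t offset = qemu_ram_alloc(size, mr);

    qemu_ram_set_idstr(offset, "example.vram", NULL);
    return offset;
}
#endif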
bellarde9a1ab12007-02-08 23:08:38 +00002956
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002957void qemu_ram_free_from_ptr(ram_addr_t addr)
2958{
2959 RAMBlock *block;
2960
2961 QLIST_FOREACH(block, &ram_list.blocks, next) {
2962 if (addr == block->offset) {
2963 QLIST_REMOVE(block, next);
Anthony Liguori7267c092011-08-20 22:09:37 -05002964 g_free(block);
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002965 return;
2966 }
2967 }
2968}
2969
Anthony Liguoric227f092009-10-01 16:12:16 -05002970void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00002971{
Alex Williamson04b16652010-07-02 11:13:17 -06002972 RAMBlock *block;
2973
2974 QLIST_FOREACH(block, &ram_list.blocks, next) {
2975 if (addr == block->offset) {
2976 QLIST_REMOVE(block, next);
Huang Yingcd19cfa2011-03-02 08:56:19 +01002977 if (block->flags & RAM_PREALLOC_MASK) {
2978 ;
2979 } else if (mem_path) {
Alex Williamson04b16652010-07-02 11:13:17 -06002980#if defined (__linux__) && !defined(TARGET_S390X)
2981 if (block->fd) {
2982 munmap(block->host, block->length);
2983 close(block->fd);
2984 } else {
2985 qemu_vfree(block->host);
2986 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01002987#else
2988 abort();
Alex Williamson04b16652010-07-02 11:13:17 -06002989#endif
2990 } else {
2991#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2992 munmap(block->host, block->length);
2993#else
Jan Kiszka868bb332011-06-21 22:59:09 +02002994 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002995 xen_invalidate_map_cache_entry(block->host);
Jun Nakajima432d2682010-08-31 16:41:25 +01002996 } else {
2997 qemu_vfree(block->host);
2998 }
Alex Williamson04b16652010-07-02 11:13:17 -06002999#endif
3000 }
Anthony Liguori7267c092011-08-20 22:09:37 -05003001 g_free(block);
Alex Williamson04b16652010-07-02 11:13:17 -06003002 return;
3003 }
3004 }
3005
bellarde9a1ab12007-02-08 23:08:38 +00003006}
3007
Huang Yingcd19cfa2011-03-02 08:56:19 +01003008#ifndef _WIN32
3009void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
3010{
3011 RAMBlock *block;
3012 ram_addr_t offset;
3013 int flags;
3014 void *area, *vaddr;
3015
3016 QLIST_FOREACH(block, &ram_list.blocks, next) {
3017 offset = addr - block->offset;
3018 if (offset < block->length) {
3019 vaddr = block->host + offset;
3020 if (block->flags & RAM_PREALLOC_MASK) {
3021 ;
3022 } else {
3023 flags = MAP_FIXED;
3024 munmap(vaddr, length);
3025 if (mem_path) {
3026#if defined(__linux__) && !defined(TARGET_S390X)
3027 if (block->fd) {
3028#ifdef MAP_POPULATE
3029 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
3030 MAP_PRIVATE;
3031#else
3032 flags |= MAP_PRIVATE;
3033#endif
3034 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3035 flags, block->fd, offset);
3036 } else {
3037 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3038 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3039 flags, -1, 0);
3040 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01003041#else
3042 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01003043#endif
3044 } else {
3045#if defined(TARGET_S390X) && defined(CONFIG_KVM)
3046 flags |= MAP_SHARED | MAP_ANONYMOUS;
3047 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
3048 flags, -1, 0);
3049#else
3050 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3051 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3052 flags, -1, 0);
3053#endif
3054 }
3055 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00003056 fprintf(stderr, "Could not remap addr: "
3057 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01003058 length, addr);
3059 exit(1);
3060 }
3061 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
3062 }
3063 return;
3064 }
3065 }
3066}
3067#endif /* !_WIN32 */
3068
pbrookdc828ca2009-04-09 22:21:07 +00003069/* Return a host pointer to ram allocated with qemu_ram_alloc.
pbrook5579c7f2009-04-11 14:47:08 +00003070 With the exception of the softmmu code in this file, this should
3071 only be used for local memory (e.g. video ram) that the device owns,
3072 and knows it isn't going to access beyond the end of the block.
3073
3074 It should not be used for general purpose DMA.
3075 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
3076 */
Anthony Liguoric227f092009-10-01 16:12:16 -05003077void *qemu_get_ram_ptr(ram_addr_t addr)
pbrookdc828ca2009-04-09 22:21:07 +00003078{
pbrook94a6b542009-04-11 17:15:54 +00003079 RAMBlock *block;
3080
Alex Williamsonf471a172010-06-11 11:11:42 -06003081 QLIST_FOREACH(block, &ram_list.blocks, next) {
3082 if (addr - block->offset < block->length) {
Vincent Palatin7d82af32011-03-10 15:47:46 -05003083            /* Move this entry to the start of the list.  */
3084 if (block != QLIST_FIRST(&ram_list.blocks)) {
3085 QLIST_REMOVE(block, next);
3086 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
3087 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003088 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003089            /* We need to check if the requested address is in the RAM
3090             * because we don't want to map the entire guest memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003091             * The block at offset 0 is mapped one page at a time; other
Jun Nakajima432d2682010-08-31 16:41:25 +01003092             * blocks are mapped in full on first use. */
3093 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003094 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01003095 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003096 block->host =
3097 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01003098 }
3099 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003100 return block->host + (addr - block->offset);
3101 }
pbrook94a6b542009-04-11 17:15:54 +00003102 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003103
3104 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3105 abort();
3106
3107 return NULL;
pbrookdc828ca2009-04-09 22:21:07 +00003108}
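
/* Illustrative sketch, not from the original code: the intended use
   of qemu_get_ram_ptr() for device-local memory, per the comment
   above. Staying within the block is the caller's responsibility. */
#ifdef EXEC_EXAMPLE_SKETCHES
static void example_clear_vram(ram_addr_t vram_offset, ram_addr_t vram_size)
{
    uint8_t *host = qemu_get_ram_ptr(vram_offset);

    /* The device owns this block and never touches bytes past its
       end, so a plain memset on the host pointer is safe. */
    memset(host, 0, vram_size);
}
#endif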
3109
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02003110/* Return a host pointer to ram allocated with qemu_ram_alloc.
3111 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3112 */
3113void *qemu_safe_ram_ptr(ram_addr_t addr)
3114{
3115 RAMBlock *block;
3116
3117 QLIST_FOREACH(block, &ram_list.blocks, next) {
3118 if (addr - block->offset < block->length) {
Jan Kiszka868bb332011-06-21 22:59:09 +02003119 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003120            /* We need to check if the requested address is in the RAM
3121             * because we don't want to map the entire guest memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003122             * The block at offset 0 is mapped one page at a time; other
Jun Nakajima432d2682010-08-31 16:41:25 +01003123             * blocks are mapped in full on first use. */
3124 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003125 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01003126 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003127 block->host =
3128 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01003129 }
3130 }
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02003131 return block->host + (addr - block->offset);
3132 }
3133 }
3134
3135 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3136 abort();
3137
3138 return NULL;
3139}
3140
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003141/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
3142 * but takes a size argument */
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003143void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003144{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003145 if (*size == 0) {
3146 return NULL;
3147 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003148 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003149 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02003150 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003151 RAMBlock *block;
3152
3153 QLIST_FOREACH(block, &ram_list.blocks, next) {
3154 if (addr - block->offset < block->length) {
3155 if (addr - block->offset + *size > block->length)
3156 *size = block->length - addr + block->offset;
3157 return block->host + (addr - block->offset);
3158 }
3159 }
3160
3161 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3162 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003163 }
3164}
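
/* Illustrative sketch (assumption): a DMA-style caller that accepts
   a shorter mapping when the request crosses the end of a RAM block
   and loops over the remainder. */
#ifdef EXEC_EXAMPLE_SKETCHES
static void *example_map_for_dma(ram_addr_t addr, ram_addr_t *plen)
{
    /* On return *plen may have been reduced to the bytes actually
       mapped; the caller issues another map for what is left. */
    return qemu_ram_ptr_length(addr, plen);
}
#endif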
3165
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003166void qemu_put_ram_ptr(void *addr)
3167{
3168 trace_qemu_put_ram_ptr(addr);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003169}
3170
Marcelo Tosattie8902612010-10-11 15:31:19 -03003171int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00003172{
pbrook94a6b542009-04-11 17:15:54 +00003173 RAMBlock *block;
3174 uint8_t *host = ptr;
3175
Jan Kiszka868bb332011-06-21 22:59:09 +02003176 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003177 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003178 return 0;
3179 }
3180
Alex Williamsonf471a172010-06-11 11:11:42 -06003181 QLIST_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003182        /* This case happens when the block is not mapped. */
3183 if (block->host == NULL) {
3184 continue;
3185 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003186 if (host - block->host < block->length) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03003187 *ram_addr = block->offset + (host - block->host);
3188 return 0;
Alex Williamsonf471a172010-06-11 11:11:42 -06003189 }
pbrook94a6b542009-04-11 17:15:54 +00003190 }
Jun Nakajima432d2682010-08-31 16:41:25 +01003191
Marcelo Tosattie8902612010-10-11 15:31:19 -03003192 return -1;
3193}
Alex Williamsonf471a172010-06-11 11:11:42 -06003194
Marcelo Tosattie8902612010-10-11 15:31:19 -03003195/* Some of the softmmu routines need to translate from a host pointer
3196 (typically a TLB entry) back to a ram offset. */
3197ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3198{
3199 ram_addr_t ram_addr;
Alex Williamsonf471a172010-06-11 11:11:42 -06003200
Marcelo Tosattie8902612010-10-11 15:31:19 -03003201 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3202 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3203 abort();
3204 }
3205 return ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00003206}
3207
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003208static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
3209 unsigned size)
bellard33417e72003-08-10 21:47:01 +00003210{
pbrook67d3b952006-12-18 05:03:52 +00003211#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00003212 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
pbrook67d3b952006-12-18 05:03:52 +00003213#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003214#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003215 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00003216#endif
3217 return 0;
3218}
3219
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003220static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
3221 uint64_t val, unsigned size)
blueswir1e18231a2008-10-06 18:46:28 +00003222{
3223#ifdef DEBUG_UNASSIGNED
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003224 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
blueswir1e18231a2008-10-06 18:46:28 +00003225#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003226#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003227 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00003228#endif
3229}
3230
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003231static const MemoryRegionOps unassigned_mem_ops = {
3232 .read = unassigned_mem_read,
3233 .write = unassigned_mem_write,
3234 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00003235};
3236
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003237static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
3238 unsigned size)
3239{
3240 abort();
3241}
3242
3243static void error_mem_write(void *opaque, target_phys_addr_t addr,
3244 uint64_t value, unsigned size)
3245{
3246 abort();
3247}
3248
3249static const MemoryRegionOps error_mem_ops = {
3250 .read = error_mem_read,
3251 .write = error_mem_write,
3252 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00003253};
3254
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003255static const MemoryRegionOps rom_mem_ops = {
3256 .read = error_mem_read,
3257 .write = unassigned_mem_write,
3258 .endianness = DEVICE_NATIVE_ENDIAN,
3259};
3260
3261static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
3262 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00003263{
bellard3a7d9292005-08-21 09:26:42 +00003264 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003265 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003266 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3267#if !defined(CONFIG_USER_ONLY)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003268 tb_invalidate_phys_page_fast(ram_addr, size);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003269 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003270#endif
3271 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003272 switch (size) {
3273 case 1:
3274 stb_p(qemu_get_ram_ptr(ram_addr), val);
3275 break;
3276 case 2:
3277 stw_p(qemu_get_ram_ptr(ram_addr), val);
3278 break;
3279 case 4:
3280 stl_p(qemu_get_ram_ptr(ram_addr), val);
3281 break;
3282 default:
3283 abort();
3284 }
bellardf23db162005-08-21 19:12:28 +00003285 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003286 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00003287 /* we remove the notdirty callback only if the code has been
3288 flushed */
3289 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00003290 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00003291}
3292
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003293static const MemoryRegionOps notdirty_mem_ops = {
3294 .read = error_mem_read,
3295 .write = notdirty_mem_write,
3296 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00003297};
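
/* Flow sketch (illustrative): a guest store to a page that still
   holds translated code is routed through notdirty_mem_write instead
   of going straight to RAM:
   1. tb_invalidate_phys_page_fast() discards TBs on the page,
   2. the store is performed on the backing RAM via qemu_get_ram_ptr(),
   3. the dirty bits are raised; once all are set (0xff) tlb_set_dirty()
      turns the TLB entry back into a plain RAM mapping, so later
      stores skip this slow path. */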
3298
pbrook0f459d12008-06-09 00:20:13 +00003299/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00003300static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00003301{
Andreas Färber9349b4f2012-03-14 01:38:32 +01003302 CPUArchState *env = cpu_single_env;
aliguori06d55cc2008-11-18 20:24:06 +00003303 target_ulong pc, cs_base;
3304 TranslationBlock *tb;
pbrook0f459d12008-06-09 00:20:13 +00003305 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00003306 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00003307 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00003308
aliguori06d55cc2008-11-18 20:24:06 +00003309 if (env->watchpoint_hit) {
3310 /* We re-entered the check after replacing the TB. Now raise
3311 * the debug interrupt so that is will trigger after the
3312 * current instruction. */
3313 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3314 return;
3315 }
pbrook2e70f6e2008-06-29 01:03:05 +00003316 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003317 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00003318 if ((vaddr == (wp->vaddr & len_mask) ||
3319 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00003320 wp->flags |= BP_WATCHPOINT_HIT;
3321 if (!env->watchpoint_hit) {
3322 env->watchpoint_hit = wp;
3323 tb = tb_find_pc(env->mem_io_pc);
3324 if (!tb) {
3325 cpu_abort(env, "check_watchpoint: could not find TB for "
3326 "pc=%p", (void *)env->mem_io_pc);
3327 }
Stefan Weil618ba8e2011-04-18 06:39:53 +00003328 cpu_restore_state(tb, env, env->mem_io_pc);
aliguori6e140f22008-11-18 20:37:55 +00003329 tb_phys_invalidate(tb, -1);
3330 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3331 env->exception_index = EXCP_DEBUG;
Max Filippov488d6572012-01-29 02:24:39 +04003332 cpu_loop_exit(env);
aliguori6e140f22008-11-18 20:37:55 +00003333 } else {
3334 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3335 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
Max Filippov488d6572012-01-29 02:24:39 +04003336 cpu_resume_from_signal(env, NULL);
aliguori6e140f22008-11-18 20:37:55 +00003337 }
aliguori06d55cc2008-11-18 20:24:06 +00003338 }
aliguori6e140f22008-11-18 20:37:55 +00003339 } else {
3340 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00003341 }
3342 }
3343}
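
/* Illustrative sketch (assumption): arming the watchpoint that the
   check above services. cpu_watchpoint_insert() is defined earlier in
   this file but not shown here, so its use below is an assumption;
   the length and flags are examples. */
#ifdef EXEC_EXAMPLE_SKETCHES
static void example_watch_guest_word(CPUArchState *env, target_ulong vaddr)
{
    /* Trap reads and writes of a 4-byte guest location; the TLB
       tricks route accesses through watch_mem_read/watch_mem_write,
       which call check_watchpoint() before the real access. */
    cpu_watchpoint_insert(env, vaddr, 4, BP_MEM_ACCESS, NULL);
}
#endif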
3344
pbrook6658ffb2007-03-16 23:58:11 +00003345/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3346 so these check for a hit then pass through to the normal out-of-line
3347 phys routines. */
Avi Kivity1ec9b902012-01-02 12:47:48 +02003348static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
3349 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00003350{
Avi Kivity1ec9b902012-01-02 12:47:48 +02003351 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
3352 switch (size) {
3353 case 1: return ldub_phys(addr);
3354 case 2: return lduw_phys(addr);
3355 case 4: return ldl_phys(addr);
3356 default: abort();
3357 }
pbrook6658ffb2007-03-16 23:58:11 +00003358}
3359
Avi Kivity1ec9b902012-01-02 12:47:48 +02003360static void watch_mem_write(void *opaque, target_phys_addr_t addr,
3361 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00003362{
Avi Kivity1ec9b902012-01-02 12:47:48 +02003363 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
3364 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04003365 case 1:
3366 stb_phys(addr, val);
3367 break;
3368 case 2:
3369 stw_phys(addr, val);
3370 break;
3371 case 4:
3372 stl_phys(addr, val);
3373 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02003374 default: abort();
3375 }
pbrook6658ffb2007-03-16 23:58:11 +00003376}
3377
Avi Kivity1ec9b902012-01-02 12:47:48 +02003378static const MemoryRegionOps watch_mem_ops = {
3379 .read = watch_mem_read,
3380 .write = watch_mem_write,
3381 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00003382};
pbrook6658ffb2007-03-16 23:58:11 +00003383
Avi Kivity70c68e42012-01-02 12:32:48 +02003384static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
3385 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003386{
Avi Kivity70c68e42012-01-02 12:32:48 +02003387 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003388 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02003389 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00003390#if defined(DEBUG_SUBPAGE)
3391 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3392 mmio, len, addr, idx);
3393#endif
blueswir1db7b5422007-05-26 17:36:03 +00003394
Avi Kivity5312bd82012-02-12 18:32:55 +02003395 section = &phys_sections[mmio->sub_section[idx]];
3396 addr += mmio->base;
3397 addr -= section->offset_within_address_space;
3398 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02003399 return io_mem_read(section->mr, addr, len);
blueswir1db7b5422007-05-26 17:36:03 +00003400}
3401
Avi Kivity70c68e42012-01-02 12:32:48 +02003402static void subpage_write(void *opaque, target_phys_addr_t addr,
3403 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003404{
Avi Kivity70c68e42012-01-02 12:32:48 +02003405 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003406 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02003407 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00003408#if defined(DEBUG_SUBPAGE)
Avi Kivity70c68e42012-01-02 12:32:48 +02003409 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
3410 " idx %d value %"PRIx64"\n",
Richard Hendersonf6405242010-04-22 16:47:31 -07003411 __func__, mmio, len, addr, idx, value);
blueswir1db7b5422007-05-26 17:36:03 +00003412#endif
Richard Hendersonf6405242010-04-22 16:47:31 -07003413
Avi Kivity5312bd82012-02-12 18:32:55 +02003414 section = &phys_sections[mmio->sub_section[idx]];
3415 addr += mmio->base;
3416 addr -= section->offset_within_address_space;
3417 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02003418 io_mem_write(section->mr, addr, value, len);
blueswir1db7b5422007-05-26 17:36:03 +00003419}
3420
Avi Kivity70c68e42012-01-02 12:32:48 +02003421static const MemoryRegionOps subpage_ops = {
3422 .read = subpage_read,
3423 .write = subpage_write,
3424 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00003425};
3426
Avi Kivityde712f92012-01-02 12:41:07 +02003427static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
3428 unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01003429{
3430 ram_addr_t raddr = addr;
3431 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02003432 switch (size) {
3433 case 1: return ldub_p(ptr);
3434 case 2: return lduw_p(ptr);
3435 case 4: return ldl_p(ptr);
3436 default: abort();
3437 }
Andreas Färber56384e82011-11-30 16:26:21 +01003438}
3439
Avi Kivityde712f92012-01-02 12:41:07 +02003440static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
3441 uint64_t value, unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01003442{
3443 ram_addr_t raddr = addr;
3444 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02003445 switch (size) {
3446 case 1: return stb_p(ptr, value);
3447 case 2: return stw_p(ptr, value);
3448 case 4: return stl_p(ptr, value);
3449 default: abort();
3450 }
Andreas Färber56384e82011-11-30 16:26:21 +01003451}
3452
Avi Kivityde712f92012-01-02 12:41:07 +02003453static const MemoryRegionOps subpage_ram_ops = {
3454 .read = subpage_ram_read,
3455 .write = subpage_ram_write,
3456 .endianness = DEVICE_NATIVE_ENDIAN,
Andreas Färber56384e82011-11-30 16:26:21 +01003457};
3458
Anthony Liguoric227f092009-10-01 16:12:16 -05003459static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02003460 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00003461{
3462 int idx, eidx;
3463
3464 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3465 return -1;
3466 idx = SUBPAGE_IDX(start);
3467 eidx = SUBPAGE_IDX(end);
3468#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00003469    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", __func__,
blueswir1db7b5422007-05-26 17:36:03 +00003470           mmio, start, end, idx, eidx, section);
3471#endif
Avi Kivity5312bd82012-02-12 18:32:55 +02003472 if (memory_region_is_ram(phys_sections[section].mr)) {
3473 MemoryRegionSection new_section = phys_sections[section];
3474 new_section.mr = &io_mem_subpage_ram;
3475 section = phys_section_add(&new_section);
Andreas Färber56384e82011-11-30 16:26:21 +01003476 }
blueswir1db7b5422007-05-26 17:36:03 +00003477 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02003478 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00003479 }
3480
3481 return 0;
3482}
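
/* Illustrative sketch (assumption): two small regions sharing one
   guest page end up in a single subpage_t. The sections are made up
   for the example and assumed to lie in the same target page. */
#ifdef EXEC_EXAMPLE_SKETCHES
static void example_split_page(MemoryRegionSection *a, MemoryRegionSection *b)
{
    target_phys_addr_t base = a->offset_within_address_space
                              & TARGET_PAGE_MASK;
    subpage_t *mmio = subpage_init(base);

    /* First 256 bytes go to section a, the next 256 to section b;
       the rest of the page stays unassigned. */
    subpage_register(mmio, 0x000, 0x0ff, phys_section_add(a));
    subpage_register(mmio, 0x100, 0x1ff, phys_section_add(b));
}
#endif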
3483
Avi Kivity0f0cb162012-02-13 17:14:32 +02003484static subpage_t *subpage_init(target_phys_addr_t base)
blueswir1db7b5422007-05-26 17:36:03 +00003485{
Anthony Liguoric227f092009-10-01 16:12:16 -05003486 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00003487
Anthony Liguori7267c092011-08-20 22:09:37 -05003488 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00003489
3490 mmio->base = base;
Avi Kivity70c68e42012-01-02 12:32:48 +02003491 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
3492 "subpage", TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02003493 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00003494#if defined(DEBUG_SUBPAGE)
aliguori1eec6142009-02-05 22:06:18 +00003495    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
3496           mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00003497#endif
Avi Kivity0f0cb162012-02-13 17:14:32 +02003498 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
blueswir1db7b5422007-05-26 17:36:03 +00003499
3500 return mmio;
3501}
3502
Avi Kivity5312bd82012-02-12 18:32:55 +02003503static uint16_t dummy_section(MemoryRegion *mr)
3504{
3505 MemoryRegionSection section = {
3506 .mr = mr,
3507 .offset_within_address_space = 0,
3508 .offset_within_region = 0,
3509 .size = UINT64_MAX,
3510 };
3511
3512 return phys_section_add(&section);
3513}
3514
Avi Kivity37ec01d2012-03-08 18:08:35 +02003515MemoryRegion *iotlb_to_region(target_phys_addr_t index)
Avi Kivityaa102232012-03-08 17:06:55 +02003516{
Avi Kivity37ec01d2012-03-08 18:08:35 +02003517 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02003518}
3519
Avi Kivitye9179ce2009-06-14 11:38:52 +03003520static void io_mem_init(void)
3521{
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003522 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003523 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
3524 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
3525 "unassigned", UINT64_MAX);
3526 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
3527 "notdirty", UINT64_MAX);
Avi Kivityde712f92012-01-02 12:41:07 +02003528 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
3529 "subpage-ram", UINT64_MAX);
Avi Kivity1ec9b902012-01-02 12:47:48 +02003530 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
3531 "watch", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003532}
3533
Avi Kivity50c1e142012-02-08 21:36:02 +02003534static void core_begin(MemoryListener *listener)
3535{
Avi Kivity54688b12012-02-09 17:34:32 +02003536 destroy_all_mappings();
Avi Kivity5312bd82012-02-12 18:32:55 +02003537 phys_sections_clear();
Avi Kivityc19e8802012-02-13 20:25:31 +02003538 phys_map.ptr = PHYS_MAP_NODE_NIL;
Avi Kivity5312bd82012-02-12 18:32:55 +02003539 phys_section_unassigned = dummy_section(&io_mem_unassigned);
Avi Kivityaa102232012-03-08 17:06:55 +02003540 phys_section_notdirty = dummy_section(&io_mem_notdirty);
3541 phys_section_rom = dummy_section(&io_mem_rom);
3542 phys_section_watch = dummy_section(&io_mem_watch);
Avi Kivity50c1e142012-02-08 21:36:02 +02003543}
3544
3545static void core_commit(MemoryListener *listener)
3546{
Andreas Färber9349b4f2012-03-14 01:38:32 +01003547 CPUArchState *env;
Avi Kivity117712c2012-02-12 21:23:17 +02003548
3549 /* since each CPU stores ram addresses in its TLB cache, we must
3550 reset the modified entries */
3551 /* XXX: slow ! */
3552 for(env = first_cpu; env != NULL; env = env->next_cpu) {
3553 tlb_flush(env, 1);
3554 }
Avi Kivity50c1e142012-02-08 21:36:02 +02003555}
3556
Avi Kivity93632742012-02-08 16:54:16 +02003557static void core_region_add(MemoryListener *listener,
3558 MemoryRegionSection *section)
3559{
Avi Kivity4855d412012-02-08 21:16:05 +02003560 cpu_register_physical_memory_log(section, section->readonly);
Avi Kivity93632742012-02-08 16:54:16 +02003561}
3562
3563static void core_region_del(MemoryListener *listener,
3564 MemoryRegionSection *section)
3565{
Avi Kivity93632742012-02-08 16:54:16 +02003566}
3567
Avi Kivity50c1e142012-02-08 21:36:02 +02003568static void core_region_nop(MemoryListener *listener,
3569 MemoryRegionSection *section)
3570{
Avi Kivity54688b12012-02-09 17:34:32 +02003571 cpu_register_physical_memory_log(section, section->readonly);
Avi Kivity50c1e142012-02-08 21:36:02 +02003572}
3573
Avi Kivity93632742012-02-08 16:54:16 +02003574static void core_log_start(MemoryListener *listener,
3575 MemoryRegionSection *section)
3576{
3577}
3578
3579static void core_log_stop(MemoryListener *listener,
3580 MemoryRegionSection *section)
3581{
3582}
3583
3584static void core_log_sync(MemoryListener *listener,
3585 MemoryRegionSection *section)
3586{
3587}
3588
3589static void core_log_global_start(MemoryListener *listener)
3590{
3591 cpu_physical_memory_set_dirty_tracking(1);
3592}
3593
3594static void core_log_global_stop(MemoryListener *listener)
3595{
3596 cpu_physical_memory_set_dirty_tracking(0);
3597}
3598
3599static void core_eventfd_add(MemoryListener *listener,
3600 MemoryRegionSection *section,
3601 bool match_data, uint64_t data, int fd)
3602{
3603}
3604
3605static void core_eventfd_del(MemoryListener *listener,
3606 MemoryRegionSection *section,
3607 bool match_data, uint64_t data, int fd)
3608{
3609}
3610
Avi Kivity50c1e142012-02-08 21:36:02 +02003611static void io_begin(MemoryListener *listener)
3612{
3613}
3614
3615static void io_commit(MemoryListener *listener)
3616{
3617}
3618
Avi Kivity4855d412012-02-08 21:16:05 +02003619static void io_region_add(MemoryListener *listener,
3620 MemoryRegionSection *section)
3621{
Avi Kivitya2d33522012-03-05 17:40:12 +02003622 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
3623
3624 mrio->mr = section->mr;
3625 mrio->offset = section->offset_within_region;
3626 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
Avi Kivity4855d412012-02-08 21:16:05 +02003627 section->offset_within_address_space, section->size);
Avi Kivitya2d33522012-03-05 17:40:12 +02003628 ioport_register(&mrio->iorange);
Avi Kivity4855d412012-02-08 21:16:05 +02003629}
3630
3631static void io_region_del(MemoryListener *listener,
3632 MemoryRegionSection *section)
3633{
3634 isa_unassign_ioport(section->offset_within_address_space, section->size);
3635}
3636
Avi Kivity50c1e142012-02-08 21:36:02 +02003637static void io_region_nop(MemoryListener *listener,
3638 MemoryRegionSection *section)
3639{
3640}
3641
Avi Kivity4855d412012-02-08 21:16:05 +02003642static void io_log_start(MemoryListener *listener,
3643 MemoryRegionSection *section)
3644{
3645}
3646
3647static void io_log_stop(MemoryListener *listener,
3648 MemoryRegionSection *section)
3649{
3650}
3651
3652static void io_log_sync(MemoryListener *listener,
3653 MemoryRegionSection *section)
3654{
3655}
3656
3657static void io_log_global_start(MemoryListener *listener)
3658{
3659}
3660
3661static void io_log_global_stop(MemoryListener *listener)
3662{
3663}
3664
3665static void io_eventfd_add(MemoryListener *listener,
3666 MemoryRegionSection *section,
3667 bool match_data, uint64_t data, int fd)
3668{
3669}
3670
3671static void io_eventfd_del(MemoryListener *listener,
3672 MemoryRegionSection *section,
3673 bool match_data, uint64_t data, int fd)
3674{
3675}
3676
Avi Kivity93632742012-02-08 16:54:16 +02003677static MemoryListener core_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02003678 .begin = core_begin,
3679 .commit = core_commit,
Avi Kivity93632742012-02-08 16:54:16 +02003680 .region_add = core_region_add,
3681 .region_del = core_region_del,
Avi Kivity50c1e142012-02-08 21:36:02 +02003682 .region_nop = core_region_nop,
Avi Kivity93632742012-02-08 16:54:16 +02003683 .log_start = core_log_start,
3684 .log_stop = core_log_stop,
3685 .log_sync = core_log_sync,
3686 .log_global_start = core_log_global_start,
3687 .log_global_stop = core_log_global_stop,
3688 .eventfd_add = core_eventfd_add,
3689 .eventfd_del = core_eventfd_del,
3690 .priority = 0,
3691};
3692
Avi Kivity4855d412012-02-08 21:16:05 +02003693static MemoryListener io_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02003694 .begin = io_begin,
3695 .commit = io_commit,
Avi Kivity4855d412012-02-08 21:16:05 +02003696 .region_add = io_region_add,
3697 .region_del = io_region_del,
Avi Kivity50c1e142012-02-08 21:36:02 +02003698 .region_nop = io_region_nop,
Avi Kivity4855d412012-02-08 21:16:05 +02003699 .log_start = io_log_start,
3700 .log_stop = io_log_stop,
3701 .log_sync = io_log_sync,
3702 .log_global_start = io_log_global_start,
3703 .log_global_stop = io_log_global_stop,
3704 .eventfd_add = io_eventfd_add,
3705 .eventfd_del = io_eventfd_del,
3706 .priority = 0,
3707};
3708
Avi Kivity62152b82011-07-26 14:26:14 +03003709static void memory_map_init(void)
3710{
Anthony Liguori7267c092011-08-20 22:09:37 -05003711 system_memory = g_malloc(sizeof(*system_memory));
Avi Kivity8417ceb2011-08-03 11:56:14 +03003712 memory_region_init(system_memory, "system", INT64_MAX);
Avi Kivity62152b82011-07-26 14:26:14 +03003713 set_system_memory_map(system_memory);
Avi Kivity309cb472011-08-08 16:09:03 +03003714
Anthony Liguori7267c092011-08-20 22:09:37 -05003715 system_io = g_malloc(sizeof(*system_io));
Avi Kivity309cb472011-08-08 16:09:03 +03003716 memory_region_init(system_io, "io", 65536);
3717 set_system_io_map(system_io);
Avi Kivity93632742012-02-08 16:54:16 +02003718
Avi Kivity4855d412012-02-08 21:16:05 +02003719 memory_listener_register(&core_memory_listener, system_memory);
3720 memory_listener_register(&io_memory_listener, system_io);
Avi Kivity62152b82011-07-26 14:26:14 +03003721}
3722
3723MemoryRegion *get_system_memory(void)
3724{
3725 return system_memory;
3726}
3727
Avi Kivity309cb472011-08-08 16:09:03 +03003728MemoryRegion *get_system_io(void)
3729{
3730 return system_io;
3731}
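
/* Illustrative sketch (assumption): how a device places an MMIO
   region into the address space set up by memory_map_init().
   memory_region_add_subregion() comes from memory.h; the ops, size
   and base address are examples only. */
#ifdef EXEC_EXAMPLE_SKETCHES
static void example_map_mmio(MemoryRegion *mr, const MemoryRegionOps *ops,
                             void *opaque, target_phys_addr_t base)
{
    memory_region_init_io(mr, ops, opaque, "example-mmio", 0x1000);
    /* The core_memory_listener above sees this as a region_add and
       populates the phys_map accordingly. */
    memory_region_add_subregion(get_system_memory(), base, mr);
}
#endif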
3732
pbrooke2eef172008-06-08 01:09:01 +00003733#endif /* !defined(CONFIG_USER_ONLY) */
3734
bellard13eb76e2004-01-24 15:23:36 +00003735/* physical memory access (slow version, mainly for debug) */
3736#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +01003737int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00003738 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003739{
3740 int l, flags;
3741 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00003742 void * p;
bellard13eb76e2004-01-24 15:23:36 +00003743
3744 while (len > 0) {
3745 page = addr & TARGET_PAGE_MASK;
3746 l = (page + TARGET_PAGE_SIZE) - addr;
3747 if (l > len)
3748 l = len;
3749 flags = page_get_flags(page);
3750 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00003751 return -1;
bellard13eb76e2004-01-24 15:23:36 +00003752 if (is_write) {
3753 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00003754 return -1;
bellard579a97f2007-11-11 14:26:47 +00003755 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003756 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00003757 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003758 memcpy(p, buf, l);
3759 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00003760 } else {
3761 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00003762 return -1;
bellard579a97f2007-11-11 14:26:47 +00003763 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003764 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00003765 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003766 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00003767 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00003768 }
3769 len -= l;
3770 buf += l;
3771 addr += l;
3772 }
Paul Brooka68fe892010-03-01 00:08:59 +00003773 return 0;
bellard13eb76e2004-01-24 15:23:36 +00003774}
bellard8df1cd02005-01-28 22:37:22 +00003775
bellard13eb76e2004-01-24 15:23:36 +00003776#else
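/* Slow-path physical memory access: the request is split at page
   boundaries and each fragment is dispatched either to RAM (a memcpy,
   with TB invalidation and dirty-bit bookkeeping on writes) or to the
   device's I/O callbacks in 4/2/1-byte units, depending on what the
   physical page maps to. */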
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    MemoryRegionSection *section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        if (is_write) {
            if (!memory_region_is_ram(section->mr)) {
                target_phys_addr_t addr1;
                addr1 = section_addr(section, addr);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write(section->mr, addr1, val, 4);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write(section->mr, addr1, val, 2);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write(section->mr, addr1, val, 1);
                    l = 1;
                }
            } else if (!section->readonly) {
                ram_addr_t addr1;
                addr1 = memory_region_get_ram_addr(section->mr)
                    + section_addr(section, addr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                qemu_put_ram_ptr(ptr);
            }
        } else {
            if (!is_ram_rom_romd(section)) {
                target_phys_addr_t addr1;
                /* I/O case */
                addr1 = section_addr(section, addr);
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read(section->mr, addr1, 4);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read(section->mr, addr1, 2);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read(section->mr, addr1, 1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(section->mr->ram_addr)
                    + section_addr(section, addr);
                memcpy(buf, ptr, l);
                qemu_put_ram_ptr(ptr);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    MemoryRegionSection *section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        if (!is_ram_rom_romd(section)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = memory_region_get_ram_addr(section->mr)
                + section_addr(section, addr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            qemu_put_ram_ptr(ptr);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

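/* Fallback buffer for cpu_physical_memory_map() when the target is not
   directly addressable RAM: the data is staged here and flushed through
   cpu_physical_memory_rw() at unmap time.  Only one mapping can use it
   at any given moment. */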
typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

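/* Callers that fail cpu_physical_memory_map() because the bounce buffer
   is busy can register a callback here; the callbacks fire from
   cpu_physical_memory_unmap() once the buffer is released. */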
typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t todo = 0;
    int l;
    target_phys_addr_t page;
    MemoryRegionSection *section;
    ram_addr_t raddr = RAM_ADDR_MAX;
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_read(addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            raddr = memory_region_get_ram_addr(section->mr)
                + section_addr(section, addr);
        }

        len -= l;
        addr += l;
        todo += l;
    }
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}

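/* Fixed-width physical loads.  Each ld*_phys_internal() helper looks up
   the section for the page and either forwards to io_mem_read() (byte
   swapping the result when the requested endianness differs from the
   target's) or reads directly from host RAM with the matching ld*_p
   accessor.  The public ldl/ldq/lduw variants below just pin the
   endianness argument. */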
/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint32_t val;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!is_ram_rom_romd(section)) {
        /* I/O case */
        addr = section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!is_ram_rom_romd(section)) {
        /* I/O case */
        addr = section_addr(section, addr);

        /* XXX This is broken when device endian != cpu endian.
           Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = io_mem_read(section->mr, addr, 4) << 32;
        val |= io_mem_read(section->mr, addr + 4, 4);
#else
        val = io_mem_read(section->mr, addr, 4);
        val |= io_mem_read(section->mr, addr + 4, 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!is_ram_rom_romd(section)) {
        /* I/O case */
        addr = section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
                               & TARGET_PAGE_MASK)
            + section_addr(section, addr);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}

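/* 64-bit variant of stl_phys_notdirty(): same dirty-tracking caveats,
   with the I/O path split into two 4-byte writes in target byte order. */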
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write(section->mr, addr, val >> 32, 4);
        io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
#else
        io_mem_write(section->mr, addr, (uint32_t)val, 4);
        io_mem_write(section->mr, addr + 4, val >> 32, 4);
#endif
    } else {
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + section_addr(section, addr));
        stq_p(ptr, val);
    }
}

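/* Fixed-width physical stores, mirroring the load helpers above: stores
   to ROM or non-RAM sections go through io_mem_write() (after swapping
   the value into the device's byte order when necessary), while RAM
   stores also invalidate any TBs on the page and update the dirty
   bitmap. */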
/* warning: addr must be aligned */
static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 2);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
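/* cpu_io_recompile() enforces that: when a device access is detected
   mid-TB, it locates the offending TB from the host return address,
   works out how many guest instructions had already executed, and
   regenerates a TB that ends exactly on the I/O instruction
   (CF_LAST_IO) so the access can be replayed deterministically. */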
void cpu_io_recompile(CPUArchState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred. */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn. */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB. If this is not
       the first instruction in a TB then re-execute the preceding
       branch. */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen. */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception. In practice
       we have already translated the block once so it's probably ok. */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB. */
    cpu_resume_from_signal(env, NULL);
}

#if !defined(CONFIG_USER_ONLY)

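/* Dump translation-cache statistics: code buffer usage, TB counts and
   sizes, cross-page TBs and direct-jump chaining rates, followed by the
   flush/invalidate counters and the TCG-internal statistics. */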
void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
   is the offset relative to phys_ram_base */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    void *p;
    MemoryRegion *mr;

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1);
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
#ifdef CONFIG_TCG_PASS_AREG0
        cpu_ldub_code(env1, addr);
#else
        ldub_code(addr);
#endif
    }
    pd = env1->iotlb[mmu_idx][page_index] & ~TARGET_PAGE_MASK;
    mr = iotlb_to_region(pd);
    if (mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device) {
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC)
        cpu_unassigned_access(env1, addr, 0, 1, 0, 4);
#else
        cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
#endif
    }
    p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

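/* Instantiate the softmmu access templates once more with the _cmmu
   suffix to build the code-access variants (used when the translator
   itself reads guest instructions).  GETPC() is stubbed out to NULL
   because these helpers are not called from generated code, and the
   template's env is mapped onto the global cpu_single_env. */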
#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif