/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation. */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
   have limited branch ranges (possibly also PPC) so place it in a
   section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting. */
int use_icount = 0;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* to optimize self modifying code handling, we count the number
       of write accesses to a given page and, past a threshold, use a
       code bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses. */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables. */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

#define P_L2_LEVELS \
    (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)

/* The bits remaining after N lower levels of page tables. */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc. */
static void *l1_map[V_L1_SIZE];

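/* Worked example (illustrative, not part of the original file): with
   L1_MAP_ADDR_SPACE_BITS = 36, TARGET_PAGE_BITS = 12 and L2_BITS = 10,
   24 bits of page index remain.  V_L1_BITS_REM = 24 % 10 = 4, which is
   not < 4, so V_L1_BITS = 4, V_L1_SIZE = 16 and V_L1_SHIFT = 20.  A
   page index is then split [4 | 10 | 10]: the top 4 bits select an
   l1_map slot and each 10-bit field below selects an entry in an
   L2_SIZE table, as walked by page_find_alloc() further down. */
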
#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageEntry PhysPageEntry;

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
    /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

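/* Illustrative note (not in the original file): with a 15-bit 'ptr'
   field, indices range over 0..0x7fff, and PHYS_MAP_NODE_NIL above is
   (0xffff >> 1) = 0x7fff; that value is reserved to mean "no node",
   so at most 0x7fff interior nodes or sections are addressable. */
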
/* This is a multi-level map on the physical address space.
   The bottom level has pointers to MemoryRegionSections. */
static PhysPageEntry phys_map = { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };

static void io_mem_init(void);
static void memory_map_init(void);

static MemoryRegion io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

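/* Worked example (illustrative, not part of the original file): with a
   4096-byte host page, map_exec((void *)0x1234, 0x10) rounds the range
   outward to [0x1000, 0x2000), so the whole containing page is made
   executable. */
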
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

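/* Illustrative note (not in the original file): on a host with 4 KiB
   pages emulating a target with 8 KiB pages, the checks above raise
   qemu_host_page_size from 4096 to TARGET_PAGE_SIZE = 8192 and set
   qemu_host_page_mask to ~0x1fff. */
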
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated. */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1. */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}


static void phys_page_set_level(PhysPageEntry *lp, target_phys_addr_t *index,
                                target_phys_addr_t *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    target_phys_addr_t step = (target_phys_addr_t)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(target_phys_addr_t index, target_phys_addr_t nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

static MemoryRegionSection *phys_page_find(target_phys_addr_t index)
{
    PhysPageEntry lp = phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}

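/* Illustrative sketch (not part of the original file): a round trip
   through the physical map.  phys_page_set() marks 'nb' consecutive
   target pages as belonging to one phys_sections[] entry, and
   phys_page_find() walks back down to it; pages never set resolve to
   phys_section_unassigned.  The function name and address below are
   hypothetical. */
#if 0
static void example_phys_map_round_trip(uint16_t section_index)
{
    target_phys_addr_t page = 0xe0000000 >> TARGET_PAGE_BITS;

    phys_page_set(page, 1, section_index);
    assert(phys_page_find(page) == &phys_sections[section_index]);
}
#endif
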
static target_phys_addr_t section_addr(MemoryRegionSection *section,
                                       target_phys_addr_t addr)
{
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    return addr;
}

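/* Worked example (illustrative, not part of the original file): for a
   section with offset_within_address_space = 0xe0000000 and
   offset_within_region = 0, section_addr(section, 0xe0001000) returns
   0x1000, i.e. the offset of the access within the backing
   MemoryRegion. */
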
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUArchState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode.  This will change when a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Keep the buffer no bigger than 16MB to branch between blocks */
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches. */
        /* We have a +- 4GB range on the branches; leave some slop. */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    tcg_register_jit(code_gen_buffer, code_gen_buffer_size);
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now. */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUArchState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUArchState),
        VMSTATE_UINT32(interrupt_request, CPUArchState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUArchState *qemu_get_cpu(int cpu)
{
    CPUArchState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUArchState *env)
{
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated. */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

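/* Illustrative sketch (not part of the original file): the alloc/free
   pattern tb_gen_code() below relies on.  tb_free() only reclaims
   buffer space when the TB is still the most recently allocated one.
   The function name is hypothetical. */
#if 0
static void example_tb_discard(target_ulong pc)
{
    TranslationBlock *tb = tb_alloc(pc);
    if (tb) {
        tb->tc_ptr = code_gen_ptr; /* as tb_gen_code() does */
        tb_free(tb);               /* rolls code_gen_ptr back, nb_tbs-- */
    }
}
#endif
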
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUArchState *env1)
{
    CPUArchState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUArchState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the per-CPU jump cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

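/* Worked example (illustrative, not part of the original file):
   set_bits(tab, 3, 7) covers bit positions 3..9, so it ORs 0xf8 into
   tab[0] (bits 3-7) and 0x03 into tab[1] (overall positions 8-9);
   bit 0 is the least significant bit of each byte. */
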
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

TranslationBlock *tb_gen_code(CPUArchState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

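/* Illustrative sketch (not part of the original file): the shape of a
   tb_gen_code() call as the cpu-exec loop might issue it; cs_base,
   flags and cflags are target-specific and shown here as zero
   placeholders.  The function name is hypothetical. */
#if 0
static TranslationBlock *example_translate(CPUArchState *env, target_ulong pc)
{
    return tb_gen_code(env, pc, /* cs_base */ 0, /* flags */ 0,
                       /* cflags */ 0);
}
#endif
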
/* invalidate all TBs which intersect with the target physical page
   range [start, end).  NOTE: start and end must refer to the same
   physical page.  'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUArchState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end) */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

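/* Worked example (illustrative, not part of the original file): for a
   4-byte write at offset 0x10 into a page that has a code bitmap,
   offset >> 3 = 2 and offset & 7 = 0, so b = code_bitmap[2] and the
   invalidate path is taken only if one of bits 0..3 (covering target
   bytes 0x10..0x13) is set. */
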
bellard9fa3e852004-01-04 18:06:42 +00001222#if !defined(CONFIG_SOFTMMU)
Paul Brook41c1b1c2010-03-12 16:54:58 +00001223static void tb_invalidate_phys_page(tb_page_addr_t addr,
bellardd720b932004-04-25 17:57:43 +00001224 unsigned long pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00001225{
aliguori6b917542008-11-18 19:46:41 +00001226 TranslationBlock *tb;
bellard9fa3e852004-01-04 18:06:42 +00001227 PageDesc *p;
aliguori6b917542008-11-18 19:46:41 +00001228 int n;
bellardd720b932004-04-25 17:57:43 +00001229#ifdef TARGET_HAS_PRECISE_SMC
aliguori6b917542008-11-18 19:46:41 +00001230 TranslationBlock *current_tb = NULL;
Andreas Färber9349b4f2012-03-14 01:38:32 +01001231 CPUArchState *env = cpu_single_env;
aliguori6b917542008-11-18 19:46:41 +00001232 int current_tb_modified = 0;
1233 target_ulong current_pc = 0;
1234 target_ulong current_cs_base = 0;
1235 int current_flags = 0;
bellardd720b932004-04-25 17:57:43 +00001236#endif
bellard9fa3e852004-01-04 18:06:42 +00001237
1238 addr &= TARGET_PAGE_MASK;
1239 p = page_find(addr >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001240 if (!p)
bellardfd6ce8f2003-05-14 19:00:11 +00001241 return;
1242 tb = p->first_tb;
bellardd720b932004-04-25 17:57:43 +00001243#ifdef TARGET_HAS_PRECISE_SMC
1244 if (tb && pc != 0) {
1245 current_tb = tb_find_pc(pc);
1246 }
1247#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001248 while (tb != NULL) {
bellard9fa3e852004-01-04 18:06:42 +00001249 n = (long)tb & 3;
1250 tb = (TranslationBlock *)((long)tb & ~3);
bellardd720b932004-04-25 17:57:43 +00001251#ifdef TARGET_HAS_PRECISE_SMC
1252 if (current_tb == tb &&
pbrook2e70f6e2008-06-29 01:03:05 +00001253 (current_tb->cflags & CF_COUNT_MASK) != 1) {
bellardd720b932004-04-25 17:57:43 +00001254 /* If we are modifying the current TB, we must stop
1255 its execution. We could be more precise by checking
1256 that the modification is after the current PC, but it
1257 would require a specialized function to partially
1258 restore the CPU state */
ths3b46e622007-09-17 08:09:54 +00001259
bellardd720b932004-04-25 17:57:43 +00001260 current_tb_modified = 1;
Stefan Weil618ba8e2011-04-18 06:39:53 +00001261 cpu_restore_state(current_tb, env, pc);
aliguori6b917542008-11-18 19:46:41 +00001262 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1263 &current_flags);
bellardd720b932004-04-25 17:57:43 +00001264 }
1265#endif /* TARGET_HAS_PRECISE_SMC */
bellard9fa3e852004-01-04 18:06:42 +00001266 tb_phys_invalidate(tb, addr);
1267 tb = tb->page_next[n];
bellardfd6ce8f2003-05-14 19:00:11 +00001268 }
1269 p->first_tb = NULL;
bellardd720b932004-04-25 17:57:43 +00001270#ifdef TARGET_HAS_PRECISE_SMC
1271 if (current_tb_modified) {
1272 /* we generate a block containing just the instruction
1273 modifying the memory. This ensures that it cannot modify
1274 itself */
bellardea1c1802004-06-14 18:56:36 +00001275 env->current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001276 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
bellardd720b932004-04-25 17:57:43 +00001277 cpu_resume_from_signal(env, puc);
1278 }
1279#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001280}
bellard9fa3e852004-01-04 18:06:42 +00001281#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001282
1283/* add the tb to the target page and protect it if necessary */
ths5fafdf22007-09-16 21:08:06 +00001284static inline void tb_alloc_page(TranslationBlock *tb,
Paul Brook41c1b1c2010-03-12 16:54:58 +00001285 unsigned int n, tb_page_addr_t page_addr)
bellardfd6ce8f2003-05-14 19:00:11 +00001286{
1287 PageDesc *p;
Juan Quintela4429ab42011-06-02 01:53:44 +00001288#ifndef CONFIG_USER_ONLY
1289 bool page_already_protected;
1290#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001291
bellard9fa3e852004-01-04 18:06:42 +00001292 tb->page_addr[n] = page_addr;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001293 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
bellard9fa3e852004-01-04 18:06:42 +00001294 tb->page_next[n] = p->first_tb;
Juan Quintela4429ab42011-06-02 01:53:44 +00001295#ifndef CONFIG_USER_ONLY
1296 page_already_protected = p->first_tb != NULL;
1297#endif
bellard9fa3e852004-01-04 18:06:42 +00001298 p->first_tb = (TranslationBlock *)((long)tb | n);
1299 invalidate_page_bitmap(p);
1300
bellard107db442004-06-22 18:48:46 +00001301#if defined(TARGET_HAS_SMC) || 1
bellardd720b932004-04-25 17:57:43 +00001302
bellard9fa3e852004-01-04 18:06:42 +00001303#if defined(CONFIG_USER_ONLY)
bellardfd6ce8f2003-05-14 19:00:11 +00001304 if (p->flags & PAGE_WRITE) {
pbrook53a59602006-03-25 19:31:22 +00001305 target_ulong addr;
1306 PageDesc *p2;
bellard9fa3e852004-01-04 18:06:42 +00001307 int prot;
1308
bellardfd6ce8f2003-05-14 19:00:11 +00001309 /* force the host page to be non-writable (writes will have a
1310 page fault + mprotect overhead) */
pbrook53a59602006-03-25 19:31:22 +00001311 page_addr &= qemu_host_page_mask;
bellardfd6ce8f2003-05-14 19:00:11 +00001312 prot = 0;
pbrook53a59602006-03-25 19:31:22 +00001313 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1314 addr += TARGET_PAGE_SIZE) {
1315
1316 p2 = page_find (addr >> TARGET_PAGE_BITS);
1317 if (!p2)
1318 continue;
1319 prot |= p2->flags;
1320 p2->flags &= ~PAGE_WRITE;
pbrook53a59602006-03-25 19:31:22 +00001321 }
ths5fafdf22007-09-16 21:08:06 +00001322 mprotect(g2h(page_addr), qemu_host_page_size,
bellardfd6ce8f2003-05-14 19:00:11 +00001323 (prot & PAGE_BITS) & ~PAGE_WRITE);
1324#ifdef DEBUG_TB_INVALIDATE
blueswir1ab3d1722007-11-04 07:31:40 +00001325 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
pbrook53a59602006-03-25 19:31:22 +00001326 page_addr);
bellardfd6ce8f2003-05-14 19:00:11 +00001327#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001328 }
bellard9fa3e852004-01-04 18:06:42 +00001329#else
1330 /* if some code is already present, then the pages are already
1331 protected. So we handle the case where only the first TB is
1332 allocated in a physical page */
Juan Quintela4429ab42011-06-02 01:53:44 +00001333 if (!page_already_protected) {
bellard6a00d602005-11-21 23:25:50 +00001334 tlb_protect_code(page_addr);
bellard9fa3e852004-01-04 18:06:42 +00001335 }
1336#endif
bellardd720b932004-04-25 17:57:43 +00001337
1338#endif /* TARGET_HAS_SMC */
bellardfd6ce8f2003-05-14 19:00:11 +00001339}
1340
bellard9fa3e852004-01-04 18:06:42 +00001341/* add a new TB and link it to the physical page tables. phys_page2 is
1342 (-1) to indicate that only one page contains the TB. */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001343void tb_link_page(TranslationBlock *tb,
1344 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
bellardd4e81642003-05-25 16:46:15 +00001345{
bellard9fa3e852004-01-04 18:06:42 +00001346 unsigned int h;
1347 TranslationBlock **ptb;
1348
pbrookc8a706f2008-06-02 16:16:42 +00001349 /* Grab the mmap lock to stop another thread invalidating this TB
1350 before we are done. */
1351 mmap_lock();
bellard9fa3e852004-01-04 18:06:42 +00001352 /* add in the physical hash table */
1353 h = tb_phys_hash_func(phys_pc);
1354 ptb = &tb_phys_hash[h];
1355 tb->phys_hash_next = *ptb;
1356 *ptb = tb;
bellardfd6ce8f2003-05-14 19:00:11 +00001357
1358 /* add in the page list */
bellard9fa3e852004-01-04 18:06:42 +00001359 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1360 if (phys_page2 != -1)
1361 tb_alloc_page(tb, 1, phys_page2);
1362 else
1363 tb->page_addr[1] = -1;
bellard9fa3e852004-01-04 18:06:42 +00001364
bellardd4e81642003-05-25 16:46:15 +00001365 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1366 tb->jmp_next[0] = NULL;
1367 tb->jmp_next[1] = NULL;
1368
1369 /* init original jump addresses */
1370 if (tb->tb_next_offset[0] != 0xffff)
1371 tb_reset_jump(tb, 0);
1372 if (tb->tb_next_offset[1] != 0xffff)
1373 tb_reset_jump(tb, 1);
bellard8a40a182005-11-20 10:35:40 +00001374
1375#ifdef DEBUG_TB_CHECK
1376 tb_page_check();
1377#endif
pbrookc8a706f2008-06-02 16:16:42 +00001378 mmap_unlock();
bellardfd6ce8f2003-05-14 19:00:11 +00001379}
1380
bellarda513fe12003-05-27 23:29:48 +00001381/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1382 tb[1].tc_ptr. Return NULL if not found */
Stefan Weil6375e092012-04-06 22:26:15 +02001383TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
bellarda513fe12003-05-27 23:29:48 +00001384{
1385 int m_min, m_max, m;
1386 unsigned long v;
1387 TranslationBlock *tb;
1388
1389 if (nb_tbs <= 0)
1390 return NULL;
1391 if (tc_ptr < (unsigned long)code_gen_buffer ||
1392 tc_ptr >= (unsigned long)code_gen_ptr)
1393 return NULL;
1394 /* binary search (cf Knuth) */
1395 m_min = 0;
1396 m_max = nb_tbs - 1;
1397 while (m_min <= m_max) {
1398 m = (m_min + m_max) >> 1;
1399 tb = &tbs[m];
1400 v = (unsigned long)tb->tc_ptr;
1401 if (v == tc_ptr)
1402 return tb;
1403 else if (tc_ptr < v) {
1404 m_max = m - 1;
1405 } else {
1406 m_min = m + 1;
1407 }
ths5fafdf22007-09-16 21:08:06 +00001408 }
bellarda513fe12003-05-27 23:29:48 +00001409 return &tbs[m_max];
1410}
bellard75012672003-06-21 13:11:07 +00001411
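/* Usage sketch (illustrative only; 'handle_tb_fault' is a made-up
   caller, not an upstream API): a signal handler that knows the host
   PC of faulting generated code can map it back to a TB like this. */
#if 0
static void handle_tb_fault(uintptr_t host_pc)
{
    TranslationBlock *tb = tb_find_pc(host_pc);

    if (tb != NULL) {
        /* tb->tc_ptr <= host_pc < next TB's tc_ptr: the faulting
           instruction belongs to this block's generated code */
    }
}
#endif
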
bellardea041c02003-06-25 16:16:50 +00001412static void tb_reset_jump_recursive(TranslationBlock *tb);
1413
1414static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1415{
1416 TranslationBlock *tb1, *tb_next, **ptb;
1417 unsigned int n1;
1418
1419 tb1 = tb->jmp_next[n];
1420 if (tb1 != NULL) {
1421 /* find head of list */
1422 for(;;) {
1423 n1 = (long)tb1 & 3;
1424 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1425 if (n1 == 2)
1426 break;
1427 tb1 = tb1->jmp_next[n1];
1428 }
1429 /* we are now sure that tb jumps to tb1 */
1430 tb_next = tb1;
1431
1432 /* remove tb from the jmp_first list */
1433 ptb = &tb_next->jmp_first;
1434 for(;;) {
1435 tb1 = *ptb;
1436 n1 = (long)tb1 & 3;
1437 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1438 if (n1 == n && tb1 == tb)
1439 break;
1440 ptb = &tb1->jmp_next[n1];
1441 }
1442 *ptb = tb->jmp_next[n];
1443 tb->jmp_next[n] = NULL;
ths3b46e622007-09-17 08:09:54 +00001444
bellardea041c02003-06-25 16:16:50 +00001445 /* suppress the jump to next tb in generated code */
1446 tb_reset_jump(tb, n);
1447
bellard01243112004-01-04 15:48:17 +00001448 /* suppress jumps in the tb we could have jumped to */
bellardea041c02003-06-25 16:16:50 +00001449 tb_reset_jump_recursive(tb_next);
1450 }
1451}
1452
1453static void tb_reset_jump_recursive(TranslationBlock *tb)
1454{
1455 tb_reset_jump_recursive2(tb, 0);
1456 tb_reset_jump_recursive2(tb, 1);
1457}
1458
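/* Illustrative helpers (not upstream): the jmp lists walked above tag
   the low two bits of each TranslationBlock pointer.  A tag of 0 or 1
   names the outgoing-jump slot in the pointed-to TB; the value 2 marks
   the list head stored in jmp_first. */
#if 0
static inline unsigned int tb_jmp_tag(TranslationBlock *tagged)
{
    return (long)tagged & 3;
}

static inline TranslationBlock *tb_jmp_untag(TranslationBlock *tagged)
{
    return (TranslationBlock *)((long)tagged & ~3);
}
#endif
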
bellard1fddef42005-04-17 19:16:13 +00001459#if defined(TARGET_HAS_ICE)
Paul Brook94df27f2010-02-28 23:47:45 +00001460#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +01001461static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
Paul Brook94df27f2010-02-28 23:47:45 +00001462{
1463 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1464}
1465#else
Andreas Färber9349b4f2012-03-14 01:38:32 +01001466static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
bellardd720b932004-04-25 17:57:43 +00001467{
Anthony Liguoric227f092009-10-01 16:12:16 -05001468 target_phys_addr_t addr;
Anthony Liguoric227f092009-10-01 16:12:16 -05001469 ram_addr_t ram_addr;
Avi Kivityf3705d52012-03-08 16:16:34 +02001470 MemoryRegionSection *section;
bellardd720b932004-04-25 17:57:43 +00001471
pbrookc2f07f82006-04-08 17:14:56 +00001472 addr = cpu_get_phys_page_debug(env, pc);
Avi Kivity06ef3522012-02-13 16:11:22 +02001473 section = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf3705d52012-03-08 16:16:34 +02001474 if (!(memory_region_is_ram(section->mr)
1475 || (section->mr->rom_device && section->mr->readable))) {
Avi Kivity06ef3522012-02-13 16:11:22 +02001476 return;
1477 }
Avi Kivityf3705d52012-03-08 16:16:34 +02001478 ram_addr = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
1479 + section_addr(section, addr);
pbrook706cd4b2006-04-08 17:36:21 +00001480 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
bellardd720b932004-04-25 17:57:43 +00001481}
bellardc27004e2005-01-03 23:35:10 +00001482#endif
Paul Brook94df27f2010-02-28 23:47:45 +00001483#endif /* TARGET_HAS_ICE */
bellardd720b932004-04-25 17:57:43 +00001484
Paul Brookc527ee82010-03-01 03:31:14 +00001485#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +01001486void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
1488{
1489}
1490
Andreas Färber9349b4f2012-03-14 01:38:32 +01001491int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
Paul Brookc527ee82010-03-01 03:31:14 +00001492 int flags, CPUWatchpoint **watchpoint)
1493{
1494 return -ENOSYS;
1495}
1496#else
pbrook6658ffb2007-03-16 23:58:11 +00001497/* Add a watchpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001498int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
aliguoria1d1bb32008-11-18 20:07:32 +00001499 int flags, CPUWatchpoint **watchpoint)
pbrook6658ffb2007-03-16 23:58:11 +00001500{
aliguorib4051332008-11-18 20:14:20 +00001501 target_ulong len_mask = ~(len - 1);
aliguoric0ce9982008-11-25 22:13:57 +00001502 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001503
aliguorib4051332008-11-18 20:14:20 +00001504 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
Max Filippov0dc23822012-01-29 03:15:23 +04001505 if ((len & (len - 1)) || (addr & ~len_mask) ||
1506 len == 0 || len > TARGET_PAGE_SIZE) {
aliguorib4051332008-11-18 20:14:20 +00001507 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1508 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1509 return -EINVAL;
1510 }
Anthony Liguori7267c092011-08-20 22:09:37 -05001511 wp = g_malloc(sizeof(*wp));
pbrook6658ffb2007-03-16 23:58:11 +00001512
aliguoria1d1bb32008-11-18 20:07:32 +00001513 wp->vaddr = addr;
aliguorib4051332008-11-18 20:14:20 +00001514 wp->len_mask = len_mask;
aliguoria1d1bb32008-11-18 20:07:32 +00001515 wp->flags = flags;
1516
aliguori2dc9f412008-11-18 20:56:59 +00001517 /* keep all GDB-injected watchpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001518 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001519 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001520 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001521 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001522
pbrook6658ffb2007-03-16 23:58:11 +00001523 tlb_flush_page(env, addr);
aliguoria1d1bb32008-11-18 20:07:32 +00001524
1525 if (watchpoint)
1526 *watchpoint = wp;
1527 return 0;
pbrook6658ffb2007-03-16 23:58:11 +00001528}
1529
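/* Usage sketch (illustrative only; 'watch_word' is a made-up helper):
   watch 4 bytes at 'addr' on behalf of a debugger.  Per the sanity
   check above, len must be a power of two no larger than
   TARGET_PAGE_SIZE and addr must be aligned to it. */
#if 0
static int watch_word(CPUArchState *env, target_ulong addr)
{
    CPUWatchpoint *wp;

    return cpu_watchpoint_insert(env, addr, 4, BP_GDB | BP_MEM_WRITE, &wp);
}
#endif
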
aliguoria1d1bb32008-11-18 20:07:32 +00001530/* Remove a specific watchpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001531int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
aliguoria1d1bb32008-11-18 20:07:32 +00001532 int flags)
pbrook6658ffb2007-03-16 23:58:11 +00001533{
aliguorib4051332008-11-18 20:14:20 +00001534 target_ulong len_mask = ~(len - 1);
aliguoria1d1bb32008-11-18 20:07:32 +00001535 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001536
Blue Swirl72cf2d42009-09-12 07:36:22 +00001537 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001538 if (addr == wp->vaddr && len_mask == wp->len_mask
aliguori6e140f22008-11-18 20:37:55 +00001539 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
aliguoria1d1bb32008-11-18 20:07:32 +00001540 cpu_watchpoint_remove_by_ref(env, wp);
pbrook6658ffb2007-03-16 23:58:11 +00001541 return 0;
1542 }
1543 }
aliguoria1d1bb32008-11-18 20:07:32 +00001544 return -ENOENT;
pbrook6658ffb2007-03-16 23:58:11 +00001545}
1546
aliguoria1d1bb32008-11-18 20:07:32 +00001547/* Remove a specific watchpoint by reference. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001548void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
aliguoria1d1bb32008-11-18 20:07:32 +00001549{
Blue Swirl72cf2d42009-09-12 07:36:22 +00001550 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
edgar_igl7d03f822008-05-17 18:58:29 +00001551
aliguoria1d1bb32008-11-18 20:07:32 +00001552 tlb_flush_page(env, watchpoint->vaddr);
1553
Anthony Liguori7267c092011-08-20 22:09:37 -05001554 g_free(watchpoint);
edgar_igl7d03f822008-05-17 18:58:29 +00001555}
1556
aliguoria1d1bb32008-11-18 20:07:32 +00001557/* Remove all matching watchpoints. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001558void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
aliguoria1d1bb32008-11-18 20:07:32 +00001559{
aliguoric0ce9982008-11-25 22:13:57 +00001560 CPUWatchpoint *wp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001561
Blue Swirl72cf2d42009-09-12 07:36:22 +00001562 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001563 if (wp->flags & mask)
1564 cpu_watchpoint_remove_by_ref(env, wp);
aliguoric0ce9982008-11-25 22:13:57 +00001565 }
aliguoria1d1bb32008-11-18 20:07:32 +00001566}
Paul Brookc527ee82010-03-01 03:31:14 +00001567#endif
aliguoria1d1bb32008-11-18 20:07:32 +00001568
1569/* Add a breakpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001570int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
aliguoria1d1bb32008-11-18 20:07:32 +00001571 CPUBreakpoint **breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001572{
bellard1fddef42005-04-17 19:16:13 +00001573#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001574 CPUBreakpoint *bp;
ths3b46e622007-09-17 08:09:54 +00001575
Anthony Liguori7267c092011-08-20 22:09:37 -05001576 bp = g_malloc(sizeof(*bp));
aliguoria1d1bb32008-11-18 20:07:32 +00001577
1578 bp->pc = pc;
1579 bp->flags = flags;
1580
aliguori2dc9f412008-11-18 20:56:59 +00001581 /* keep all GDB-injected breakpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001582 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001583 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001584 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001585 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001586
1587 breakpoint_invalidate(env, pc);
1588
1589 if (breakpoint)
1590 *breakpoint = bp;
1591 return 0;
1592#else
1593 return -ENOSYS;
1594#endif
1595}
1596
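/* Usage sketch (illustrative only; 'toggle_gdb_breakpoint' is a
   made-up helper): insert and remove must be called with matching pc
   and flags for the lookup in cpu_breakpoint_remove() to succeed. */
#if 0
static void toggle_gdb_breakpoint(CPUArchState *env, target_ulong pc)
{
    CPUBreakpoint *bp;

    if (cpu_breakpoint_insert(env, pc, BP_GDB, &bp) == 0) {
        /* ... run the guest until the breakpoint fires ... */
        cpu_breakpoint_remove(env, pc, BP_GDB);
    }
}
#endif
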
1597/* Remove a specific breakpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001598int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
aliguoria1d1bb32008-11-18 20:07:32 +00001599{
1600#if defined(TARGET_HAS_ICE)
1601 CPUBreakpoint *bp;
1602
Blue Swirl72cf2d42009-09-12 07:36:22 +00001603 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00001604 if (bp->pc == pc && bp->flags == flags) {
1605 cpu_breakpoint_remove_by_ref(env, bp);
bellard4c3a88a2003-07-26 12:06:08 +00001606 return 0;
aliguoria1d1bb32008-11-18 20:07:32 +00001607 }
bellard4c3a88a2003-07-26 12:06:08 +00001608 }
aliguoria1d1bb32008-11-18 20:07:32 +00001609 return -ENOENT;
bellard4c3a88a2003-07-26 12:06:08 +00001610#else
aliguoria1d1bb32008-11-18 20:07:32 +00001611 return -ENOSYS;
bellard4c3a88a2003-07-26 12:06:08 +00001612#endif
1613}
1614
aliguoria1d1bb32008-11-18 20:07:32 +00001615/* Remove a specific breakpoint by reference. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001616void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001617{
bellard1fddef42005-04-17 19:16:13 +00001618#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001619 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
bellardd720b932004-04-25 17:57:43 +00001620
aliguoria1d1bb32008-11-18 20:07:32 +00001621 breakpoint_invalidate(env, breakpoint->pc);
1622
Anthony Liguori7267c092011-08-20 22:09:37 -05001623 g_free(breakpoint);
aliguoria1d1bb32008-11-18 20:07:32 +00001624#endif
1625}
1626
1627/* Remove all matching breakpoints. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001628void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
aliguoria1d1bb32008-11-18 20:07:32 +00001629{
1630#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001631 CPUBreakpoint *bp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001632
Blue Swirl72cf2d42009-09-12 07:36:22 +00001633 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001634 if (bp->flags & mask)
1635 cpu_breakpoint_remove_by_ref(env, bp);
aliguoric0ce9982008-11-25 22:13:57 +00001636 }
bellard4c3a88a2003-07-26 12:06:08 +00001637#endif
1638}
1639
bellardc33a3462003-07-29 20:50:33 +00001640/* enable or disable single step mode. EXCP_DEBUG is returned by the
1641 CPU loop after each instruction */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001642void cpu_single_step(CPUArchState *env, int enabled)
bellardc33a3462003-07-29 20:50:33 +00001643{
bellard1fddef42005-04-17 19:16:13 +00001644#if defined(TARGET_HAS_ICE)
bellardc33a3462003-07-29 20:50:33 +00001645 if (env->singlestep_enabled != enabled) {
1646 env->singlestep_enabled = enabled;
aliguorie22a25c2009-03-12 20:12:48 +00001647 if (kvm_enabled())
1648 kvm_update_guest_debug(env, 0);
1649 else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01001650 /* must flush all the translated code to avoid inconsistencies */
aliguorie22a25c2009-03-12 20:12:48 +00001651 /* XXX: only flush what is necessary */
1652 tb_flush(env);
1653 }
bellardc33a3462003-07-29 20:50:33 +00001654 }
1655#endif
1656}
1657
bellard34865132003-10-05 14:28:56 +00001658/* enable or disable low level logging */
1659void cpu_set_log(int log_flags)
1660{
1661 loglevel = log_flags;
1662 if (loglevel && !logfile) {
pbrook11fcfab2007-07-01 18:21:11 +00001663 logfile = fopen(logfilename, log_append ? "a" : "w");
bellard34865132003-10-05 14:28:56 +00001664 if (!logfile) {
1665 perror(logfilename);
1666 _exit(1);
1667 }
bellard9fa3e852004-01-04 18:06:42 +00001668#if !defined(CONFIG_SOFTMMU)
1669 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1670 {
blueswir1b55266b2008-09-20 08:07:15 +00001671 static char logfile_buf[4096];
bellard9fa3e852004-01-04 18:06:42 +00001672 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1673 }
Stefan Weildaf767b2011-12-03 22:32:37 +01001674#elif defined(_WIN32)
1675 /* Win32 doesn't support line-buffering, so use unbuffered output. */
1676 setvbuf(logfile, NULL, _IONBF, 0);
1677#else
bellard34865132003-10-05 14:28:56 +00001678 setvbuf(logfile, NULL, _IOLBF, 0);
bellard9fa3e852004-01-04 18:06:42 +00001679#endif
pbrooke735b912007-06-30 13:53:24 +00001680 log_append = 1;
1681 }
1682 if (!loglevel && logfile) {
1683 fclose(logfile);
1684 logfile = NULL;
bellard34865132003-10-05 14:28:56 +00001685 }
1686}
1687
1688void cpu_set_log_filename(const char *filename)
1689{
1690 logfilename = strdup(filename);
pbrooke735b912007-06-30 13:53:24 +00001691 if (logfile) {
1692 fclose(logfile);
1693 logfile = NULL;
1694 }
1695 cpu_set_log(loglevel);
bellard34865132003-10-05 14:28:56 +00001696}
bellardc33a3462003-07-29 20:50:33 +00001697
Andreas Färber9349b4f2012-03-14 01:38:32 +01001698static void cpu_unlink_tb(CPUArchState *env)
bellardea041c02003-06-25 16:16:50 +00001699{
pbrookd5975362008-06-07 20:50:51 +00001700 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1701 problem and hope the cpu will stop of its own accord. For userspace
1702 emulation this often isn't actually as bad as it sounds. Often
1703 signals are used primarily to interrupt blocking syscalls. */
aurel323098dba2009-03-07 21:28:24 +00001704 TranslationBlock *tb;
Anthony Liguoric227f092009-10-01 16:12:16 -05001705 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
aurel323098dba2009-03-07 21:28:24 +00001706
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001707 spin_lock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001708 tb = env->current_tb;
1709 /* if the cpu is currently executing code, we must unlink it and
1710 all the potentially executing TBs */
Riku Voipiof76cfe52009-12-04 15:16:30 +02001711 if (tb) {
aurel323098dba2009-03-07 21:28:24 +00001712 env->current_tb = NULL;
1713 tb_reset_jump_recursive(tb);
aurel323098dba2009-03-07 21:28:24 +00001714 }
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001715 spin_unlock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001716}
1717
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001718#ifndef CONFIG_USER_ONLY
aurel323098dba2009-03-07 21:28:24 +00001719/* mask must never be zero, except for A20 change call */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001720static void tcg_handle_interrupt(CPUArchState *env, int mask)
aurel323098dba2009-03-07 21:28:24 +00001721{
1722 int old_mask;
1723
1724 old_mask = env->interrupt_request;
1725 env->interrupt_request |= mask;
1726
aliguori8edac962009-04-24 18:03:45 +00001727 /*
1728 * If called from iothread context, wake the target cpu in
1729 * case it's halted.
1730 */
Jan Kiszkab7680cb2011-03-12 17:43:51 +01001731 if (!qemu_cpu_is_self(env)) {
aliguori8edac962009-04-24 18:03:45 +00001732 qemu_cpu_kick(env);
1733 return;
1734 }
aliguori8edac962009-04-24 18:03:45 +00001735
pbrook2e70f6e2008-06-29 01:03:05 +00001736 if (use_icount) {
pbrook266910c2008-07-09 15:31:50 +00001737 env->icount_decr.u16.high = 0xffff;
pbrook2e70f6e2008-06-29 01:03:05 +00001738 if (!can_do_io(env)
aurel32be214e62009-03-06 21:48:00 +00001739 && (mask & ~old_mask) != 0) {
pbrook2e70f6e2008-06-29 01:03:05 +00001740 cpu_abort(env, "Raised interrupt while not in I/O function");
1741 }
pbrook2e70f6e2008-06-29 01:03:05 +00001742 } else {
aurel323098dba2009-03-07 21:28:24 +00001743 cpu_unlink_tb(env);
bellardea041c02003-06-25 16:16:50 +00001744 }
1745}
1746
Jan Kiszkaec6959d2011-04-13 01:32:56 +02001747CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1748
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001749#else /* CONFIG_USER_ONLY */
1750
Andreas Färber9349b4f2012-03-14 01:38:32 +01001751void cpu_interrupt(CPUArchState *env, int mask)
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001752{
1753 env->interrupt_request |= mask;
1754 cpu_unlink_tb(env);
1755}
1756#endif /* CONFIG_USER_ONLY */
1757
Andreas Färber9349b4f2012-03-14 01:38:32 +01001758void cpu_reset_interrupt(CPUArchState *env, int mask)
bellardb54ad042004-05-20 13:42:52 +00001759{
1760 env->interrupt_request &= ~mask;
1761}
1762
Andreas Färber9349b4f2012-03-14 01:38:32 +01001763void cpu_exit(CPUArchState *env)
aurel323098dba2009-03-07 21:28:24 +00001764{
1765 env->exit_request = 1;
1766 cpu_unlink_tb(env);
1767}
1768
blueswir1c7cd6a32008-10-02 18:27:46 +00001769const CPULogItem cpu_log_items[] = {
ths5fafdf22007-09-16 21:08:06 +00001770 { CPU_LOG_TB_OUT_ASM, "out_asm",
bellardf193c792004-03-21 17:06:25 +00001771 "show generated host assembly code for each compiled TB" },
1772 { CPU_LOG_TB_IN_ASM, "in_asm",
1773 "show target assembly code for each compiled TB" },
ths5fafdf22007-09-16 21:08:06 +00001774 { CPU_LOG_TB_OP, "op",
bellard57fec1f2008-02-01 10:50:11 +00001775 "show micro ops for each compiled TB" },
bellardf193c792004-03-21 17:06:25 +00001776 { CPU_LOG_TB_OP_OPT, "op_opt",
blueswir1e01a1152008-03-14 17:37:11 +00001777 "show micro ops "
1778#ifdef TARGET_I386
1779 "before eflags optimization and "
bellardf193c792004-03-21 17:06:25 +00001780#endif
blueswir1e01a1152008-03-14 17:37:11 +00001781 "after liveness analysis" },
bellardf193c792004-03-21 17:06:25 +00001782 { CPU_LOG_INT, "int",
1783 "show interrupts/exceptions in short format" },
1784 { CPU_LOG_EXEC, "exec",
1785 "show trace before each executed TB (lots of logs)" },
bellard9fddaa02004-05-21 12:59:32 +00001786 { CPU_LOG_TB_CPU, "cpu",
thse91c8a72007-06-03 13:35:16 +00001787 "show CPU state before block translation" },
bellardf193c792004-03-21 17:06:25 +00001788#ifdef TARGET_I386
1789 { CPU_LOG_PCALL, "pcall",
1790 "show protected mode far calls/returns/exceptions" },
aliguorieca1bdf2009-01-26 19:54:31 +00001791 { CPU_LOG_RESET, "cpu_reset",
1792 "show CPU state before CPU resets" },
bellardf193c792004-03-21 17:06:25 +00001793#endif
bellard8e3a9fd2004-10-09 17:32:58 +00001794#ifdef DEBUG_IOPORT
bellardfd872592004-05-12 19:11:15 +00001795 { CPU_LOG_IOPORT, "ioport",
1796 "show all i/o ports accesses" },
bellard8e3a9fd2004-10-09 17:32:58 +00001797#endif
bellardf193c792004-03-21 17:06:25 +00001798 { 0, NULL, NULL },
1799};
1800
1801static int cmp1(const char *s1, int n, const char *s2)
1802{
1803 if (strlen(s2) != n)
1804 return 0;
1805 return memcmp(s1, s2, n) == 0;
1806}
ths3b46e622007-09-17 08:09:54 +00001807
bellardf193c792004-03-21 17:06:25 +00001808/* takes a comma-separated list of log masks. Returns 0 on error. */
1809int cpu_str_to_log_mask(const char *str)
1810{
blueswir1c7cd6a32008-10-02 18:27:46 +00001811 const CPULogItem *item;
bellardf193c792004-03-21 17:06:25 +00001812 int mask;
1813 const char *p, *p1;
1814
1815 p = str;
1816 mask = 0;
1817 for(;;) {
1818 p1 = strchr(p, ',');
1819 if (!p1)
1820 p1 = p + strlen(p);
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001821 if(cmp1(p,p1-p,"all")) {
1822 for(item = cpu_log_items; item->mask != 0; item++) {
1823 mask |= item->mask;
1824 }
1825 } else {
1826 for(item = cpu_log_items; item->mask != 0; item++) {
1827 if (cmp1(p, p1 - p, item->name))
1828 goto found;
1829 }
1830 return 0;
bellardf193c792004-03-21 17:06:25 +00001831 }
bellardf193c792004-03-21 17:06:25 +00001832 found:
1833 mask |= item->mask;
1834 if (*p1 != ',')
1835 break;
1836 p = p1 + 1;
1837 }
1838 return mask;
1839}
bellardea041c02003-06-25 16:16:50 +00001840
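/* Usage sketch (illustrative only; 'enable_exec_logging' is a made-up
   helper): this parser backs the -d command line option.  A return
   value of zero means the string contained an unknown item. */
#if 0
static void enable_exec_logging(void)
{
    int mask = cpu_str_to_log_mask("in_asm,exec");

    if (mask != 0) {
        cpu_set_log(mask);
    }
}
#endif
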
Andreas Färber9349b4f2012-03-14 01:38:32 +01001841void cpu_abort(CPUArchState *env, const char *fmt, ...)
bellard75012672003-06-21 13:11:07 +00001842{
1843 va_list ap;
pbrook493ae1f2007-11-23 16:53:59 +00001844 va_list ap2;
bellard75012672003-06-21 13:11:07 +00001845
1846 va_start(ap, fmt);
pbrook493ae1f2007-11-23 16:53:59 +00001847 va_copy(ap2, ap);
bellard75012672003-06-21 13:11:07 +00001848 fprintf(stderr, "qemu: fatal: ");
1849 vfprintf(stderr, fmt, ap);
1850 fprintf(stderr, "\n");
1851#ifdef TARGET_I386
bellard7fe48482004-10-09 18:08:01 +00001852 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1853#else
1854 cpu_dump_state(env, stderr, fprintf, 0);
bellard75012672003-06-21 13:11:07 +00001855#endif
aliguori93fcfe32009-01-15 22:34:14 +00001856 if (qemu_log_enabled()) {
1857 qemu_log("qemu: fatal: ");
1858 qemu_log_vprintf(fmt, ap2);
1859 qemu_log("\n");
j_mayerf9373292007-09-29 12:18:20 +00001860#ifdef TARGET_I386
aliguori93fcfe32009-01-15 22:34:14 +00001861 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
j_mayerf9373292007-09-29 12:18:20 +00001862#else
aliguori93fcfe32009-01-15 22:34:14 +00001863 log_cpu_state(env, 0);
j_mayerf9373292007-09-29 12:18:20 +00001864#endif
aliguori31b1a7b2009-01-15 22:35:09 +00001865 qemu_log_flush();
aliguori93fcfe32009-01-15 22:34:14 +00001866 qemu_log_close();
balrog924edca2007-06-10 14:07:13 +00001867 }
pbrook493ae1f2007-11-23 16:53:59 +00001868 va_end(ap2);
j_mayerf9373292007-09-29 12:18:20 +00001869 va_end(ap);
Riku Voipiofd052bf2010-01-25 14:30:49 +02001870#if defined(CONFIG_USER_ONLY)
1871 {
1872 struct sigaction act;
1873 sigfillset(&act.sa_mask);
1874 act.sa_handler = SIG_DFL;
1875 sigaction(SIGABRT, &act, NULL);
1876 }
1877#endif
bellard75012672003-06-21 13:11:07 +00001878 abort();
1879}
1880
Andreas Färber9349b4f2012-03-14 01:38:32 +01001881CPUArchState *cpu_copy(CPUArchState *env)
thsc5be9f02007-02-28 20:20:53 +00001882{
Andreas Färber9349b4f2012-03-14 01:38:32 +01001883 CPUArchState *new_env = cpu_init(env->cpu_model_str);
1884 CPUArchState *next_cpu = new_env->next_cpu;
thsc5be9f02007-02-28 20:20:53 +00001885 int cpu_index = new_env->cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001886#if defined(TARGET_HAS_ICE)
1887 CPUBreakpoint *bp;
1888 CPUWatchpoint *wp;
1889#endif
1890
Andreas Färber9349b4f2012-03-14 01:38:32 +01001891 memcpy(new_env, env, sizeof(CPUArchState));
aliguori5a38f082009-01-15 20:16:51 +00001892
1893 /* Preserve chaining and index. */
thsc5be9f02007-02-28 20:20:53 +00001894 new_env->next_cpu = next_cpu;
1895 new_env->cpu_index = cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001896
1897 /* Clone all break/watchpoints.
1898 Note: Once we support ptrace with hw-debug register access, make sure
1899 BP_CPU break/watchpoints are handled correctly on clone. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00001900 QTAILQ_INIT(&new_env->breakpoints);
1901 QTAILQ_INIT(&new_env->watchpoints);
aliguori5a38f082009-01-15 20:16:51 +00001902#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001903 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001904 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1905 }
Blue Swirl72cf2d42009-09-12 07:36:22 +00001906 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001907 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1908 wp->flags, NULL);
1909 }
1910#endif
1911
thsc5be9f02007-02-28 20:20:53 +00001912 return new_env;
1913}
1914
bellard01243112004-01-04 15:48:17 +00001915#if !defined(CONFIG_USER_ONLY)
1916
Andreas Färber9349b4f2012-03-14 01:38:32 +01001917static inline void tlb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
edgar_igl5c751e92008-05-06 08:44:21 +00001918{
1919 unsigned int i;
1920
1921 /* Discard jump cache entries for any tb which might potentially
1922 overlap the flushed page. */
1923 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1924 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001925 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001926
1927 i = tb_jmp_cache_hash_page(addr);
1928 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001929 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001930}
1931
Igor Kovalenko08738982009-07-12 02:15:40 +04001932static CPUTLBEntry s_cputlb_empty_entry = {
1933 .addr_read = -1,
1934 .addr_write = -1,
1935 .addr_code = -1,
1936 .addend = -1,
1937};
1938
Peter Maydell771124e2012-01-17 13:23:13 +00001939/* NOTE:
1940 * If flush_global is true (the usual case), flush all tlb entries.
1941 * If flush_global is false, flush (at least) all tlb entries not
1942 * marked global.
1943 *
1944 * Since QEMU doesn't currently implement a global/not-global flag
1945 * for tlb entries, at the moment tlb_flush() will also flush all
1946 * tlb entries in the flush_global == false case. This is OK because
1947 * CPU architectures generally permit an implementation to drop
1948 * entries from the TLB at any time, so flushing more entries than
1949 * required is only an efficiency issue, not a correctness issue.
1950 */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001951void tlb_flush(CPUArchState *env, int flush_global)
bellard33417e72003-08-10 21:47:01 +00001952{
bellard33417e72003-08-10 21:47:01 +00001953 int i;
bellard01243112004-01-04 15:48:17 +00001954
bellard9fa3e852004-01-04 18:06:42 +00001955#if defined(DEBUG_TLB)
1956 printf("tlb_flush:\n");
1957#endif
bellard01243112004-01-04 15:48:17 +00001958 /* must reset current TB so that interrupts cannot modify the
1959 links while we are modifying them */
1960 env->current_tb = NULL;
1961
bellard33417e72003-08-10 21:47:01 +00001962 for(i = 0; i < CPU_TLB_SIZE; i++) {
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001963 int mmu_idx;
1964 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
Igor Kovalenko08738982009-07-12 02:15:40 +04001965 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001966 }
bellard33417e72003-08-10 21:47:01 +00001967 }
bellard9fa3e852004-01-04 18:06:42 +00001968
bellard8a40a182005-11-20 10:35:40 +00001969 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
bellard9fa3e852004-01-04 18:06:42 +00001970
Paul Brookd4c430a2010-03-17 02:14:28 +00001971 env->tlb_flush_addr = -1;
1972 env->tlb_flush_mask = 0;
bellarde3db7222005-01-26 22:00:47 +00001973 tlb_flush_count++;
bellard33417e72003-08-10 21:47:01 +00001974}
1975
bellard274da6b2004-05-20 21:56:27 +00001976static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
bellard61382a52003-10-27 21:22:23 +00001977{
ths5fafdf22007-09-16 21:08:06 +00001978 if (addr == (tlb_entry->addr_read &
bellard84b7b8e2005-11-28 21:19:04 +00001979 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00001980 addr == (tlb_entry->addr_write &
bellard84b7b8e2005-11-28 21:19:04 +00001981 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00001982 addr == (tlb_entry->addr_code &
bellard84b7b8e2005-11-28 21:19:04 +00001983 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
Igor Kovalenko08738982009-07-12 02:15:40 +04001984 *tlb_entry = s_cputlb_empty_entry;
bellard84b7b8e2005-11-28 21:19:04 +00001985 }
bellard61382a52003-10-27 21:22:23 +00001986}
1987
Andreas Färber9349b4f2012-03-14 01:38:32 +01001988void tlb_flush_page(CPUArchState *env, target_ulong addr)
bellard33417e72003-08-10 21:47:01 +00001989{
bellard8a40a182005-11-20 10:35:40 +00001990 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001991 int mmu_idx;
bellard01243112004-01-04 15:48:17 +00001992
bellard9fa3e852004-01-04 18:06:42 +00001993#if defined(DEBUG_TLB)
bellard108c49b2005-07-24 12:55:09 +00001994 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
bellard9fa3e852004-01-04 18:06:42 +00001995#endif
Paul Brookd4c430a2010-03-17 02:14:28 +00001996 /* Check if we need to flush due to large pages. */
1997 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
1998#if defined(DEBUG_TLB)
1999 printf("tlb_flush_page: forced full flush ("
2000 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
2001 env->tlb_flush_addr, env->tlb_flush_mask);
2002#endif
2003 tlb_flush(env, 1);
2004 return;
2005 }
bellard01243112004-01-04 15:48:17 +00002006 /* must reset current TB so that interrupts cannot modify the
2007 links while we are modifying them */
2008 env->current_tb = NULL;
bellard33417e72003-08-10 21:47:01 +00002009
bellard61382a52003-10-27 21:22:23 +00002010 addr &= TARGET_PAGE_MASK;
bellard33417e72003-08-10 21:47:01 +00002011 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002012 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2013 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
bellard01243112004-01-04 15:48:17 +00002014
edgar_igl5c751e92008-05-06 08:44:21 +00002015 tlb_flush_jmp_cache(env, addr);
bellard9fa3e852004-01-04 18:06:42 +00002016}
2017
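/* Illustrative helper (not upstream): the direct-mapped TLB slot
   computation repeated above and in tlb_set_dirty() and tlb_set_page()
   below; the TLB is a simple direct-mapped cache indexed by the low
   bits of the virtual page number. */
#if 0
static inline int tlb_slot(target_ulong vaddr)
{
    return (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
}
#endif
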
bellard9fa3e852004-01-04 18:06:42 +00002018/* update the TLBs so that writes to code in the virtual page 'addr'
2019 can be detected */
Anthony Liguoric227f092009-10-01 16:12:16 -05002020static void tlb_protect_code(ram_addr_t ram_addr)
bellard61382a52003-10-27 21:22:23 +00002021{
ths5fafdf22007-09-16 21:08:06 +00002022 cpu_physical_memory_reset_dirty(ram_addr,
bellard6a00d602005-11-21 23:25:50 +00002023 ram_addr + TARGET_PAGE_SIZE,
2024 CODE_DIRTY_FLAG);
bellard9fa3e852004-01-04 18:06:42 +00002025}
2026
bellard9fa3e852004-01-04 18:06:42 +00002027/* update the TLB so that writes in physical page 'phys_addr' are no longer
bellard3a7d9292005-08-21 09:26:42 +00002028 tested for self modifying code */
Andreas Färber9349b4f2012-03-14 01:38:32 +01002029static void tlb_unprotect_code_phys(CPUArchState *env, ram_addr_t ram_addr,
bellard3a7d9292005-08-21 09:26:42 +00002030 target_ulong vaddr)
bellard9fa3e852004-01-04 18:06:42 +00002031{
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002032 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
bellard1ccde1c2004-02-06 19:46:14 +00002033}
2034
Avi Kivity7859cc62012-03-14 16:19:39 +02002035static bool tlb_is_dirty_ram(CPUTLBEntry *tlbe)
2036{
2037 return (tlbe->addr_write & (TLB_INVALID_MASK|TLB_MMIO|TLB_NOTDIRTY)) == 0;
2038}
2039
ths5fafdf22007-09-16 21:08:06 +00002040static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
bellard1ccde1c2004-02-06 19:46:14 +00002041 unsigned long start, unsigned long length)
2042{
2043 unsigned long addr;
Avi Kivity7859cc62012-03-14 16:19:39 +02002044 if (tlb_is_dirty_ram(tlb_entry)) {
bellard84b7b8e2005-11-28 21:19:04 +00002045 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
bellard1ccde1c2004-02-06 19:46:14 +00002046 if ((addr - start) < length) {
Avi Kivity7859cc62012-03-14 16:19:39 +02002047 tlb_entry->addr_write |= TLB_NOTDIRTY;
bellard1ccde1c2004-02-06 19:46:14 +00002048 }
2049 }
2050}
2051
pbrook5579c7f2009-04-11 14:47:08 +00002052/* Note: start and end must be within the same ram block. */
Anthony Liguoric227f092009-10-01 16:12:16 -05002053void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
bellard0a962c02005-02-10 22:00:27 +00002054 int dirty_flags)
bellard1ccde1c2004-02-06 19:46:14 +00002055{
Andreas Färber9349b4f2012-03-14 01:38:32 +01002056 CPUArchState *env;
bellard4f2ac232004-04-26 19:44:02 +00002057 unsigned long length, start1;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002058 int i;
bellard1ccde1c2004-02-06 19:46:14 +00002059
2060 start &= TARGET_PAGE_MASK;
2061 end = TARGET_PAGE_ALIGN(end);
2062
2063 length = end - start;
2064 if (length == 0)
2065 return;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002066 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00002067
bellard1ccde1c2004-02-06 19:46:14 +00002068 /* we modify the TLB cache so that the dirty bit will be set again
2069 when accessing the range */
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02002070 start1 = (unsigned long)qemu_safe_ram_ptr(start);
Stefan Weila57d23e2011-04-30 22:49:26 +02002071 /* Check that we don't span multiple blocks - this breaks the
pbrook5579c7f2009-04-11 14:47:08 +00002072 address comparisons below. */
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02002073 if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
pbrook5579c7f2009-04-11 14:47:08 +00002074 != (end - 1) - start) {
2075 abort();
2076 }
2077
bellard6a00d602005-11-21 23:25:50 +00002078 for(env = first_cpu; env != NULL; env = env->next_cpu) {
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002079 int mmu_idx;
2080 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2081 for(i = 0; i < CPU_TLB_SIZE; i++)
2082 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2083 start1, length);
2084 }
bellard6a00d602005-11-21 23:25:50 +00002085 }
bellard1ccde1c2004-02-06 19:46:14 +00002086}
2087
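/* Illustrative sketch (not upstream, simplified): after the loop above,
   every cached write entry in the range carries TLB_NOTDIRTY, so guest
   stores leave the fast path and re-mark the page dirty.  Conceptually,
   the fast-path test in the softmmu store helpers is: */
#if 0
static inline bool tlb_write_hits_fast_path(const CPUTLBEntry *te,
                                            target_ulong vaddr)
{
    /* any of TLB_INVALID_MASK, TLB_MMIO or TLB_NOTDIRTY forces a miss */
    return te->addr_write == (vaddr & TARGET_PAGE_MASK);
}
#endif
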
aliguori74576192008-10-06 14:02:03 +00002088int cpu_physical_memory_set_dirty_tracking(int enable)
2089{
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002090 int ret = 0;
aliguori74576192008-10-06 14:02:03 +00002091 in_migration = enable;
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002092 return ret;
aliguori74576192008-10-06 14:02:03 +00002093}
2094
bellard3a7d9292005-08-21 09:26:42 +00002095static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2096{
Anthony Liguoric227f092009-10-01 16:12:16 -05002097 ram_addr_t ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00002098 void *p;
bellard3a7d9292005-08-21 09:26:42 +00002099
Avi Kivity7859cc62012-03-14 16:19:39 +02002100 if (tlb_is_dirty_ram(tlb_entry)) {
pbrook5579c7f2009-04-11 14:47:08 +00002101 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2102 + tlb_entry->addend);
Marcelo Tosattie8902612010-10-11 15:31:19 -03002103 ram_addr = qemu_ram_addr_from_host_nofail(p);
bellard3a7d9292005-08-21 09:26:42 +00002104 if (!cpu_physical_memory_is_dirty(ram_addr)) {
pbrook0f459d12008-06-09 00:20:13 +00002105 tlb_entry->addr_write |= TLB_NOTDIRTY;
bellard3a7d9292005-08-21 09:26:42 +00002106 }
2107 }
2108}
2109
2110/* update the TLB according to the current state of the dirty bits */
Andreas Färber9349b4f2012-03-14 01:38:32 +01002111void cpu_tlb_update_dirty(CPUArchState *env)
bellard3a7d9292005-08-21 09:26:42 +00002112{
2113 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002114 int mmu_idx;
2115 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2116 for(i = 0; i < CPU_TLB_SIZE; i++)
2117 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2118 }
bellard3a7d9292005-08-21 09:26:42 +00002119}
2120
pbrook0f459d12008-06-09 00:20:13 +00002121static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002122{
pbrook0f459d12008-06-09 00:20:13 +00002123 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2124 tlb_entry->addr_write = vaddr;
bellard1ccde1c2004-02-06 19:46:14 +00002125}
2126
pbrook0f459d12008-06-09 00:20:13 +00002127/* update the TLB corresponding to virtual page vaddr
2128 so that it is no longer dirty */
Andreas Färber9349b4f2012-03-14 01:38:32 +01002129static inline void tlb_set_dirty(CPUArchState *env, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002130{
bellard1ccde1c2004-02-06 19:46:14 +00002131 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002132 int mmu_idx;
bellard1ccde1c2004-02-06 19:46:14 +00002133
pbrook0f459d12008-06-09 00:20:13 +00002134 vaddr &= TARGET_PAGE_MASK;
bellard1ccde1c2004-02-06 19:46:14 +00002135 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002136 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2137 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
bellard9fa3e852004-01-04 18:06:42 +00002138}
2139
Paul Brookd4c430a2010-03-17 02:14:28 +00002140/* Our TLB does not support large pages, so remember the area covered by
2141 large pages and trigger a full TLB flush if these are invalidated. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01002142static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
Paul Brookd4c430a2010-03-17 02:14:28 +00002143 target_ulong size)
2144{
2145 target_ulong mask = ~(size - 1);
2146
2147 if (env->tlb_flush_addr == (target_ulong)-1) {
2148 env->tlb_flush_addr = vaddr & mask;
2149 env->tlb_flush_mask = mask;
2150 return;
2151 }
2152 /* Extend the existing region to include the new page.
2153 This is a compromise between unnecessary flushes and the cost
2154 of maintaining a full variable size TLB. */
2155 mask &= env->tlb_flush_mask;
2156 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2157 mask <<= 1;
2158 }
2159 env->tlb_flush_addr &= mask;
2160 env->tlb_flush_mask = mask;
2161}
2162
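/* Worked example (illustrative, assuming a 32-bit target with 4K
   pages): merging a second large page widens the recorded flush
   region. */
#if 0
static void large_page_example(CPUArchState *env)
{
    tlb_add_large_page(env, 0x00200000, 0x200000);   /* first 2M page */
    /* tlb_flush_addr == 0x00200000, tlb_flush_mask == 0xffe00000 */
    tlb_add_large_page(env, 0x00600000, 0x200000);   /* second 2M page */
    /* the mask is shifted left until both pages match:
       tlb_flush_addr == 0x00000000, tlb_flush_mask == 0xff800000,
       i.e. one 8M region covering both pages */
}
#endif
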
Avi Kivity06ef3522012-02-13 16:11:22 +02002163static bool is_ram_rom(MemoryRegionSection *s)
Avi Kivity1d393fa2012-01-01 21:15:42 +02002164{
Avi Kivity06ef3522012-02-13 16:11:22 +02002165 return memory_region_is_ram(s->mr);
Avi Kivity1d393fa2012-01-01 21:15:42 +02002166}
2167
Avi Kivity06ef3522012-02-13 16:11:22 +02002168static bool is_romd(MemoryRegionSection *s)
Avi Kivity75c578d2012-01-02 15:40:52 +02002169{
Avi Kivity06ef3522012-02-13 16:11:22 +02002170 MemoryRegion *mr = s->mr;
Avi Kivity75c578d2012-01-02 15:40:52 +02002171
Avi Kivity75c578d2012-01-02 15:40:52 +02002172 return mr->rom_device && mr->readable;
2173}
2174
Avi Kivity06ef3522012-02-13 16:11:22 +02002175static bool is_ram_rom_romd(MemoryRegionSection *s)
Avi Kivity1d393fa2012-01-01 21:15:42 +02002176{
Avi Kivity06ef3522012-02-13 16:11:22 +02002177 return is_ram_rom(s) || is_romd(s);
Avi Kivity1d393fa2012-01-01 21:15:42 +02002178}
2179
Paul Brookd4c430a2010-03-17 02:14:28 +00002180/* Add a new TLB entry. At most one entry for a given virtual address
2181 is permitted. Only a single TARGET_PAGE_SIZE region is mapped; the
2182 supplied size is only used by tlb_flush_page. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01002183void tlb_set_page(CPUArchState *env, target_ulong vaddr,
Paul Brookd4c430a2010-03-17 02:14:28 +00002184 target_phys_addr_t paddr, int prot,
2185 int mmu_idx, target_ulong size)
bellard9fa3e852004-01-04 18:06:42 +00002186{
Avi Kivityf3705d52012-03-08 16:16:34 +02002187 MemoryRegionSection *section;
bellard9fa3e852004-01-04 18:06:42 +00002188 unsigned int index;
bellard4f2ac232004-04-26 19:44:02 +00002189 target_ulong address;
pbrook0f459d12008-06-09 00:20:13 +00002190 target_ulong code_address;
Paul Brook355b1942010-04-05 00:28:53 +01002191 unsigned long addend;
bellard84b7b8e2005-11-28 21:19:04 +00002192 CPUTLBEntry *te;
aliguoria1d1bb32008-11-18 20:07:32 +00002193 CPUWatchpoint *wp;
Anthony Liguoric227f092009-10-01 16:12:16 -05002194 target_phys_addr_t iotlb;
bellard9fa3e852004-01-04 18:06:42 +00002195
Paul Brookd4c430a2010-03-17 02:14:28 +00002196 assert(size >= TARGET_PAGE_SIZE);
2197 if (size != TARGET_PAGE_SIZE) {
2198 tlb_add_large_page(env, vaddr, size);
2199 }
Avi Kivity06ef3522012-02-13 16:11:22 +02002200 section = phys_page_find(paddr >> TARGET_PAGE_BITS);
bellard9fa3e852004-01-04 18:06:42 +00002201#if defined(DEBUG_TLB)
Stefan Weil7fd3f492010-09-30 22:39:51 +02002202 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
2203 " prot=%x idx=%d pd=0x%08lx\n",
2204 vaddr, paddr, prot, mmu_idx, pd);
bellard9fa3e852004-01-04 18:06:42 +00002205#endif
2206
pbrook0f459d12008-06-09 00:20:13 +00002207 address = vaddr;
Avi Kivityf3705d52012-03-08 16:16:34 +02002208 if (!is_ram_rom_romd(section)) {
pbrook0f459d12008-06-09 00:20:13 +00002209 /* IO memory case (romd handled later) */
2210 address |= TLB_MMIO;
2211 }
Avi Kivityf3705d52012-03-08 16:16:34 +02002212 if (is_ram_rom_romd(section)) {
2213 addend = (unsigned long)memory_region_get_ram_ptr(section->mr)
2214 + section_addr(section, paddr);
Avi Kivity06ef3522012-02-13 16:11:22 +02002215 } else {
2216 addend = 0;
2217 }
Avi Kivityf3705d52012-03-08 16:16:34 +02002218 if (is_ram_rom(section)) {
pbrook0f459d12008-06-09 00:20:13 +00002219 /* Normal RAM. */
Avi Kivityf3705d52012-03-08 16:16:34 +02002220 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
2221 + section_addr(section, paddr);
2222 if (!section->readonly)
Avi Kivityaa102232012-03-08 17:06:55 +02002223 iotlb |= phys_section_notdirty;
pbrook0f459d12008-06-09 00:20:13 +00002224 else
Avi Kivityaa102232012-03-08 17:06:55 +02002225 iotlb |= phys_section_rom;
pbrook0f459d12008-06-09 00:20:13 +00002226 } else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002227 /* IO handlers are currently passed a physical address.
pbrook0f459d12008-06-09 00:20:13 +00002228 It would be nice to pass an offset from the base address
2229 of that region. This would avoid having to special case RAM,
2230 and avoid full address decoding in every device.
2231 We can't use the high bits of pd for this because
2232 IO_MEM_ROMD uses these as a ram address. */
Avi Kivityaa102232012-03-08 17:06:55 +02002233 iotlb = section - phys_sections;
Avi Kivityf3705d52012-03-08 16:16:34 +02002234 iotlb += section_addr(section, paddr);
pbrook0f459d12008-06-09 00:20:13 +00002235 }
pbrook6658ffb2007-03-16 23:58:11 +00002236
pbrook0f459d12008-06-09 00:20:13 +00002237 code_address = address;
2238 /* Make accesses to pages with watchpoints go via the
2239 watchpoint trap routines. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00002240 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00002241 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
Jun Koibf298f82010-05-06 14:36:59 +09002242 /* Avoid trapping reads of pages with a write breakpoint. */
2243 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
Avi Kivityaa102232012-03-08 17:06:55 +02002244 iotlb = phys_section_watch + paddr;
Jun Koibf298f82010-05-06 14:36:59 +09002245 address |= TLB_MMIO;
2246 break;
2247 }
pbrook6658ffb2007-03-16 23:58:11 +00002248 }
pbrook0f459d12008-06-09 00:20:13 +00002249 }
balrogd79acba2007-06-26 20:01:13 +00002250
pbrook0f459d12008-06-09 00:20:13 +00002251 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2252 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2253 te = &env->tlb_table[mmu_idx][index];
2254 te->addend = addend - vaddr;
2255 if (prot & PAGE_READ) {
2256 te->addr_read = address;
2257 } else {
2258 te->addr_read = -1;
2259 }
edgar_igl5c751e92008-05-06 08:44:21 +00002260
pbrook0f459d12008-06-09 00:20:13 +00002261 if (prot & PAGE_EXEC) {
2262 te->addr_code = code_address;
2263 } else {
2264 te->addr_code = -1;
2265 }
2266 if (prot & PAGE_WRITE) {
Avi Kivityf3705d52012-03-08 16:16:34 +02002267 if ((memory_region_is_ram(section->mr) && section->readonly)
2268 || is_romd(section)) {
pbrook0f459d12008-06-09 00:20:13 +00002269 /* Write access calls the I/O callback. */
2270 te->addr_write = address | TLB_MMIO;
Avi Kivityf3705d52012-03-08 16:16:34 +02002271 } else if (memory_region_is_ram(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002272 && !cpu_physical_memory_is_dirty(
Avi Kivityf3705d52012-03-08 16:16:34 +02002273 section->mr->ram_addr
2274 + section_addr(section, paddr))) {
pbrook0f459d12008-06-09 00:20:13 +00002275 te->addr_write = address | TLB_NOTDIRTY;
bellard84b7b8e2005-11-28 21:19:04 +00002276 } else {
pbrook0f459d12008-06-09 00:20:13 +00002277 te->addr_write = address;
bellard9fa3e852004-01-04 18:06:42 +00002278 }
pbrook0f459d12008-06-09 00:20:13 +00002279 } else {
2280 te->addr_write = -1;
bellard9fa3e852004-01-04 18:06:42 +00002281 }
bellard9fa3e852004-01-04 18:06:42 +00002282}
2283
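/* Usage sketch (illustrative only; 'example_tlb_fill' stands in for a
   target's real tlb_fill() handler): after walking the guest page
   tables, a target typically installs the translation like this. */
#if 0
static void example_tlb_fill(CPUArchState *env, target_ulong vaddr,
                             target_phys_addr_t paddr, int prot,
                             int mmu_idx)
{
    tlb_set_page(env, vaddr & TARGET_PAGE_MASK, paddr & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);
}
#endif
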
bellard01243112004-01-04 15:48:17 +00002284#else
2285
Andreas Färber9349b4f2012-03-14 01:38:32 +01002286void tlb_flush(CPUArchState *env, int flush_global)
bellard01243112004-01-04 15:48:17 +00002287{
2288}
2289
Andreas Färber9349b4f2012-03-14 01:38:32 +01002290void tlb_flush_page(CPUArchState *env, target_ulong addr)
bellard01243112004-01-04 15:48:17 +00002291{
2292}
2293
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002294/*
2295 * Walks guest process memory "regions" one by one
2296 * and calls callback function 'fn' for each region.
2297 */
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002298
2299struct walk_memory_regions_data
bellard9fa3e852004-01-04 18:06:42 +00002300{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002301 walk_memory_regions_fn fn;
2302 void *priv;
2303 unsigned long start;
2304 int prot;
2305};
bellard9fa3e852004-01-04 18:06:42 +00002306
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002307static int walk_memory_regions_end(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00002308 abi_ulong end, int new_prot)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002309{
2310 if (data->start != -1ul) {
2311 int rc = data->fn(data->priv, data->start, end, data->prot);
2312 if (rc != 0) {
2313 return rc;
bellard9fa3e852004-01-04 18:06:42 +00002314 }
bellard33417e72003-08-10 21:47:01 +00002315 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002316
2317 data->start = (new_prot ? end : -1ul);
2318 data->prot = new_prot;
2319
2320 return 0;
2321}
2322
2323static int walk_memory_regions_1(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00002324 abi_ulong base, int level, void **lp)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002325{
Paul Brookb480d9b2010-03-12 23:23:29 +00002326 abi_ulong pa;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002327 int i, rc;
2328
2329 if (*lp == NULL) {
2330 return walk_memory_regions_end(data, base, 0);
2331 }
2332
2333 if (level == 0) {
2334 PageDesc *pd = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00002335 for (i = 0; i < L2_SIZE; ++i) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002336 int prot = pd[i].flags;
2337
2338 pa = base | (i << TARGET_PAGE_BITS);
2339 if (prot != data->prot) {
2340 rc = walk_memory_regions_end(data, pa, prot);
2341 if (rc != 0) {
2342 return rc;
2343 }
2344 }
2345 }
2346 } else {
2347 void **pp = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00002348 for (i = 0; i < L2_SIZE; ++i) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002349 pa = base | ((abi_ulong)i <<
2350 (TARGET_PAGE_BITS + L2_BITS * level));
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002351 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2352 if (rc != 0) {
2353 return rc;
2354 }
2355 }
2356 }
2357
2358 return 0;
2359}
2360
2361int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2362{
2363 struct walk_memory_regions_data data;
2364 unsigned long i;
2365
2366 data.fn = fn;
2367 data.priv = priv;
2368 data.start = -1ul;
2369 data.prot = 0;
2370
2371 for (i = 0; i < V_L1_SIZE; i++) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002372 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002373 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2374 if (rc != 0) {
2375 return rc;
2376 }
2377 }
2378
2379 return walk_memory_regions_end(&data, 0, 0);
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002380}
2381
Paul Brookb480d9b2010-03-12 23:23:29 +00002382static int dump_region(void *priv, abi_ulong start,
2383 abi_ulong end, unsigned long prot)
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002384{
2385 FILE *f = (FILE *)priv;
2386
Paul Brookb480d9b2010-03-12 23:23:29 +00002387 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2388 " "TARGET_ABI_FMT_lx" %c%c%c\n",
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002389 start, end, end - start,
2390 ((prot & PAGE_READ) ? 'r' : '-'),
2391 ((prot & PAGE_WRITE) ? 'w' : '-'),
2392 ((prot & PAGE_EXEC) ? 'x' : '-'));
2393
2394 return (0);
2395}
2396
2397/* dump memory mappings */
2398void page_dump(FILE *f)
2399{
2400 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2401 "start", "end", "size", "prot");
2402 walk_memory_regions(f, dump_region);
bellard33417e72003-08-10 21:47:01 +00002403}
2404
pbrook53a59602006-03-25 19:31:22 +00002405int page_get_flags(target_ulong address)
bellard33417e72003-08-10 21:47:01 +00002406{
bellard9fa3e852004-01-04 18:06:42 +00002407 PageDesc *p;
2408
2409 p = page_find(address >> TARGET_PAGE_BITS);
bellard33417e72003-08-10 21:47:01 +00002410 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00002411 return 0;
2412 return p->flags;
bellard33417e72003-08-10 21:47:01 +00002413}
2414
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    /* must do before we lose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL)) {
                    return -1;
                }
            }
            return 0;
        }
    }
    return 0;
}

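/* Editor's note: a minimal usage sketch (not part of this file).  In
 * user-mode emulation, guest buffers handed to a syscall can be checked
 * with page_check_range() before being dereferenced; linux-user's
 * access_ok() wraps it in essentially this way.  The name
 * example_access_ok is illustrative only.
 */
#if 0
static int example_access_ok(int type, target_ulong addr, target_ulong size)
{
    /* type is PAGE_READ or PAGE_WRITE; 0 only checks that pages are valid. */
    return page_check_range(addr, size, type) == 0;
}
#endif
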
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}

static inline void tlb_set_dirty(CPUArchState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    target_phys_addr_t base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

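/* Editor's note: sub_section holds one phys_sections index per byte
 * offset within the page (SUBPAGE_IDX masks off everything above the
 * page offset), so the lookups below stay O(1) at the cost of a
 * TARGET_PAGE_SIZE-entry table per fragmented page.
 */
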
static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(target_phys_addr_t base);

static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
        if (!p[i].is_leaf) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].ptr);
        }
    }
    lp->is_leaf = 0;
    lp->ptr = PHYS_MAP_NODE_NIL;
}

static void destroy_all_mappings(void)
{
    destroy_l2_mapping(&phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}

static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}

/* register physical memory.  For RAM, 'size' must be a multiple of
   the target page size.  Sections that are smaller than a page, or not
   page aligned, are routed through a subpage; the address passed to the
   IO callbacks is then the offset into the underlying region. */
static void register_subpage(MemoryRegionSection *section)
{
    subpage_t *subpage;
    target_phys_addr_t base = section->offset_within_address_space
                              & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    };
    target_phys_addr_t start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(base);
        subsection.mr = &subpage->iomem;
        phys_page_set(base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + section->size;
    subpage_register(subpage, start, end, phys_section_add(section));
}

static void register_multipage(MemoryRegionSection *section)
{
    target_phys_addr_t start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    target_phys_addr_t addr;
    uint16_t section_index = phys_section_add(section);

    assert(size);

    addr = start_addr;
    phys_page_set(addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
                  section_index);
}

void cpu_register_physical_memory_log(MemoryRegionSection *section,
                                      bool readonly)
{
    MemoryRegionSection now = *section, remain = *section;

    if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
        || (now.size < TARGET_PAGE_SIZE)) {
        now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space,
                       now.size);
        register_subpage(&now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    now = remain;
    now.size &= TARGET_PAGE_MASK;
    if (now.size) {
        register_multipage(&now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    now = remain;
    if (now.size) {
        register_subpage(&now);
    }
}

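/* Editor's note: a worked example of the split above (4 KiB target
 * pages assumed).  Registering a section at 0x1800 with size 0x5000
 * first carves off the unaligned head [0x1800, 0x2000) as a subpage,
 * then maps the page-aligned middle [0x2000, 0x6000) via
 * register_multipage(), and finally the tail [0x6000, 0x6800) as
 * another subpage.
 */
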
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled()) {
        kvm_coalesce_mmio_region(addr, size);
    }
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled()) {
        kvm_uncoalesce_mmio_region(addr, size);
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC 0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC) {
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
    }

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    unlink(filename);
    free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory)) {
        perror("ftruncate");
    }

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    block->fd = fd;
    return area;
}
#endif

static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    if (QLIST_EMPTY(&ram_list.blocks)) {
        return 0;
    }

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QLIST_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

static ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        last = MAX(last, block->offset + block->length);
    }

    return last;
}

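/* Editor's note: a worked example of the gap search in find_ram_offset()
 * above.  With blocks [0, 0x1000) and [0x3000, 0x4000) already in
 * ram_list, a request for size 0x1000 scans each block end: after the
 * first block the nearest following block starts at 0x3000, leaving a
 * 0x2000 gap; after the second there is unbounded space.  The smallest
 * gap that still fits wins, so the new block lands at offset 0x1000.
 */
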
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
        char *id = dev->parent_bus->info->get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
}

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
            /* S390 KVM requires the topmost vma of the RAM to be smaller than
               a system-defined value, which is at least 256GB.  Larger systems
               have larger values.  We put the guest between the end of the
               data segment (system break) and this value.  We use 32GB as a
               base to have enough room for the system break to grow. */
            new_block->host = mmap((void *)0x800000000, size,
                                   PROT_EXEC | PROT_READ | PROT_WRITE,
                                   MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
            if (new_block->host == MAP_FAILED) {
                fprintf(stderr, "Allocating RAM failed\n");
                abort();
            }
#else
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
#endif
            qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
        }
    }
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    if (kvm_enabled()) {
        kvm_setup_guest_memory(new_block->host, size);
    }

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}

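/* Editor's note: a minimal sketch (not part of this file) of how a board
 * model typically ends up in qemu_ram_alloc(): it initializes a RAM
 * MemoryRegion, which allocates the backing RAMBlock, and then maps it
 * into the system address space.  The name "example.ram" and the
 * function itself are illustrative; the memory_region_init_ram()
 * signature is assumed from this era of the memory API.
 */
#if 0
static void example_board_init_ram(ram_addr_t ram_size)
{
    MemoryRegion *sysmem = get_system_memory();
    MemoryRegion *ram = g_malloc(sizeof(*ram));

    memory_region_init_ram(ram, "example.ram", ram_size); /* calls qemu_ram_alloc() */
    memory_region_add_subregion(sysmem, 0, ram);          /* guest-physical 0 */
}
#endif
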
void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            g_free(block);
            return;
        }
    }
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            g_free(block);
            return;
        }
    }
}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC | PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            /* Move this entry to the start of the list.  */
            if (block != QLIST_FIRST(&ram_list.blocks)) {
                QLIST_REMOVE(block, next);
                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
            }
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
 */
void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to guest's ram.  Similar to qemu_get_ram_ptr
 * but takes a size argument */
void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QLIST_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length) {
                    *size = block->length - addr + block->offset;
                }
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

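/* Editor's note: a minimal sketch (not part of this file) of the DMA-style
 * access pattern the comments above point to.  Device code should go
 * through cpu_physical_memory_map()/unmap(), which lean on
 * qemu_ram_ptr_length() for direct RAM access, and must cope with *plen
 * coming back shorter than requested.  The function name is illustrative.
 */
#if 0
static void example_dma_write(target_phys_addr_t addr, const uint8_t *buf,
                              target_phys_addr_t len)
{
    while (len > 0) {
        target_phys_addr_t plen = len;
        void *host = cpu_physical_memory_map(addr, &plen, 1 /* is_write */);

        if (!host) {
            break;                    /* mapping failed, e.g. bounce buffer busy */
        }
        memcpy(host, buf, plen);
        cpu_physical_memory_unmap(host, plen, 1, plen);
        addr += plen;
        buf += plen;
        len -= plen;
    }
}
#endif
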
void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);
}

int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* This case can happen when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}

static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
#endif
    return 0;
}

static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
#endif
}

static const MemoryRegionOps unassigned_mem_ops = {
    .read = unassigned_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
                               unsigned size)
{
    abort();
}

static void error_mem_write(void *opaque, target_phys_addr_t addr,
                            uint64_t value, unsigned size)
{
    abort();
}

static const MemoryRegionOps error_mem_ops = {
    .read = error_mem_read,
    .write = error_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const MemoryRegionOps rom_mem_ops = {
    .read = error_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

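/* Editor's note: notdirty_mem_write below is the slow path taken when a
 * guest write hits a page whose dirty flags are clear (e.g. a page that
 * still contains translated code).  It invalidates any TBs on the page,
 * performs the store, and marks the page dirty; once all dirty flags are
 * set, the TLB entry is switched back so later writes go straight to RAM.
 */
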
static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff) {
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
    }
}

static const MemoryRegionOps notdirty_mem_ops = {
    .read = error_mem_read,
    .write = notdirty_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUArchState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB.  Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(env);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

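/* Editor's note: a minimal sketch (not part of this file) of how the
 * watchpoint machinery above is driven.  A debugger front end inserts a
 * watchpoint with cpu_watchpoint_insert(); the softmmu TLB then routes
 * loads/stores on that page through io_mem_watch so check_watchpoint()
 * can fire.  The 4-byte length and flag choice here are illustrative.
 */
#if 0
static void example_watch_word(CPUArchState *env, target_ulong addr)
{
    CPUWatchpoint *wp;

    if (cpu_watchpoint_insert(env, addr, 4, BP_MEM_WRITE | BP_GDB, &wp) < 0) {
        fprintf(stderr, "could not set watchpoint\n");
    }
}
#endif
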
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, target_phys_addr_t addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(addr, val);
        break;
    case 2:
        stw_phys(addr, val);
        break;
    case 4:
        stl_phys(addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
                             unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    return io_mem_read(section->mr, addr, len);
}

static void subpage_write(void *opaque, target_phys_addr_t addr,
                          uint64_t value, unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx
           " idx %d value %"PRIx64"\n",
           __func__, mmio, len, addr, idx, value);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    io_mem_write(section->mr, addr, value, len);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
                                 unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return ldub_p(ptr);
    case 2: return lduw_p(ptr);
    case 4: return ldl_p(ptr);
    default: abort();
    }
}

static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
                              uint64_t value, unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return stb_p(ptr, value);
    case 2: return stw_p(ptr, value);
    case 4: return stl_p(ptr, value);
    default: abort();
    }
}

static const MemoryRegionOps subpage_ram_ops = {
    .read = subpage_ram_read,
    .write = subpage_ram_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE) {
        return -1;
    }
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    if (memory_region_is_ram(phys_sections[section].mr)) {
        MemoryRegionSection new_section = phys_sections[section];
        new_section.mr = &io_mem_subpage_ram;
        section = phys_section_add(&new_section);
    }
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

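/* Editor's note: a worked example of the registration above.  For a
 * 0x100-byte device at offset 0xe00 within its page, subpage_register()
 * is called with start 0xe00 and end 0xeff, so sub_section[0xe00..0xeff]
 * all point at the device's phys_sections entry; accesses to the rest of
 * the page keep whatever section (often unassigned) they had before.
 */
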
static subpage_t *subpage_init(target_phys_addr_t base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->base = base;
    memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);

    return mmio;
}

static uint16_t dummy_section(MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = UINT64_MAX,
    };

    return phys_section_add(&section);
}

MemoryRegion *iotlb_to_region(target_phys_addr_t index)
{
    return phys_sections[index & ~TARGET_PAGE_MASK].mr;
}

static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
    memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
                          "subpage-ram", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}

static void core_begin(MemoryListener *listener)
{
    destroy_all_mappings();
    phys_sections_clear();
    phys_map.ptr = PHYS_MAP_NODE_NIL;
    phys_section_unassigned = dummy_section(&io_mem_unassigned);
    phys_section_notdirty = dummy_section(&io_mem_notdirty);
    phys_section_rom = dummy_section(&io_mem_rom);
    phys_section_watch = dummy_section(&io_mem_watch);
}

static void core_commit(MemoryListener *listener)
{
    CPUArchState *env;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}

static void core_region_add(MemoryListener *listener,
                            MemoryRegionSection *section)
{
    cpu_register_physical_memory_log(section, section->readonly);
}

static void core_region_del(MemoryListener *listener,
                            MemoryRegionSection *section)
{
}

static void core_region_nop(MemoryListener *listener,
                            MemoryRegionSection *section)
{
    cpu_register_physical_memory_log(section, section->readonly);
}

static void core_log_start(MemoryListener *listener,
                           MemoryRegionSection *section)
{
}

static void core_log_stop(MemoryListener *listener,
                          MemoryRegionSection *section)
{
}

static void core_log_sync(MemoryListener *listener,
                          MemoryRegionSection *section)
{
}

static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(1);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(0);
}

static void core_eventfd_add(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool match_data, uint64_t data, int fd)
{
}

static void core_eventfd_del(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool match_data, uint64_t data, int fd)
{
}

static void io_begin(MemoryListener *listener)
{
}

static void io_commit(MemoryListener *listener)
{
}

static void io_region_add(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);

    mrio->mr = section->mr;
    mrio->offset = section->offset_within_region;
    iorange_init(&mrio->iorange, &memory_region_iorange_ops,
                 section->offset_within_address_space, section->size);
    ioport_register(&mrio->iorange);
}

static void io_region_del(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    isa_unassign_ioport(section->offset_within_address_space, section->size);
}

static void io_region_nop(MemoryListener *listener,
                          MemoryRegionSection *section)
{
}

static void io_log_start(MemoryListener *listener,
                         MemoryRegionSection *section)
{
}

static void io_log_stop(MemoryListener *listener,
                        MemoryRegionSection *section)
{
}

static void io_log_sync(MemoryListener *listener,
                        MemoryRegionSection *section)
{
}

static void io_log_global_start(MemoryListener *listener)
{
}

static void io_log_global_stop(MemoryListener *listener)
{
}

static void io_eventfd_add(MemoryListener *listener,
                           MemoryRegionSection *section,
                           bool match_data, uint64_t data, int fd)
{
}

static void io_eventfd_del(MemoryListener *listener,
                           MemoryRegionSection *section,
                           bool match_data, uint64_t data, int fd)
{
}

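/* Editor's note: the empty callbacks above are not dead code.  At this
 * point in the memory API every MemoryListener hook appears to be
 * mandatory, so listeners that only care about a few events still have
 * to provide no-op implementations for the rest.
 */
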
static MemoryListener core_memory_listener = {
    .begin = core_begin,
    .commit = core_commit,
    .region_add = core_region_add,
    .region_del = core_region_del,
    .region_nop = core_region_nop,
    .log_start = core_log_start,
    .log_stop = core_log_stop,
    .log_sync = core_log_sync,
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
    .eventfd_add = core_eventfd_add,
    .eventfd_del = core_eventfd_del,
    .priority = 0,
};

static MemoryListener io_memory_listener = {
    .begin = io_begin,
    .commit = io_commit,
    .region_add = io_region_add,
    .region_del = io_region_del,
    .region_nop = io_region_nop,
    .log_start = io_log_start,
    .log_stop = io_log_stop,
    .log_sync = io_log_sync,
    .log_global_start = io_log_global_start,
    .log_global_stop = io_log_global_stop,
    .eventfd_add = io_eventfd_add,
    .eventfd_del = io_eventfd_del,
    .priority = 0,
};

static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, "system", INT64_MAX);
    set_system_memory_map(system_memory);

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init(system_io, "io", 65536);
    set_system_io_map(system_io);

    memory_listener_register(&core_memory_listener, system_memory);
    memory_listener_register(&io_memory_listener, system_io);
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID)) {
            return -1;
        }
        if (is_write) {
            if (!(flags & PAGE_WRITE)) {
                return -1;
            }
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0))) {
                return -1;
            }
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ)) {
                return -1;
            }
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1))) {
                return -1;
            }
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

bellard13eb76e2004-01-24 15:23:36 +00003782#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    MemoryRegionSection *section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        if (is_write) {
            if (!memory_region_is_ram(section->mr)) {
                target_phys_addr_t addr1;
                addr1 = section_addr(section, addr);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write(section->mr, addr1, val, 4);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write(section->mr, addr1, val, 2);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write(section->mr, addr1, val, 1);
                    l = 1;
                }
            } else if (!section->readonly) {
                ram_addr_t addr1;
                addr1 = memory_region_get_ram_addr(section->mr)
                    + section_addr(section, addr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                qemu_put_ram_ptr(ptr);
            }
        } else {
            if (!is_ram_rom_romd(section)) {
                target_phys_addr_t addr1;
                /* I/O case */
                addr1 = section_addr(section, addr);
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read(section->mr, addr1, 4);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read(section->mr, addr1, 2);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read(section->mr, addr1, 1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(section->mr->ram_addr
                                       + section_addr(section, addr));
                memcpy(buf, ptr, l);
                qemu_put_ram_ptr(ptr);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
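
/* Example (illustrative sketch, not part of the original file): the
 * cpu_physical_memory_read()/write() helpers from cpu-common.h are thin
 * wrappers around cpu_physical_memory_rw() above.  The guest physical
 * address used here is hypothetical. */
static void example_phys_rw(void)
{
    uint8_t out[4] = { 0x12, 0x34, 0x56, 0x78 };
    uint8_t in[4];

    cpu_physical_memory_write(0x1000, out, sizeof(out)); /* is_write != 0 */
    cpu_physical_memory_read(0x1000, in, sizeof(in));    /* is_write == 0 */
}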

/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    MemoryRegionSection *section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        if (!is_ram_rom_romd(section)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = memory_region_get_ram_addr(section->mr)
                + section_addr(section, addr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            qemu_put_ram_ptr(ptr);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
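
/* Example (illustrative sketch, not part of the original file): firmware
 * loaders use the ROM-capable writer above, since a plain
 * cpu_physical_memory_rw() would skip read-only regions.  The address is
 * hypothetical. */
static void example_install_firmware(const uint8_t *blob, int size)
{
    cpu_physical_memory_write_rom(0xfffc0000, blob, size);
}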

typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t todo = 0;
    int l;
    target_phys_addr_t page;
    MemoryRegionSection *section;
    ram_addr_t raddr = RAM_ADDR_MAX;
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_read(addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            raddr = memory_region_get_ram_addr(section->mr)
                + section_addr(section, addr);
        }

        len -= l;
        addr += l;
        todo += l;
    }
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
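
/* Example (illustrative sketch, not part of the original file): a
 * DMA-style zero-copy write.  When the target range is not plain RAM the
 * map falls back to the single bounce buffer and may return NULL while
 * that buffer is busy; cpu_register_map_client() is the documented way to
 * learn when a retry is worthwhile.  The function names and the address
 * are hypothetical. */
static void example_dma_retry(void *opaque)
{
    /* invoked from cpu_notify_map_clients() once the bounce buffer frees up */
}

static void example_dma_fill(target_phys_addr_t addr, target_phys_addr_t len)
{
    target_phys_addr_t plen = len;
    void *host = cpu_physical_memory_map(addr, &plen, 1 /* is_write */);

    if (!host) {
        cpu_register_map_client(NULL, example_dma_retry);
        return;
    }
    memset(host, 0, plen);                      /* plen may be < len */
    cpu_physical_memory_unmap(host, plen, 1, plen);
}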

/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint32_t val;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!is_ram_rom_romd(section)) {
        /* I/O case */
        addr = section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!is_ram_rom_romd(section)) {
        /* I/O case */
        addr = section_addr(section, addr);

        /* XXX This is broken when device endian != cpu endian.
           Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = io_mem_read(section->mr, addr, 4) << 32;
        val |= io_mem_read(section->mr, addr + 4, 4);
#else
        val = io_mem_read(section->mr, addr, 4);
        val |= io_mem_read(section->mr, addr + 4, 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!is_ram_rom_romd(section)) {
        /* I/O case */
        addr = section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
                               & TARGET_PAGE_MASK)
            + section_addr(section, addr);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
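
/* Example (illustrative sketch, not part of the original file): target MMU
 * code that maintains accessed/dirty flags in guest page tables uses the
 * _notdirty store so that updating the PTE does not itself dirty the page,
 * exactly because the dirty bits may be tracking modified PTEs.  The PTE
 * flag value is hypothetical. */
static void example_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    stl_phys_notdirty(pte_addr, pte | 0x20 /* hypothetical ACCESSED bit */);
}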

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write(section->mr, addr, val >> 32, 4);
        io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
#else
        io_mem_write(section->mr, addr, (uint32_t)val, 4);
        io_mem_write(section->mr, addr + 4, val >> 32, 4);
#endif
    } else {
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + section_addr(section, addr));
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
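
/* Example (illustrative sketch, not part of the original file): a device
 * model whose registers are little-endian regardless of the target would
 * pair the _le_ store and load so the value round-trips unchanged on both
 * big- and little-endian targets.  The address is hypothetical. */
static void example_le_register_roundtrip(void)
{
    uint32_t v;

    stl_le_phys(0x2000, 0xdeadbeef);
    v = ldl_le_phys(0x2000);    /* 0xdeadbeef on any host/target pairing */
    (void)v;
}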

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 2);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
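
/* Example (illustrative sketch, not part of the original file): how a
 * debugger stub might peek four bytes at a guest virtual address.
 * cpu_memory_rw_debug() translates through cpu_get_phys_page_debug() and
 * so reports -1 for unmapped pages. */
static int example_debug_peek(CPUArchState *env, target_ulong vaddr,
                              uint32_t *value)
{
    return cpu_memory_rw_debug(env, vaddr, (uint8_t *)value,
                               sizeof(*value), 0 /* read */);
}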
#endif

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUArchState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((uintptr_t)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred. */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn. */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB. If this is not
       the first instruction in a TB then re-execute the preceding
       branch. */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen. */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception. In practice
       we have already translated the block once so it's probably ok. */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB. */
    cpu_resume_from_signal(env, NULL);
}

#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
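
/* Example (illustrative sketch, not part of the original file): any FILE
 * pointer plus an fprintf-style callback can consume these statistics; the
 * monitor's "info jit" command is the usual caller. */
static void example_dump_jit_stats(void)
{
    dump_exec_info(stderr, fprintf);
}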

/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
   is the offset relative to phys_ram_base */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    void *p;
    MemoryRegion *mr;

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1);
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
#ifdef CONFIG_TCG_PASS_AREG0
        cpu_ldub_code(env1, addr);
#else
        ldub_code(addr);
#endif
    }
    pd = env1->iotlb[mmu_idx][page_index] & ~TARGET_PAGE_MASK;
    mr = iotlb_to_region(pd);
    if (mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch) {
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC)
        cpu_unassigned_access(env1, addr, 0, 1, 0, 4);
#else
        cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
#endif
    }
    p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif
4659#endif