/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
   have limited branch ranges (possibly also PPC), so place it in a
   section close to the code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

#define P_L2_LEVELS \
    (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
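
/* Illustrative example of the split above (the actual figures depend on the
   target, they are not fixed by this file): with L1_MAP_ADDR_SPACE_BITS == 32
   and TARGET_PAGE_BITS == 12 (4 KB pages), a page index has 20 bits.
   V_L1_BITS_REM == (32 - 12) % 10 == 0, so V_L1_BITS == 10: the top 10 bits
   of the page index select one of 1024 l1_map entries (V_L1_SHIFT == 10) and
   the low 10 bits index a single bottom-level table of PageDesc. */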

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageEntry PhysPageEntry;

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
    /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to MemoryRegionSections.  */
static PhysPageEntry phys_map = { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
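
/* Note on the encoding above: a PhysPageEntry packs into 16 bits either a
   leaf (an index into phys_sections) or an interior node (an index into
   phys_map_nodes).  PHYS_MAP_NODE_NIL is 0x7fff, the all-ones value of the
   15-bit ptr field, so it can never collide with a valid node index.  As an
   illustration (figures depend on the target configuration): with
   TARGET_PHYS_ADDR_SPACE_BITS == 36 and TARGET_PAGE_BITS == 12,
   P_L2_LEVELS == ((36 - 12 - 1) / 10) + 1 == 3 levels of L2_SIZE entries. */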

static void io_mem_init(void);
static void memory_map_init(void);

/* io memory support */
MemoryRegion *io_mem_region[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static MemoryRegion io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}
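
/* The BSD user-mode scan above marks every address range the host process
   already has mapped (found via kinfo_getvmmap() or the Linux-compat maps
   file) as PAGE_RESERVED in the guest page table, so that guest mmap
   emulation will not hand those ranges out to the guest. */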

static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}
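
/* Worked example for the walk above (illustrative figures, assuming
   V_L1_SHIFT == 10 and L2_BITS == 10): for a 20-bit page index, bits [19:10]
   pick the l1_map slot and the middle-level loop never runs
   (V_L1_SHIFT / L2_BITS - 1 == 0), so bits [9:0] directly index the PageDesc
   array hanging off that slot.  Wider address spaces simply add more 10-bit
   intermediate levels to the loop; tables are allocated lazily on first use
   when 'alloc' is set. */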

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}


static void phys_page_set_level(PhysPageEntry *lp, target_phys_addr_t *index,
                                target_phys_addr_t *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    target_phys_addr_t step = (target_phys_addr_t)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}
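
/* The recursion above carves [*index, *index + *nb) into the tree: whenever
   the remaining range covers a whole aligned block of 'step' pages
   (step == 1 << (level * L2_BITS)), that block is recorded as a single leaf
   at the current interior level instead of descending further; otherwise it
   recurses one level down.  Large aligned regions therefore stay cheap to
   represent, and lookups stop as soon as they hit a leaf. */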

static void phys_page_set(target_phys_addr_t index, target_phys_addr_t nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
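
/* Hypothetical usage sketch (the real callers live elsewhere in this file,
   in the memory-registration code): to map a section over a page-aligned
   physical range, something like
       phys_page_set(start_addr >> TARGET_PAGE_BITS,
                     size >> TARGET_PAGE_BITS, section_index);
   where section_index identifies an entry in phys_sections. */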

static MemoryRegionSection phys_page_find(target_phys_addr_t index)
{
    PhysPageEntry lp = phys_map;
    PhysPageEntry *p;
    int i;
    MemoryRegionSection section;
    target_phys_addr_t delta;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    section = phys_sections[s_index];
    index <<= TARGET_PAGE_BITS;
    assert(section.offset_within_address_space <= index
           && index <= section.offset_within_address_space + section.size-1);
    delta = index - section.offset_within_address_space;
    section.offset_within_address_space += delta;
    section.offset_within_region += delta;
    section.size -= delta;
    return section;
}

static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Keep the buffer no bigger than 16MB to branch between blocks */
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
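
/* Sizing note for the tail of code_gen_alloc(): code_gen_buffer_max_size
   leaves TCG_MAX_OP_SIZE * OPC_BUF_SIZE bytes of slack at the end of the
   buffer, i.e. room for one worst-case translation block, so tb_alloc() can
   do its "buffer full" check before translating rather than during.
   code_gen_max_blocks estimates how many TranslationBlock descriptors a
   buffer of this size can ever need, using the average block size. */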

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
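
/* Encoding note for the list walk above: pointers in a page's TB list carry
   a tag in their two low bits (see tb_alloc_page(): first_tb = tb | n).  The
   tag records which of the TB's two page slots (page_next[0] or page_next[1])
   continues the chain, since a TB spanning two pages sits on two lists at
   once.  Masking with ~3 recovers the real pointer. */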

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
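
/* The direct-jump bookkeeping uses the same low-bit tagging: jmp_next[n]
   links the TBs whose jump slot n targets this TB, and a pointer tagged with
   2 (tb->jmp_first = tb | 2, set in tb_phys_invalidate() below) marks the
   head of the circular list, which is why the traversal above switches to
   jmp_first when n1 == 2. */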

void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the per-CPU tb_jmp_cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
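
/* Summary: tb_phys_invalidate() unlinks a TB from every structure that can
   reach it: the physical-PC hash table, the per-page TB lists, each CPU's
   tb_jmp_cache, its own two outgoing jump links, and the incoming circular
   list of TBs that chained directly to it (their direct jumps are reset so
   they no longer branch into the invalidated code). */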

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
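
/* Worked example of set_bits(), for tab initially all zero: set_bits(tab, 3, 7)
   covers bits 3..9, so it ORs 0xf8 into tab[0] (bits 3-7) and 0x03 into
   tab[1] (bits 8-9).  When start and end fall in the same byte only the
   first branch runs, e.g. set_bits(tab, 2, 3) ORs 0x1c into tab[0]. */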

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size +
                             CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }
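
    /* Heuristic note: the bitmap is only built once a page has seen
       SMC_BITMAP_USE_THRESHOLD (10) write accesses, so pages that rarely
       trigger self-modifying-code checks never pay for a bitmap, while hot
       pages let tb_invalidate_phys_page_fast() skip the full invalidation
       below when no TB covers the written bytes. */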

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
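
/* Example of the bitmap test above (illustrative): for a 1-byte write at
   page offset 0x123, b = code_bitmap[0x24] >> 3 and the mask is
   (1 << 1) - 1 == 1, so only bit 0x123 of the page is tested; if
   build_page_bitmap() never set it, the write proceeds without invalidating
   anything. */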

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001283
1284/* add the TB to the target page and protect it if necessary */
ths5fafdf22007-09-16 21:08:06 +00001285static inline void tb_alloc_page(TranslationBlock *tb,
Paul Brook41c1b1c2010-03-12 16:54:58 +00001286 unsigned int n, tb_page_addr_t page_addr)
bellardfd6ce8f2003-05-14 19:00:11 +00001287{
1288 PageDesc *p;
Juan Quintela4429ab42011-06-02 01:53:44 +00001289#ifndef CONFIG_USER_ONLY
1290 bool page_already_protected;
1291#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001292
bellard9fa3e852004-01-04 18:06:42 +00001293 tb->page_addr[n] = page_addr;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001294 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
bellard9fa3e852004-01-04 18:06:42 +00001295 tb->page_next[n] = p->first_tb;
Juan Quintela4429ab42011-06-02 01:53:44 +00001296#ifndef CONFIG_USER_ONLY
1297 page_already_protected = p->first_tb != NULL;
1298#endif
bellard9fa3e852004-01-04 18:06:42 +00001299 p->first_tb = (TranslationBlock *)((long)tb | n);
1300 invalidate_page_bitmap(p);
1301
bellard107db442004-06-22 18:48:46 +00001302#if defined(TARGET_HAS_SMC) || 1
bellardd720b932004-04-25 17:57:43 +00001303
bellard9fa3e852004-01-04 18:06:42 +00001304#if defined(CONFIG_USER_ONLY)
bellardfd6ce8f2003-05-14 19:00:11 +00001305 if (p->flags & PAGE_WRITE) {
pbrook53a59602006-03-25 19:31:22 +00001306 target_ulong addr;
1307 PageDesc *p2;
bellard9fa3e852004-01-04 18:06:42 +00001308 int prot;
1309
bellardfd6ce8f2003-05-14 19:00:11 +00001310 /* force the host page as non writable (writes will have a
1311 page fault + mprotect overhead) */
pbrook53a59602006-03-25 19:31:22 +00001312 page_addr &= qemu_host_page_mask;
bellardfd6ce8f2003-05-14 19:00:11 +00001313 prot = 0;
pbrook53a59602006-03-25 19:31:22 +00001314 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1315 addr += TARGET_PAGE_SIZE) {
1316
1317 p2 = page_find (addr >> TARGET_PAGE_BITS);
1318 if (!p2)
1319 continue;
1320 prot |= p2->flags;
1321 p2->flags &= ~PAGE_WRITE;
pbrook53a59602006-03-25 19:31:22 +00001322 }
ths5fafdf22007-09-16 21:08:06 +00001323 mprotect(g2h(page_addr), qemu_host_page_size,
bellardfd6ce8f2003-05-14 19:00:11 +00001324 (prot & PAGE_BITS) & ~PAGE_WRITE);
1325#ifdef DEBUG_TB_INVALIDATE
blueswir1ab3d1722007-11-04 07:31:40 +00001326 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
pbrook53a59602006-03-25 19:31:22 +00001327 page_addr);
bellardfd6ce8f2003-05-14 19:00:11 +00001328#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001329 }
bellard9fa3e852004-01-04 18:06:42 +00001330#else
1331 /* if some code is already present, then the pages are already
1332 protected. So we handle the case where only the first TB is
1333 allocated in a physical page */
Juan Quintela4429ab42011-06-02 01:53:44 +00001334 if (!page_already_protected) {
bellard6a00d602005-11-21 23:25:50 +00001335 tlb_protect_code(page_addr);
bellard9fa3e852004-01-04 18:06:42 +00001336 }
1337#endif
bellardd720b932004-04-25 17:57:43 +00001338
1339#endif /* TARGET_HAS_SMC */
bellardfd6ce8f2003-05-14 19:00:11 +00001340}
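
/* Note: the two branches above implement the same protection two ways.
 * User-mode builds mprotect() the host page read-only, so a guest write
 * faults and is handled by page_unprotect() later in this file; softmmu
 * builds call tlb_protect_code() so that writes trap through the TLB. */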
1341
bellard9fa3e852004-01-04 18:06:42 +00001342/* add a new TB and link it to the physical page tables. phys_page2 is
1343 (-1) to indicate that only one page contains the TB. */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001344void tb_link_page(TranslationBlock *tb,
1345 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
bellardd4e81642003-05-25 16:46:15 +00001346{
bellard9fa3e852004-01-04 18:06:42 +00001347 unsigned int h;
1348 TranslationBlock **ptb;
1349
pbrookc8a706f2008-06-02 16:16:42 +00001350 /* Grab the mmap lock to stop another thread invalidating this TB
1351 before we are done. */
1352 mmap_lock();
bellard9fa3e852004-01-04 18:06:42 +00001353 /* add in the physical hash table */
1354 h = tb_phys_hash_func(phys_pc);
1355 ptb = &tb_phys_hash[h];
1356 tb->phys_hash_next = *ptb;
1357 *ptb = tb;
bellardfd6ce8f2003-05-14 19:00:11 +00001358
1359 /* add in the page list */
bellard9fa3e852004-01-04 18:06:42 +00001360 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1361 if (phys_page2 != -1)
1362 tb_alloc_page(tb, 1, phys_page2);
1363 else
1364 tb->page_addr[1] = -1;
bellard9fa3e852004-01-04 18:06:42 +00001365
bellardd4e81642003-05-25 16:46:15 +00001366 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1367 tb->jmp_next[0] = NULL;
1368 tb->jmp_next[1] = NULL;
1369
1370 /* init original jump addresses */
1371 if (tb->tb_next_offset[0] != 0xffff)
1372 tb_reset_jump(tb, 0);
1373 if (tb->tb_next_offset[1] != 0xffff)
1374 tb_reset_jump(tb, 1);
bellard8a40a182005-11-20 10:35:40 +00001375
1376#ifdef DEBUG_TB_CHECK
1377 tb_page_check();
1378#endif
pbrookc8a706f2008-06-02 16:16:42 +00001379 mmap_unlock();
bellardfd6ce8f2003-05-14 19:00:11 +00001380}
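
/* Note: a TB whose code straddles a guest page boundary is registered in
 * both pages' lists (phys_page2 != -1 above), so invalidating either
 * page removes it. */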
1381
bellarda513fe12003-05-27 23:29:48 +00001382/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1383 tb[1].tc_ptr. Return NULL if not found */
1384TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1385{
1386 int m_min, m_max, m;
1387 unsigned long v;
1388 TranslationBlock *tb;
1389
1390 if (nb_tbs <= 0)
1391 return NULL;
1392 if (tc_ptr < (unsigned long)code_gen_buffer ||
1393 tc_ptr >= (unsigned long)code_gen_ptr)
1394 return NULL;
1395 /* binary search (cf Knuth) */
1396 m_min = 0;
1397 m_max = nb_tbs - 1;
1398 while (m_min <= m_max) {
1399 m = (m_min + m_max) >> 1;
1400 tb = &tbs[m];
1401 v = (unsigned long)tb->tc_ptr;
1402 if (v == tc_ptr)
1403 return tb;
1404 else if (tc_ptr < v) {
1405 m_max = m - 1;
1406 } else {
1407 m_min = m + 1;
1408 }
ths5fafdf22007-09-16 21:08:06 +00001409 }
bellarda513fe12003-05-27 23:29:48 +00001410 return &tbs[m_max];
1411}
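
/* Illustrative use of tb_find_pc(), mirroring the pattern used by
 * tb_invalidate_phys_page() above (a sketch, not code from this file):
 *
 *     TranslationBlock *tb = tb_find_pc(host_pc);
 *     if (tb) {
 *         cpu_restore_state(tb, env, host_pc);
 *     }
 *
 * This maps a host PC inside the code buffer back to its TB, then
 * recovers the precise guest CPU state at that point. */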
bellard75012672003-06-21 13:11:07 +00001412
bellardea041c02003-06-25 16:16:50 +00001413static void tb_reset_jump_recursive(TranslationBlock *tb);
1414
1415static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1416{
1417 TranslationBlock *tb1, *tb_next, **ptb;
1418 unsigned int n1;
1419
1420 tb1 = tb->jmp_next[n];
1421 if (tb1 != NULL) {
1422 /* find head of list */
1423 for(;;) {
1424 n1 = (long)tb1 & 3;
1425 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1426 if (n1 == 2)
1427 break;
1428 tb1 = tb1->jmp_next[n1];
1429 }
1430 /* we are now sure that tb jumps to tb1 */
1431 tb_next = tb1;
1432
1433 /* remove tb from the jmp_first list */
1434 ptb = &tb_next->jmp_first;
1435 for(;;) {
1436 tb1 = *ptb;
1437 n1 = (long)tb1 & 3;
1438 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1439 if (n1 == n && tb1 == tb)
1440 break;
1441 ptb = &tb1->jmp_next[n1];
1442 }
1443 *ptb = tb->jmp_next[n];
1444 tb->jmp_next[n] = NULL;
ths3b46e622007-09-17 08:09:54 +00001445
bellardea041c02003-06-25 16:16:50 +00001446 /* suppress the jump to next tb in generated code */
1447 tb_reset_jump(tb, n);
1448
bellard01243112004-01-04 15:48:17 +00001449 /* suppress jumps in the TB to which we could have jumped */
bellardea041c02003-06-25 16:16:50 +00001450 tb_reset_jump_recursive(tb_next);
1451 }
1452}
1453
1454static void tb_reset_jump_recursive(TranslationBlock *tb)
1455{
1456 tb_reset_jump_recursive2(tb, 0);
1457 tb_reset_jump_recursive2(tb, 1);
1458}
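
/* The lists walked above keep extra state in the low bits of each
 * pointer: bits 0-1 of a jmp_next/jmp_first entry select which of the
 * two jump slots the link came from, and the value 2 marks the head of
 * the circular list (tb_link_page() sets tb->jmp_first = tb | 2). */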
1459
bellard1fddef42005-04-17 19:16:13 +00001460#if defined(TARGET_HAS_ICE)
Paul Brook94df27f2010-02-28 23:47:45 +00001461#if defined(CONFIG_USER_ONLY)
1462static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1463{
1464 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1465}
1466#else
bellardd720b932004-04-25 17:57:43 +00001467static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1468{
Anthony Liguoric227f092009-10-01 16:12:16 -05001469 target_phys_addr_t addr;
Anthony Liguoric227f092009-10-01 16:12:16 -05001470 ram_addr_t ram_addr;
Avi Kivity06ef3522012-02-13 16:11:22 +02001471 MemoryRegionSection section;
bellardd720b932004-04-25 17:57:43 +00001472
pbrookc2f07f82006-04-08 17:14:56 +00001473 addr = cpu_get_phys_page_debug(env, pc);
Avi Kivity06ef3522012-02-13 16:11:22 +02001474 section = phys_page_find(addr >> TARGET_PAGE_BITS);
1475 if (!(memory_region_is_ram(section.mr)
1476 || (section.mr->rom_device && section.mr->readable))) {
1477 return;
1478 }
1479 ram_addr = (memory_region_get_ram_addr(section.mr)
1480 + section.offset_within_region) & TARGET_PAGE_MASK;
1481 ram_addr |= (pc & ~TARGET_PAGE_MASK);
pbrook706cd4b2006-04-08 17:36:21 +00001482 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
bellardd720b932004-04-25 17:57:43 +00001483}
bellardc27004e2005-01-03 23:35:10 +00001484#endif
Paul Brook94df27f2010-02-28 23:47:45 +00001485#endif /* TARGET_HAS_ICE */
bellardd720b932004-04-25 17:57:43 +00001486
Paul Brookc527ee82010-03-01 03:31:14 +00001487#if defined(CONFIG_USER_ONLY)
1488void cpu_watchpoint_remove_all(CPUState *env, int mask)
1489{
1491}
1492
1493int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1494 int flags, CPUWatchpoint **watchpoint)
1495{
1496 return -ENOSYS;
1497}
1498#else
pbrook6658ffb2007-03-16 23:58:11 +00001499/* Add a watchpoint. */
aliguoria1d1bb32008-11-18 20:07:32 +00001500int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1501 int flags, CPUWatchpoint **watchpoint)
pbrook6658ffb2007-03-16 23:58:11 +00001502{
aliguorib4051332008-11-18 20:14:20 +00001503 target_ulong len_mask = ~(len - 1);
aliguoric0ce9982008-11-25 22:13:57 +00001504 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001505
aliguorib4051332008-11-18 20:14:20 +00001506 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1507 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1508 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1509 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1510 return -EINVAL;
1511 }
Anthony Liguori7267c092011-08-20 22:09:37 -05001512 wp = g_malloc(sizeof(*wp));
pbrook6658ffb2007-03-16 23:58:11 +00001513
aliguoria1d1bb32008-11-18 20:07:32 +00001514 wp->vaddr = addr;
aliguorib4051332008-11-18 20:14:20 +00001515 wp->len_mask = len_mask;
aliguoria1d1bb32008-11-18 20:07:32 +00001516 wp->flags = flags;
1517
aliguori2dc9f412008-11-18 20:56:59 +00001518 /* keep all GDB-injected watchpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001519 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001520 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001521 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001522 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001523
pbrook6658ffb2007-03-16 23:58:11 +00001524 tlb_flush_page(env, addr);
aliguoria1d1bb32008-11-18 20:07:32 +00001525
1526 if (watchpoint)
1527 *watchpoint = wp;
1528 return 0;
pbrook6658ffb2007-03-16 23:58:11 +00001529}
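
/* Usage sketch (illustrative values only, not taken from this file):
 *
 *     CPUWatchpoint *wp;
 *     if (cpu_watchpoint_insert(env, 0x2000, 4,
 *                               BP_GDB | BP_MEM_WRITE, &wp) < 0) {
 *         // rejected: len must be 1/2/4/8 and addr aligned to len
 *     }
 *
 * The last argument may be NULL when the caller does not need the
 * CPUWatchpoint handle back. */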
1530
aliguoria1d1bb32008-11-18 20:07:32 +00001531/* Remove a specific watchpoint. */
1532int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1533 int flags)
pbrook6658ffb2007-03-16 23:58:11 +00001534{
aliguorib4051332008-11-18 20:14:20 +00001535 target_ulong len_mask = ~(len - 1);
aliguoria1d1bb32008-11-18 20:07:32 +00001536 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001537
Blue Swirl72cf2d42009-09-12 07:36:22 +00001538 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001539 if (addr == wp->vaddr && len_mask == wp->len_mask
aliguori6e140f22008-11-18 20:37:55 +00001540 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
aliguoria1d1bb32008-11-18 20:07:32 +00001541 cpu_watchpoint_remove_by_ref(env, wp);
pbrook6658ffb2007-03-16 23:58:11 +00001542 return 0;
1543 }
1544 }
aliguoria1d1bb32008-11-18 20:07:32 +00001545 return -ENOENT;
pbrook6658ffb2007-03-16 23:58:11 +00001546}
1547
aliguoria1d1bb32008-11-18 20:07:32 +00001548/* Remove a specific watchpoint by reference. */
1549void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1550{
Blue Swirl72cf2d42009-09-12 07:36:22 +00001551 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
edgar_igl7d03f822008-05-17 18:58:29 +00001552
aliguoria1d1bb32008-11-18 20:07:32 +00001553 tlb_flush_page(env, watchpoint->vaddr);
1554
Anthony Liguori7267c092011-08-20 22:09:37 -05001555 g_free(watchpoint);
edgar_igl7d03f822008-05-17 18:58:29 +00001556}
1557
aliguoria1d1bb32008-11-18 20:07:32 +00001558/* Remove all matching watchpoints. */
1559void cpu_watchpoint_remove_all(CPUState *env, int mask)
1560{
aliguoric0ce9982008-11-25 22:13:57 +00001561 CPUWatchpoint *wp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001562
Blue Swirl72cf2d42009-09-12 07:36:22 +00001563 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001564 if (wp->flags & mask)
1565 cpu_watchpoint_remove_by_ref(env, wp);
aliguoric0ce9982008-11-25 22:13:57 +00001566 }
aliguoria1d1bb32008-11-18 20:07:32 +00001567}
Paul Brookc527ee82010-03-01 03:31:14 +00001568#endif
aliguoria1d1bb32008-11-18 20:07:32 +00001569
1570/* Add a breakpoint. */
1571int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1572 CPUBreakpoint **breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001573{
bellard1fddef42005-04-17 19:16:13 +00001574#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001575 CPUBreakpoint *bp;
ths3b46e622007-09-17 08:09:54 +00001576
Anthony Liguori7267c092011-08-20 22:09:37 -05001577 bp = g_malloc(sizeof(*bp));
aliguoria1d1bb32008-11-18 20:07:32 +00001578
1579 bp->pc = pc;
1580 bp->flags = flags;
1581
aliguori2dc9f412008-11-18 20:56:59 +00001582 /* keep all GDB-injected breakpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001583 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001584 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001585 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001586 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001587
1588 breakpoint_invalidate(env, pc);
1589
1590 if (breakpoint)
1591 *breakpoint = bp;
1592 return 0;
1593#else
1594 return -ENOSYS;
1595#endif
1596}
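
/* Usage sketch (illustrative pc value, not taken from this file):
 *
 *     if (cpu_breakpoint_insert(env, 0x8000, BP_GDB, NULL) == -ENOSYS) {
 *         // this target was built without TARGET_HAS_ICE
 *     }
 */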
1597
1598/* Remove a specific breakpoint. */
1599int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1600{
1601#if defined(TARGET_HAS_ICE)
1602 CPUBreakpoint *bp;
1603
Blue Swirl72cf2d42009-09-12 07:36:22 +00001604 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00001605 if (bp->pc == pc && bp->flags == flags) {
1606 cpu_breakpoint_remove_by_ref(env, bp);
bellard4c3a88a2003-07-26 12:06:08 +00001607 return 0;
aliguoria1d1bb32008-11-18 20:07:32 +00001608 }
bellard4c3a88a2003-07-26 12:06:08 +00001609 }
aliguoria1d1bb32008-11-18 20:07:32 +00001610 return -ENOENT;
bellard4c3a88a2003-07-26 12:06:08 +00001611#else
aliguoria1d1bb32008-11-18 20:07:32 +00001612 return -ENOSYS;
bellard4c3a88a2003-07-26 12:06:08 +00001613#endif
1614}
1615
aliguoria1d1bb32008-11-18 20:07:32 +00001616/* Remove a specific breakpoint by reference. */
1617void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001618{
bellard1fddef42005-04-17 19:16:13 +00001619#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001620 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
bellardd720b932004-04-25 17:57:43 +00001621
aliguoria1d1bb32008-11-18 20:07:32 +00001622 breakpoint_invalidate(env, breakpoint->pc);
1623
Anthony Liguori7267c092011-08-20 22:09:37 -05001624 g_free(breakpoint);
aliguoria1d1bb32008-11-18 20:07:32 +00001625#endif
1626}
1627
1628/* Remove all matching breakpoints. */
1629void cpu_breakpoint_remove_all(CPUState *env, int mask)
1630{
1631#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001632 CPUBreakpoint *bp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001633
Blue Swirl72cf2d42009-09-12 07:36:22 +00001634 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001635 if (bp->flags & mask)
1636 cpu_breakpoint_remove_by_ref(env, bp);
aliguoric0ce9982008-11-25 22:13:57 +00001637 }
bellard4c3a88a2003-07-26 12:06:08 +00001638#endif
1639}
1640
bellardc33a3462003-07-29 20:50:33 +00001641/* enable or disable single step mode. EXCP_DEBUG is returned by the
1642 CPU loop after each instruction */
1643void cpu_single_step(CPUState *env, int enabled)
1644{
bellard1fddef42005-04-17 19:16:13 +00001645#if defined(TARGET_HAS_ICE)
bellardc33a3462003-07-29 20:50:33 +00001646 if (env->singlestep_enabled != enabled) {
1647 env->singlestep_enabled = enabled;
aliguorie22a25c2009-03-12 20:12:48 +00001648 if (kvm_enabled())
1649 kvm_update_guest_debug(env, 0);
1650 else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01001651 /* must flush all the translated code to avoid inconsistencies */
aliguorie22a25c2009-03-12 20:12:48 +00001652 /* XXX: only flush what is necessary */
1653 tb_flush(env);
1654 }
bellardc33a3462003-07-29 20:50:33 +00001655 }
1656#endif
1657}
1658
bellard34865132003-10-05 14:28:56 +00001659/* enable or disable low level logging */
1660void cpu_set_log(int log_flags)
1661{
1662 loglevel = log_flags;
1663 if (loglevel && !logfile) {
pbrook11fcfab2007-07-01 18:21:11 +00001664 logfile = fopen(logfilename, log_append ? "a" : "w");
bellard34865132003-10-05 14:28:56 +00001665 if (!logfile) {
1666 perror(logfilename);
1667 _exit(1);
1668 }
bellard9fa3e852004-01-04 18:06:42 +00001669#if !defined(CONFIG_SOFTMMU)
1670 /* prevent glibc from using mmap() for the stdio buffer by installing one "by hand" */
1671 {
blueswir1b55266b2008-09-20 08:07:15 +00001672 static char logfile_buf[4096];
bellard9fa3e852004-01-04 18:06:42 +00001673 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1674 }
Stefan Weildaf767b2011-12-03 22:32:37 +01001675#elif defined(_WIN32)
1676 /* Win32 doesn't support line-buffering, so use unbuffered output. */
1677 setvbuf(logfile, NULL, _IONBF, 0);
1678#else
bellard34865132003-10-05 14:28:56 +00001679 setvbuf(logfile, NULL, _IOLBF, 0);
bellard9fa3e852004-01-04 18:06:42 +00001680#endif
pbrooke735b912007-06-30 13:53:24 +00001681 log_append = 1;
1682 }
1683 if (!loglevel && logfile) {
1684 fclose(logfile);
1685 logfile = NULL;
bellard34865132003-10-05 14:28:56 +00001686 }
1687}
1688
1689void cpu_set_log_filename(const char *filename)
1690{
1691 logfilename = strdup(filename);
pbrooke735b912007-06-30 13:53:24 +00001692 if (logfile) {
1693 fclose(logfile);
1694 logfile = NULL;
1695 }
1696 cpu_set_log(loglevel);
bellard34865132003-10-05 14:28:56 +00001697}
bellardc33a3462003-07-29 20:50:33 +00001698
aurel323098dba2009-03-07 21:28:24 +00001699static void cpu_unlink_tb(CPUState *env)
bellardea041c02003-06-25 16:16:50 +00001700{
pbrookd5975362008-06-07 20:50:51 +00001701 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1702 problem and hope the cpu will stop of its own accord. For userspace
1703 emulation this often isn't actually as bad as it sounds. Often
1704 signals are used primarily to interrupt blocking syscalls. */
aurel323098dba2009-03-07 21:28:24 +00001705 TranslationBlock *tb;
Anthony Liguoric227f092009-10-01 16:12:16 -05001706 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
aurel323098dba2009-03-07 21:28:24 +00001707
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001708 spin_lock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001709 tb = env->current_tb;
1710 /* if the cpu is currently executing code, we must unlink it and
1711 all the potentially executing TB */
Riku Voipiof76cfe52009-12-04 15:16:30 +02001712 if (tb) {
aurel323098dba2009-03-07 21:28:24 +00001713 env->current_tb = NULL;
1714 tb_reset_jump_recursive(tb);
aurel323098dba2009-03-07 21:28:24 +00001715 }
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001716 spin_unlock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001717}
1718
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001719#ifndef CONFIG_USER_ONLY
aurel323098dba2009-03-07 21:28:24 +00001720/* mask must never be zero, except for A20 change call */
Jan Kiszkaec6959d2011-04-13 01:32:56 +02001721static void tcg_handle_interrupt(CPUState *env, int mask)
aurel323098dba2009-03-07 21:28:24 +00001722{
1723 int old_mask;
1724
1725 old_mask = env->interrupt_request;
1726 env->interrupt_request |= mask;
1727
aliguori8edac962009-04-24 18:03:45 +00001728 /*
1729 * If called from iothread context, wake the target cpu in
1730 * case it's halted.
1731 */
Jan Kiszkab7680cb2011-03-12 17:43:51 +01001732 if (!qemu_cpu_is_self(env)) {
aliguori8edac962009-04-24 18:03:45 +00001733 qemu_cpu_kick(env);
1734 return;
1735 }
aliguori8edac962009-04-24 18:03:45 +00001736
pbrook2e70f6e2008-06-29 01:03:05 +00001737 if (use_icount) {
pbrook266910c2008-07-09 15:31:50 +00001738 env->icount_decr.u16.high = 0xffff;
pbrook2e70f6e2008-06-29 01:03:05 +00001739 if (!can_do_io(env)
aurel32be214e62009-03-06 21:48:00 +00001740 && (mask & ~old_mask) != 0) {
pbrook2e70f6e2008-06-29 01:03:05 +00001741 cpu_abort(env, "Raised interrupt while not in I/O function");
1742 }
pbrook2e70f6e2008-06-29 01:03:05 +00001743 } else {
aurel323098dba2009-03-07 21:28:24 +00001744 cpu_unlink_tb(env);
bellardea041c02003-06-25 16:16:50 +00001745 }
1746}
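
/* Note: in icount mode, setting icount_decr.u16.high to 0xffff above
 * forces the currently executing TB to take its interrupt exit at the
 * next icount check rather than unlinking the TB chain. */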
1747
Jan Kiszkaec6959d2011-04-13 01:32:56 +02001748CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1749
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001750#else /* CONFIG_USER_ONLY */
1751
1752void cpu_interrupt(CPUState *env, int mask)
1753{
1754 env->interrupt_request |= mask;
1755 cpu_unlink_tb(env);
1756}
1757#endif /* CONFIG_USER_ONLY */
1758
bellardb54ad042004-05-20 13:42:52 +00001759void cpu_reset_interrupt(CPUState *env, int mask)
1760{
1761 env->interrupt_request &= ~mask;
1762}
1763
aurel323098dba2009-03-07 21:28:24 +00001764void cpu_exit(CPUState *env)
1765{
1766 env->exit_request = 1;
1767 cpu_unlink_tb(env);
1768}
1769
blueswir1c7cd6a32008-10-02 18:27:46 +00001770const CPULogItem cpu_log_items[] = {
ths5fafdf22007-09-16 21:08:06 +00001771 { CPU_LOG_TB_OUT_ASM, "out_asm",
bellardf193c792004-03-21 17:06:25 +00001772 "show generated host assembly code for each compiled TB" },
1773 { CPU_LOG_TB_IN_ASM, "in_asm",
1774 "show target assembly code for each compiled TB" },
ths5fafdf22007-09-16 21:08:06 +00001775 { CPU_LOG_TB_OP, "op",
bellard57fec1f2008-02-01 10:50:11 +00001776 "show micro ops for each compiled TB" },
bellardf193c792004-03-21 17:06:25 +00001777 { CPU_LOG_TB_OP_OPT, "op_opt",
blueswir1e01a1152008-03-14 17:37:11 +00001778 "show micro ops "
1779#ifdef TARGET_I386
1780 "before eflags optimization and "
bellardf193c792004-03-21 17:06:25 +00001781#endif
blueswir1e01a1152008-03-14 17:37:11 +00001782 "after liveness analysis" },
bellardf193c792004-03-21 17:06:25 +00001783 { CPU_LOG_INT, "int",
1784 "show interrupts/exceptions in short format" },
1785 { CPU_LOG_EXEC, "exec",
1786 "show trace before each executed TB (lots of logs)" },
bellard9fddaa02004-05-21 12:59:32 +00001787 { CPU_LOG_TB_CPU, "cpu",
thse91c8a72007-06-03 13:35:16 +00001788 "show CPU state before block translation" },
bellardf193c792004-03-21 17:06:25 +00001789#ifdef TARGET_I386
1790 { CPU_LOG_PCALL, "pcall",
1791 "show protected mode far calls/returns/exceptions" },
aliguorieca1bdf2009-01-26 19:54:31 +00001792 { CPU_LOG_RESET, "cpu_reset",
1793 "show CPU state before CPU resets" },
bellardf193c792004-03-21 17:06:25 +00001794#endif
bellard8e3a9fd2004-10-09 17:32:58 +00001795#ifdef DEBUG_IOPORT
bellardfd872592004-05-12 19:11:15 +00001796 { CPU_LOG_IOPORT, "ioport",
1797 "show all i/o ports accesses" },
bellard8e3a9fd2004-10-09 17:32:58 +00001798#endif
bellardf193c792004-03-21 17:06:25 +00001799 { 0, NULL, NULL },
1800};
1801
1802static int cmp1(const char *s1, int n, const char *s2)
1803{
1804 if (strlen(s2) != n)
1805 return 0;
1806 return memcmp(s1, s2, n) == 0;
1807}
ths3b46e622007-09-17 08:09:54 +00001808
bellardf193c792004-03-21 17:06:25 +00001809/* takes a comma separated list of log masks. Return 0 if error. */
1810int cpu_str_to_log_mask(const char *str)
1811{
blueswir1c7cd6a32008-10-02 18:27:46 +00001812 const CPULogItem *item;
bellardf193c792004-03-21 17:06:25 +00001813 int mask;
1814 const char *p, *p1;
1815
1816 p = str;
1817 mask = 0;
1818 for(;;) {
1819 p1 = strchr(p, ',');
1820 if (!p1)
1821 p1 = p + strlen(p);
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001822 if (cmp1(p, p1 - p, "all")) {
1823 for(item = cpu_log_items; item->mask != 0; item++) {
1824 mask |= item->mask;
1825 }
1826 } else {
1827 for(item = cpu_log_items; item->mask != 0; item++) {
1828 if (cmp1(p, p1 - p, item->name))
1829 goto found;
1830 }
1831 return 0;
bellardf193c792004-03-21 17:06:25 +00001832 }
bellardf193c792004-03-21 17:06:25 +00001833 found:
1834 mask |= item->mask;
1835 if (*p1 != ',')
1836 break;
1837 p = p1 + 1;
1838 }
1839 return mask;
1840}
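
/* For example (names taken from the cpu_log_items table above):
 *
 *     int mask = cpu_str_to_log_mask("in_asm,int");
 *     // mask == (CPU_LOG_TB_IN_ASM | CPU_LOG_INT)
 *
 * "all" selects every item; any unrecognized name makes the whole call
 * return 0. */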
bellardea041c02003-06-25 16:16:50 +00001841
bellard75012672003-06-21 13:11:07 +00001842void cpu_abort(CPUState *env, const char *fmt, ...)
1843{
1844 va_list ap;
pbrook493ae1f2007-11-23 16:53:59 +00001845 va_list ap2;
bellard75012672003-06-21 13:11:07 +00001846
1847 va_start(ap, fmt);
pbrook493ae1f2007-11-23 16:53:59 +00001848 va_copy(ap2, ap);
bellard75012672003-06-21 13:11:07 +00001849 fprintf(stderr, "qemu: fatal: ");
1850 vfprintf(stderr, fmt, ap);
1851 fprintf(stderr, "\n");
1852#ifdef TARGET_I386
bellard7fe48482004-10-09 18:08:01 +00001853 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1854#else
1855 cpu_dump_state(env, stderr, fprintf, 0);
bellard75012672003-06-21 13:11:07 +00001856#endif
aliguori93fcfe32009-01-15 22:34:14 +00001857 if (qemu_log_enabled()) {
1858 qemu_log("qemu: fatal: ");
1859 qemu_log_vprintf(fmt, ap2);
1860 qemu_log("\n");
j_mayerf9373292007-09-29 12:18:20 +00001861#ifdef TARGET_I386
aliguori93fcfe32009-01-15 22:34:14 +00001862 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
j_mayerf9373292007-09-29 12:18:20 +00001863#else
aliguori93fcfe32009-01-15 22:34:14 +00001864 log_cpu_state(env, 0);
j_mayerf9373292007-09-29 12:18:20 +00001865#endif
aliguori31b1a7b2009-01-15 22:35:09 +00001866 qemu_log_flush();
aliguori93fcfe32009-01-15 22:34:14 +00001867 qemu_log_close();
balrog924edca2007-06-10 14:07:13 +00001868 }
pbrook493ae1f2007-11-23 16:53:59 +00001869 va_end(ap2);
j_mayerf9373292007-09-29 12:18:20 +00001870 va_end(ap);
Riku Voipiofd052bf2010-01-25 14:30:49 +02001871#if defined(CONFIG_USER_ONLY)
1872 {
1873 struct sigaction act;
1874 sigfillset(&act.sa_mask);
1875 act.sa_handler = SIG_DFL;
1876 sigaction(SIGABRT, &act, NULL);
1877 }
1878#endif
bellard75012672003-06-21 13:11:07 +00001879 abort();
1880}
1881
thsc5be9f02007-02-28 20:20:53 +00001882CPUState *cpu_copy(CPUState *env)
1883{
ths01ba9812007-12-09 02:22:57 +00001884 CPUState *new_env = cpu_init(env->cpu_model_str);
thsc5be9f02007-02-28 20:20:53 +00001885 CPUState *next_cpu = new_env->next_cpu;
1886 int cpu_index = new_env->cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001887#if defined(TARGET_HAS_ICE)
1888 CPUBreakpoint *bp;
1889 CPUWatchpoint *wp;
1890#endif
1891
thsc5be9f02007-02-28 20:20:53 +00001892 memcpy(new_env, env, sizeof(CPUState));
aliguori5a38f082009-01-15 20:16:51 +00001893
1894 /* Preserve chaining and index. */
thsc5be9f02007-02-28 20:20:53 +00001895 new_env->next_cpu = next_cpu;
1896 new_env->cpu_index = cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001897
1898 /* Clone all break/watchpoints.
1899 Note: Once we support ptrace with hw-debug register access, make sure
1900 BP_CPU break/watchpoints are handled correctly on clone. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00001901 QTAILQ_INIT(&new_env->breakpoints);
1902 QTAILQ_INIT(&new_env->watchpoints);
aliguori5a38f082009-01-15 20:16:51 +00001903#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001904 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001905 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1906 }
Blue Swirl72cf2d42009-09-12 07:36:22 +00001907 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001908 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1909 wp->flags, NULL);
1910 }
1911#endif
1912
thsc5be9f02007-02-28 20:20:53 +00001913 return new_env;
1914}
1915
bellard01243112004-01-04 15:48:17 +00001916#if !defined(CONFIG_USER_ONLY)
1917
edgar_igl5c751e92008-05-06 08:44:21 +00001918static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1919{
1920 unsigned int i;
1921
1922 /* Discard jump cache entries for any tb which might potentially
1923 overlap the flushed page. */
1924 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1925 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001926 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001927
1928 i = tb_jmp_cache_hash_page(addr);
1929 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001930 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001931}
1932
Igor Kovalenko08738982009-07-12 02:15:40 +04001933static CPUTLBEntry s_cputlb_empty_entry = {
1934 .addr_read = -1,
1935 .addr_write = -1,
1936 .addr_code = -1,
1937 .addend = -1,
1938};
1939
Peter Maydell771124e2012-01-17 13:23:13 +00001940/* NOTE:
1941 * If flush_global is true (the usual case), flush all tlb entries.
1942 * If flush_global is false, flush (at least) all tlb entries not
1943 * marked global.
1944 *
1945 * Since QEMU doesn't currently implement a global/not-global flag
1946 * for tlb entries, at the moment tlb_flush() will also flush all
1947 * tlb entries in the flush_global == false case. This is OK because
1948 * CPU architectures generally permit an implementation to drop
1949 * entries from the TLB at any time, so flushing more entries than
1950 * required is only an efficiency issue, not a correctness issue.
1951 */
bellardee8b7022004-02-03 23:35:10 +00001952void tlb_flush(CPUState *env, int flush_global)
bellard33417e72003-08-10 21:47:01 +00001953{
bellard33417e72003-08-10 21:47:01 +00001954 int i;
bellard01243112004-01-04 15:48:17 +00001955
bellard9fa3e852004-01-04 18:06:42 +00001956#if defined(DEBUG_TLB)
1957 printf("tlb_flush:\n");
1958#endif
bellard01243112004-01-04 15:48:17 +00001959 /* must reset current TB so that interrupts cannot modify the
1960 links while we are modifying them */
1961 env->current_tb = NULL;
1962
bellard33417e72003-08-10 21:47:01 +00001963 for(i = 0; i < CPU_TLB_SIZE; i++) {
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001964 int mmu_idx;
1965 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
Igor Kovalenko08738982009-07-12 02:15:40 +04001966 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001967 }
bellard33417e72003-08-10 21:47:01 +00001968 }
bellard9fa3e852004-01-04 18:06:42 +00001969
bellard8a40a182005-11-20 10:35:40 +00001970 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
bellard9fa3e852004-01-04 18:06:42 +00001971
Paul Brookd4c430a2010-03-17 02:14:28 +00001972 env->tlb_flush_addr = -1;
1973 env->tlb_flush_mask = 0;
bellarde3db7222005-01-26 22:00:47 +00001974 tlb_flush_count++;
bellard33417e72003-08-10 21:47:01 +00001975}
1976
bellard274da6b2004-05-20 21:56:27 +00001977static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
bellard61382a52003-10-27 21:22:23 +00001978{
ths5fafdf22007-09-16 21:08:06 +00001979 if (addr == (tlb_entry->addr_read &
bellard84b7b8e2005-11-28 21:19:04 +00001980 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00001981 addr == (tlb_entry->addr_write &
bellard84b7b8e2005-11-28 21:19:04 +00001982 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00001983 addr == (tlb_entry->addr_code &
bellard84b7b8e2005-11-28 21:19:04 +00001984 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
Igor Kovalenko08738982009-07-12 02:15:40 +04001985 *tlb_entry = s_cputlb_empty_entry;
bellard84b7b8e2005-11-28 21:19:04 +00001986 }
bellard61382a52003-10-27 21:22:23 +00001987}
1988
bellard2e126692004-04-25 21:28:44 +00001989void tlb_flush_page(CPUState *env, target_ulong addr)
bellard33417e72003-08-10 21:47:01 +00001990{
bellard8a40a182005-11-20 10:35:40 +00001991 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001992 int mmu_idx;
bellard01243112004-01-04 15:48:17 +00001993
bellard9fa3e852004-01-04 18:06:42 +00001994#if defined(DEBUG_TLB)
bellard108c49b2005-07-24 12:55:09 +00001995 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
bellard9fa3e852004-01-04 18:06:42 +00001996#endif
Paul Brookd4c430a2010-03-17 02:14:28 +00001997 /* Check if we need to flush due to large pages. */
1998 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
1999#if defined(DEBUG_TLB)
2000 printf("tlb_flush_page: forced full flush ("
2001 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
2002 env->tlb_flush_addr, env->tlb_flush_mask);
2003#endif
2004 tlb_flush(env, 1);
2005 return;
2006 }
bellard01243112004-01-04 15:48:17 +00002007 /* must reset current TB so that interrupts cannot modify the
2008 links while we are modifying them */
2009 env->current_tb = NULL;
bellard33417e72003-08-10 21:47:01 +00002010
bellard61382a52003-10-27 21:22:23 +00002011 addr &= TARGET_PAGE_MASK;
bellard33417e72003-08-10 21:47:01 +00002012 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002013 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2014 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
bellard01243112004-01-04 15:48:17 +00002015
edgar_igl5c751e92008-05-06 08:44:21 +00002016 tlb_flush_jmp_cache(env, addr);
bellard9fa3e852004-01-04 18:06:42 +00002017}
2018
bellard9fa3e852004-01-04 18:06:42 +00002019/* update the TLBs so that writes to code in the ram page 'ram_addr'
2020 can be detected */
Anthony Liguoric227f092009-10-01 16:12:16 -05002021static void tlb_protect_code(ram_addr_t ram_addr)
bellard61382a52003-10-27 21:22:23 +00002022{
ths5fafdf22007-09-16 21:08:06 +00002023 cpu_physical_memory_reset_dirty(ram_addr,
bellard6a00d602005-11-21 23:25:50 +00002024 ram_addr + TARGET_PAGE_SIZE,
2025 CODE_DIRTY_FLAG);
bellard9fa3e852004-01-04 18:06:42 +00002026}
2027
bellard9fa3e852004-01-04 18:06:42 +00002028/* update the TLB so that writes in the ram page 'ram_addr' are no longer
bellard3a7d9292005-08-21 09:26:42 +00002029 tested for self modifying code */
Anthony Liguoric227f092009-10-01 16:12:16 -05002030static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
bellard3a7d9292005-08-21 09:26:42 +00002031 target_ulong vaddr)
bellard9fa3e852004-01-04 18:06:42 +00002032{
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002033 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
bellard1ccde1c2004-02-06 19:46:14 +00002034}
2035
ths5fafdf22007-09-16 21:08:06 +00002036static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
bellard1ccde1c2004-02-06 19:46:14 +00002037 unsigned long start, unsigned long length)
2038{
2039 unsigned long addr;
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002040 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
bellard84b7b8e2005-11-28 21:19:04 +00002041 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
bellard1ccde1c2004-02-06 19:46:14 +00002042 if ((addr - start) < length) {
pbrook0f459d12008-06-09 00:20:13 +00002043 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
bellard1ccde1c2004-02-06 19:46:14 +00002044 }
2045 }
2046}
2047
pbrook5579c7f2009-04-11 14:47:08 +00002048/* Note: start and end must be within the same ram block. */
Anthony Liguoric227f092009-10-01 16:12:16 -05002049void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
bellard0a962c02005-02-10 22:00:27 +00002050 int dirty_flags)
bellard1ccde1c2004-02-06 19:46:14 +00002051{
2052 CPUState *env;
bellard4f2ac232004-04-26 19:44:02 +00002053 unsigned long length, start1;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002054 int i;
bellard1ccde1c2004-02-06 19:46:14 +00002055
2056 start &= TARGET_PAGE_MASK;
2057 end = TARGET_PAGE_ALIGN(end);
2058
2059 length = end - start;
2060 if (length == 0)
2061 return;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002062 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00002063
bellard1ccde1c2004-02-06 19:46:14 +00002064 /* we modify the TLB cache so that the dirty bit will be set again
2065 when accessing the range */
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02002066 start1 = (unsigned long)qemu_safe_ram_ptr(start);
Stefan Weila57d23e2011-04-30 22:49:26 +02002067 /* Check that we don't span multiple blocks - this breaks the
pbrook5579c7f2009-04-11 14:47:08 +00002068 address comparisons below. */
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02002069 if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
pbrook5579c7f2009-04-11 14:47:08 +00002070 != (end - 1) - start) {
2071 abort();
2072 }
2073
bellard6a00d602005-11-21 23:25:50 +00002074 for(env = first_cpu; env != NULL; env = env->next_cpu) {
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002075 int mmu_idx;
2076 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2077 for(i = 0; i < CPU_TLB_SIZE; i++)
2078 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2079 start1, length);
2080 }
bellard6a00d602005-11-21 23:25:50 +00002081 }
bellard1ccde1c2004-02-06 19:46:14 +00002082}
2083
aliguori74576192008-10-06 14:02:03 +00002084int cpu_physical_memory_set_dirty_tracking(int enable)
2085{
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002086 int ret = 0;
aliguori74576192008-10-06 14:02:03 +00002087 in_migration = enable;
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002088 return ret;
aliguori74576192008-10-06 14:02:03 +00002089}
2090
bellard3a7d9292005-08-21 09:26:42 +00002091static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2092{
Anthony Liguoric227f092009-10-01 16:12:16 -05002093 ram_addr_t ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00002094 void *p;
bellard3a7d9292005-08-21 09:26:42 +00002095
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002096 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
pbrook5579c7f2009-04-11 14:47:08 +00002097 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2098 + tlb_entry->addend);
Marcelo Tosattie8902612010-10-11 15:31:19 -03002099 ram_addr = qemu_ram_addr_from_host_nofail(p);
bellard3a7d9292005-08-21 09:26:42 +00002100 if (!cpu_physical_memory_is_dirty(ram_addr)) {
pbrook0f459d12008-06-09 00:20:13 +00002101 tlb_entry->addr_write |= TLB_NOTDIRTY;
bellard3a7d9292005-08-21 09:26:42 +00002102 }
2103 }
2104}
2105
2106/* update the TLB according to the current state of the dirty bits */
2107void cpu_tlb_update_dirty(CPUState *env)
2108{
2109 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002110 int mmu_idx;
2111 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2112 for(i = 0; i < CPU_TLB_SIZE; i++)
2113 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2114 }
bellard3a7d9292005-08-21 09:26:42 +00002115}
2116
pbrook0f459d12008-06-09 00:20:13 +00002117static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002118{
pbrook0f459d12008-06-09 00:20:13 +00002119 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2120 tlb_entry->addr_write = vaddr;
bellard1ccde1c2004-02-06 19:46:14 +00002121}
2122
pbrook0f459d12008-06-09 00:20:13 +00002123/* update the TLB corresponding to virtual page vaddr
2124 so that it is no longer dirty */
2125static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002126{
bellard1ccde1c2004-02-06 19:46:14 +00002127 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002128 int mmu_idx;
bellard1ccde1c2004-02-06 19:46:14 +00002129
pbrook0f459d12008-06-09 00:20:13 +00002130 vaddr &= TARGET_PAGE_MASK;
bellard1ccde1c2004-02-06 19:46:14 +00002131 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002132 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2133 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
bellard9fa3e852004-01-04 18:06:42 +00002134}
2135
Paul Brookd4c430a2010-03-17 02:14:28 +00002136/* Our TLB does not support large pages, so remember the area covered by
2137 large pages and trigger a full TLB flush if these are invalidated. */
2138static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2139 target_ulong size)
2140{
2141 target_ulong mask = ~(size - 1);
2142
2143 if (env->tlb_flush_addr == (target_ulong)-1) {
2144 env->tlb_flush_addr = vaddr & mask;
2145 env->tlb_flush_mask = mask;
2146 return;
2147 }
2148 /* Extend the existing region to include the new page.
2149 This is a compromise between unnecessary flushes and the cost
2150 of maintaining a full variable size TLB. */
2151 mask &= env->tlb_flush_mask;
2152 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2153 mask <<= 1;
2154 }
2155 env->tlb_flush_addr &= mask;
2156 env->tlb_flush_mask = mask;
2157}
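
/* Worked example (hypothetical addresses, 32-bit target_ulong assumed):
 * a first 64KB page at 0x12340000 records addr=0x12340000 and
 * mask=0xffff0000. A second 64KB page at 0x12380000 then widens the
 * mask one bit at a time until the XOR of the two addresses vanishes
 * under it, ending with addr=0x12300000 and mask=0xfff00000 -- one 1MB
 * region covering both pages. */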
2158
Avi Kivity06ef3522012-02-13 16:11:22 +02002159static bool is_ram_rom(MemoryRegionSection *s)
Avi Kivity1d393fa2012-01-01 21:15:42 +02002160{
Avi Kivity06ef3522012-02-13 16:11:22 +02002161 return memory_region_is_ram(s->mr);
Avi Kivity1d393fa2012-01-01 21:15:42 +02002162}
2163
Avi Kivity06ef3522012-02-13 16:11:22 +02002164static bool is_romd(MemoryRegionSection *s)
Avi Kivity75c578d2012-01-02 15:40:52 +02002165{
Avi Kivity06ef3522012-02-13 16:11:22 +02002166 MemoryRegion *mr = s->mr;
Avi Kivity75c578d2012-01-02 15:40:52 +02002167
Avi Kivity75c578d2012-01-02 15:40:52 +02002168 return mr->rom_device && mr->readable;
2169}
2170
Avi Kivity06ef3522012-02-13 16:11:22 +02002171static bool is_ram_rom_romd(MemoryRegionSection *s)
Avi Kivity1d393fa2012-01-01 21:15:42 +02002172{
Avi Kivity06ef3522012-02-13 16:11:22 +02002173 return is_ram_rom(s) || is_romd(s);
Avi Kivity1d393fa2012-01-01 21:15:42 +02002174}
2175
Paul Brookd4c430a2010-03-17 02:14:28 +00002176/* Add a new TLB entry. At most one entry for a given virtual address
2177 is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
2178 supplied size is only used by tlb_flush_page. */
2179void tlb_set_page(CPUState *env, target_ulong vaddr,
2180 target_phys_addr_t paddr, int prot,
2181 int mmu_idx, target_ulong size)
bellard9fa3e852004-01-04 18:06:42 +00002182{
Avi Kivity06ef3522012-02-13 16:11:22 +02002183 MemoryRegionSection section;
bellard9fa3e852004-01-04 18:06:42 +00002184 unsigned int index;
bellard4f2ac232004-04-26 19:44:02 +00002185 target_ulong address;
pbrook0f459d12008-06-09 00:20:13 +00002186 target_ulong code_address;
Paul Brook355b1942010-04-05 00:28:53 +01002187 unsigned long addend;
bellard84b7b8e2005-11-28 21:19:04 +00002188 CPUTLBEntry *te;
aliguoria1d1bb32008-11-18 20:07:32 +00002189 CPUWatchpoint *wp;
Anthony Liguoric227f092009-10-01 16:12:16 -05002190 target_phys_addr_t iotlb;
bellard9fa3e852004-01-04 18:06:42 +00002191
Paul Brookd4c430a2010-03-17 02:14:28 +00002192 assert(size >= TARGET_PAGE_SIZE);
2193 if (size != TARGET_PAGE_SIZE) {
2194 tlb_add_large_page(env, vaddr, size);
2195 }
Avi Kivity06ef3522012-02-13 16:11:22 +02002196 section = phys_page_find(paddr >> TARGET_PAGE_BITS);
bellard9fa3e852004-01-04 18:06:42 +00002197#if defined(DEBUG_TLB)
Stefan Weil7fd3f492010-09-30 22:39:51 +02002198 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
2199 " prot=%x idx=%d pd=0x%08lx\n",
2200 vaddr, paddr, prot, mmu_idx, pd);
bellard9fa3e852004-01-04 18:06:42 +00002201#endif
2202
pbrook0f459d12008-06-09 00:20:13 +00002203 address = vaddr;
Avi Kivity06ef3522012-02-13 16:11:22 +02002204 if (is_ram_rom_romd(&section)) {
2205 addend = (unsigned long)(memory_region_get_ram_ptr(section.mr)
2206 + section.offset_within_region);
2207 } else {
2208 /* IO memory case (romd handled later) */
2209 address |= TLB_MMIO;
2210 addend = 0;
2211 }
2214 if (is_ram_rom(&section)) {
pbrook0f459d12008-06-09 00:20:13 +00002215 /* Normal RAM. */
Avi Kivity06ef3522012-02-13 16:11:22 +02002216 iotlb = (memory_region_get_ram_addr(section.mr)
2217 + section.offset_within_region) & TARGET_PAGE_MASK;
2218 if (!section.readonly)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002219 iotlb |= io_mem_notdirty.ram_addr;
pbrook0f459d12008-06-09 00:20:13 +00002220 else
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002221 iotlb |= io_mem_rom.ram_addr;
pbrook0f459d12008-06-09 00:20:13 +00002222 } else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002223 /* IO handlers are currently passed a physical address.
pbrook0f459d12008-06-09 00:20:13 +00002224 It would be nice to pass an offset from the base address
2225 of that region. This would avoid having to special case RAM,
2226 and avoid full address decoding in every device.
2227 We can't use the high bits of the iotlb value for this because
2228 romd regions use them as a ram address. */
Avi Kivity06ef3522012-02-13 16:11:22 +02002229 iotlb = memory_region_get_ram_addr(section.mr) & ~TARGET_PAGE_MASK;
2230 iotlb += section.offset_within_region;
pbrook0f459d12008-06-09 00:20:13 +00002231 }
pbrook6658ffb2007-03-16 23:58:11 +00002232
pbrook0f459d12008-06-09 00:20:13 +00002233 code_address = address;
2234 /* Make accesses to pages with watchpoints go via the
2235 watchpoint trap routines. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00002236 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00002237 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
Jun Koibf298f82010-05-06 14:36:59 +09002238 /* Avoid trapping reads of pages with a write breakpoint. */
2239 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
Avi Kivity1ec9b902012-01-02 12:47:48 +02002240 iotlb = io_mem_watch.ram_addr + paddr;
Jun Koibf298f82010-05-06 14:36:59 +09002241 address |= TLB_MMIO;
2242 break;
2243 }
pbrook6658ffb2007-03-16 23:58:11 +00002244 }
pbrook0f459d12008-06-09 00:20:13 +00002245 }
balrogd79acba2007-06-26 20:01:13 +00002246
pbrook0f459d12008-06-09 00:20:13 +00002247 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2248 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2249 te = &env->tlb_table[mmu_idx][index];
2250 te->addend = addend - vaddr;
2251 if (prot & PAGE_READ) {
2252 te->addr_read = address;
2253 } else {
2254 te->addr_read = -1;
2255 }
edgar_igl5c751e92008-05-06 08:44:21 +00002256
pbrook0f459d12008-06-09 00:20:13 +00002257 if (prot & PAGE_EXEC) {
2258 te->addr_code = code_address;
2259 } else {
2260 te->addr_code = -1;
2261 }
2262 if (prot & PAGE_WRITE) {
Avi Kivity06ef3522012-02-13 16:11:22 +02002263 if ((memory_region_is_ram(section.mr) && section.readonly)
2264 || is_romd(&section)) {
pbrook0f459d12008-06-09 00:20:13 +00002265 /* Write access calls the I/O callback. */
2266 te->addr_write = address | TLB_MMIO;
Avi Kivity06ef3522012-02-13 16:11:22 +02002267 } else if (memory_region_is_ram(section.mr)
2268 && !cpu_physical_memory_is_dirty(
2269 section.mr->ram_addr
2270 + section.offset_within_region)) {
pbrook0f459d12008-06-09 00:20:13 +00002271 te->addr_write = address | TLB_NOTDIRTY;
bellard84b7b8e2005-11-28 21:19:04 +00002272 } else {
pbrook0f459d12008-06-09 00:20:13 +00002273 te->addr_write = address;
bellard9fa3e852004-01-04 18:06:42 +00002274 }
pbrook0f459d12008-06-09 00:20:13 +00002275 } else {
2276 te->addr_write = -1;
bellard9fa3e852004-01-04 18:06:42 +00002277 }
bellard9fa3e852004-01-04 18:06:42 +00002278}
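
/* Recap of the addr_write choices above: read-only RAM and romd regions
 * get address | TLB_MMIO, so writes take the I/O callback path; clean
 * (not yet dirty) RAM gets address | TLB_NOTDIRTY, so the first write
 * traps to update the dirty bitmap; everything else keeps the address
 * computed earlier, which already carries TLB_MMIO for I/O regions. */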
2279
bellard01243112004-01-04 15:48:17 +00002280#else
2281
bellardee8b7022004-02-03 23:35:10 +00002282void tlb_flush(CPUState *env, int flush_global)
bellard01243112004-01-04 15:48:17 +00002283{
2284}
2285
bellard2e126692004-04-25 21:28:44 +00002286void tlb_flush_page(CPUState *env, target_ulong addr)
bellard01243112004-01-04 15:48:17 +00002287{
2288}
2289
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002290/*
2291 * Walks guest process memory "regions" one by one
2292 * and calls callback function 'fn' for each region.
2293 */
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002294
2295struct walk_memory_regions_data
bellard9fa3e852004-01-04 18:06:42 +00002296{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002297 walk_memory_regions_fn fn;
2298 void *priv;
2299 unsigned long start;
2300 int prot;
2301};
bellard9fa3e852004-01-04 18:06:42 +00002302
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002303static int walk_memory_regions_end(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00002304 abi_ulong end, int new_prot)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002305{
2306 if (data->start != -1ul) {
2307 int rc = data->fn(data->priv, data->start, end, data->prot);
2308 if (rc != 0) {
2309 return rc;
bellard9fa3e852004-01-04 18:06:42 +00002310 }
bellard33417e72003-08-10 21:47:01 +00002311 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002312
2313 data->start = (new_prot ? end : -1ul);
2314 data->prot = new_prot;
2315
2316 return 0;
2317}
2318
2319static int walk_memory_regions_1(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00002320 abi_ulong base, int level, void **lp)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002321{
Paul Brookb480d9b2010-03-12 23:23:29 +00002322 abi_ulong pa;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002323 int i, rc;
2324
2325 if (*lp == NULL) {
2326 return walk_memory_regions_end(data, base, 0);
2327 }
2328
2329 if (level == 0) {
2330 PageDesc *pd = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00002331 for (i = 0; i < L2_SIZE; ++i) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002332 int prot = pd[i].flags;
2333
2334 pa = base | (i << TARGET_PAGE_BITS);
2335 if (prot != data->prot) {
2336 rc = walk_memory_regions_end(data, pa, prot);
2337 if (rc != 0) {
2338 return rc;
2339 }
2340 }
2341 }
2342 } else {
2343 void **pp = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00002344 for (i = 0; i < L2_SIZE; ++i) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002345 pa = base | ((abi_ulong)i <<
2346 (TARGET_PAGE_BITS + L2_BITS * level));
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002347 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2348 if (rc != 0) {
2349 return rc;
2350 }
2351 }
2352 }
2353
2354 return 0;
2355}
2356
2357int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2358{
2359 struct walk_memory_regions_data data;
2360 unsigned long i;
2361
2362 data.fn = fn;
2363 data.priv = priv;
2364 data.start = -1ul;
2365 data.prot = 0;
2366
2367 for (i = 0; i < V_L1_SIZE; i++) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002368 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002369 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2370 if (rc != 0) {
2371 return rc;
2372 }
2373 }
2374
2375 return walk_memory_regions_end(&data, 0, 0);
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002376}
2377
Paul Brookb480d9b2010-03-12 23:23:29 +00002378static int dump_region(void *priv, abi_ulong start,
2379 abi_ulong end, unsigned long prot)
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002380{
2381 FILE *f = (FILE *)priv;
2382
Paul Brookb480d9b2010-03-12 23:23:29 +00002383 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2384 " "TARGET_ABI_FMT_lx" %c%c%c\n",
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002385 start, end, end - start,
2386 ((prot & PAGE_READ) ? 'r' : '-'),
2387 ((prot & PAGE_WRITE) ? 'w' : '-'),
2388 ((prot & PAGE_EXEC) ? 'x' : '-'));
2389
2390 return (0);
2391}
2392
2393/* dump memory mappings */
2394void page_dump(FILE *f)
2395{
2396 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2397 "start", "end", "size", "prot");
2398 walk_memory_regions(f, dump_region);
bellard33417e72003-08-10 21:47:01 +00002399}
2400
pbrook53a59602006-03-25 19:31:22 +00002401int page_get_flags(target_ulong address)
bellard33417e72003-08-10 21:47:01 +00002402{
bellard9fa3e852004-01-04 18:06:42 +00002403 PageDesc *p;
2404
2405 p = page_find(address >> TARGET_PAGE_BITS);
bellard33417e72003-08-10 21:47:01 +00002406 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00002407 return 0;
2408 return p->flags;
bellard33417e72003-08-10 21:47:01 +00002409}
2410
Richard Henderson376a7902010-03-10 15:57:04 -08002411/* Modify the flags of a page and invalidate the code if necessary.
2412 The flag PAGE_WRITE_ORG is positioned automatically depending
2413 on PAGE_WRITE. The mmap_lock should already be held. */
pbrook53a59602006-03-25 19:31:22 +00002414void page_set_flags(target_ulong start, target_ulong end, int flags)
bellard9fa3e852004-01-04 18:06:42 +00002415{
Richard Henderson376a7902010-03-10 15:57:04 -08002416 target_ulong addr, len;
bellard9fa3e852004-01-04 18:06:42 +00002417
Richard Henderson376a7902010-03-10 15:57:04 -08002418 /* This function should never be called with addresses outside the
2419 guest address space. If this assert fires, it probably indicates
2420 a missing call to h2g_valid. */
Paul Brookb480d9b2010-03-12 23:23:29 +00002421#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2422 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002423#endif
2424 assert(start < end);
2425
bellard9fa3e852004-01-04 18:06:42 +00002426 start = start & TARGET_PAGE_MASK;
2427 end = TARGET_PAGE_ALIGN(end);
Richard Henderson376a7902010-03-10 15:57:04 -08002428
2429 if (flags & PAGE_WRITE) {
bellard9fa3e852004-01-04 18:06:42 +00002430 flags |= PAGE_WRITE_ORG;
Richard Henderson376a7902010-03-10 15:57:04 -08002431 }
2432
2433 for (addr = start, len = end - start;
2434 len != 0;
2435 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2436 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2437
2438 /* If the write protection bit is set, then we invalidate
2439 the code inside. */
ths5fafdf22007-09-16 21:08:06 +00002440 if (!(p->flags & PAGE_WRITE) &&
bellard9fa3e852004-01-04 18:06:42 +00002441 (flags & PAGE_WRITE) &&
2442 p->first_tb) {
bellardd720b932004-04-25 17:57:43 +00002443 tb_invalidate_phys_page(addr, 0, NULL);
bellard9fa3e852004-01-04 18:06:42 +00002444 }
2445 p->flags = flags;
2446 }
bellard9fa3e852004-01-04 18:06:42 +00002447}
2448
ths3d97b402007-11-02 19:02:07 +00002449int page_check_range(target_ulong start, target_ulong len, int flags)
2450{
2451 PageDesc *p;
2452 target_ulong end;
2453 target_ulong addr;
2454
Richard Henderson376a7902010-03-10 15:57:04 -08002455 /* This function should never be called with addresses outside the
2456 guest address space. If this assert fires, it probably indicates
2457 a missing call to h2g_valid. */
Blue Swirl338e9e62010-03-13 09:48:08 +00002458#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2459 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002460#endif
2461
Richard Henderson3e0650a2010-03-29 10:54:42 -07002462 if (len == 0) {
2463 return 0;
2464 }
Richard Henderson376a7902010-03-10 15:57:04 -08002465 if (start + len - 1 < start) {
2466 /* We've wrapped around. */
balrog55f280c2008-10-28 10:24:11 +00002467 return -1;
Richard Henderson376a7902010-03-10 15:57:04 -08002468 }
balrog55f280c2008-10-28 10:24:11 +00002469
ths3d97b402007-11-02 19:02:07 +00002470 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2471 start = start & TARGET_PAGE_MASK;
2472
Richard Henderson376a7902010-03-10 15:57:04 -08002473 for (addr = start, len = end - start;
2474 len != 0;
2475 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
ths3d97b402007-11-02 19:02:07 +00002476 p = page_find(addr >> TARGET_PAGE_BITS);
2477 if (!p)
2478 return -1;
2479 if (!(p->flags & PAGE_VALID))
2480 return -1;
2481
bellarddae32702007-11-14 10:51:00 +00002482 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
ths3d97b402007-11-02 19:02:07 +00002483 return -1;
bellarddae32702007-11-14 10:51:00 +00002484 if (flags & PAGE_WRITE) {
2485 if (!(p->flags & PAGE_WRITE_ORG))
2486 return -1;
2487 /* unprotect the page if it was put read-only because it
2488 contains translated code */
2489 if (!(p->flags & PAGE_WRITE)) {
2490 if (!page_unprotect(addr, 0, NULL))
2491 return -1;
2492 }
2494 }
ths3d97b402007-11-02 19:02:07 +00002495 }
2496 return 0;
2497}
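
/* Typical caller pattern (a sketch; the error value is hypothetical and
 * depends on the caller):
 *
 *     if (page_check_range(guest_addr, size, PAGE_READ | PAGE_WRITE) != 0) {
 *         return -EFAULT;   // guest buffer not fully accessible
 *     }
 */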
2498
bellard9fa3e852004-01-04 18:06:42 +00002499/* called from signal handler: invalidate the code and unprotect the
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002500 page. Return TRUE if the fault was successfully handled. */
pbrook53a59602006-03-25 19:31:22 +00002501int page_unprotect(target_ulong address, unsigned long pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00002502{
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002503 unsigned int prot;
2504 PageDesc *p;
pbrook53a59602006-03-25 19:31:22 +00002505 target_ulong host_start, host_end, addr;
bellard9fa3e852004-01-04 18:06:42 +00002506
pbrookc8a706f2008-06-02 16:16:42 +00002507 /* Technically this isn't safe inside a signal handler. However we
2508 know this only ever happens in a synchronous SEGV handler, so in
2509 practice it seems to be ok. */
2510 mmap_lock();
2511
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002512 p = page_find(address >> TARGET_PAGE_BITS);
2513 if (!p) {
pbrookc8a706f2008-06-02 16:16:42 +00002514 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002515 return 0;
pbrookc8a706f2008-06-02 16:16:42 +00002516 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002517
bellard9fa3e852004-01-04 18:06:42 +00002518 /* if the page was really writable, then we change its
2519 protection back to writable */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002520 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2521 host_start = address & qemu_host_page_mask;
2522 host_end = host_start + qemu_host_page_size;
2523
2524 prot = 0;
2525 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2526 p = page_find(addr >> TARGET_PAGE_BITS);
2527 p->flags |= PAGE_WRITE;
2528 prot |= p->flags;
2529
bellard9fa3e852004-01-04 18:06:42 +00002530 /* and since the content will be modified, we must invalidate
2531 the corresponding translated code. */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002532 tb_invalidate_phys_page(addr, pc, puc);
bellard9fa3e852004-01-04 18:06:42 +00002533#ifdef DEBUG_TB_CHECK
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002534 tb_invalidate_check(addr);
bellard9fa3e852004-01-04 18:06:42 +00002535#endif
bellard9fa3e852004-01-04 18:06:42 +00002536 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002537 mprotect((void *)g2h(host_start), qemu_host_page_size,
2538 prot & PAGE_BITS);
2539
2540 mmap_unlock();
2541 return 1;
bellard9fa3e852004-01-04 18:06:42 +00002542 }
pbrookc8a706f2008-06-02 16:16:42 +00002543 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002544 return 0;
2545}
2546
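/* A minimal sketch, assuming a POSIX SIGSEGV handler and the h2g()
   mapping available in user-mode builds; segv_sketch is a hypothetical
   name, and the real dispatch lives in the per-host signal code rather
   than here. It shows how page_unprotect() above is typically driven. */
#if 0 /* illustration only */
static void segv_sketch(int sig, siginfo_t *info, void *puc)
{
    /* Translate the faulting host address back into the guest space. */
    target_ulong guest_addr = h2g((unsigned long)info->si_addr);

    /* If the page was read-only merely because it contains translated
       code, page_unprotect() invalidates those TBs, restores PROT_WRITE
       and returns 1: the faulting access can simply be retried. */
    if (page_unprotect(guest_addr, 0, puc)) {
        return;
    }
    /* Otherwise it is a genuine guest fault and must be forwarded. */
}
#endif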
bellard6a00d602005-11-21 23:25:50 +00002547static inline void tlb_set_dirty(CPUState *env,
2548 unsigned long addr, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002549{
2550}
bellard9fa3e852004-01-04 18:06:42 +00002551#endif /* defined(CONFIG_USER_ONLY) */
2552
pbrooke2eef172008-06-08 01:09:01 +00002553#if !defined(CONFIG_USER_ONLY)
pbrook8da3ff12008-12-01 18:59:50 +00002554
Paul Brookc04b2b72010-03-01 03:31:14 +00002555#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2556typedef struct subpage_t {
Avi Kivity70c68e42012-01-02 12:32:48 +02002557 MemoryRegion iomem;
Paul Brookc04b2b72010-03-01 03:31:14 +00002558 target_phys_addr_t base;
Avi Kivity5312bd82012-02-12 18:32:55 +02002559 uint16_t sub_section[TARGET_PAGE_SIZE];
Paul Brookc04b2b72010-03-01 03:31:14 +00002560} subpage_t;
2561
Anthony Liguoric227f092009-10-01 16:12:16 -05002562static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002563 uint16_t section);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002564static subpage_t *subpage_init(target_phys_addr_t base);
Avi Kivity5312bd82012-02-12 18:32:55 +02002565static void destroy_page_desc(uint16_t section_index)
Avi Kivity54688b12012-02-09 17:34:32 +02002566{
Avi Kivity5312bd82012-02-12 18:32:55 +02002567 MemoryRegionSection *section = &phys_sections[section_index];
2568 MemoryRegion *mr = section->mr;
Avi Kivity54688b12012-02-09 17:34:32 +02002569
2570 if (mr->subpage) {
2571 subpage_t *subpage = container_of(mr, subpage_t, iomem);
2572 memory_region_destroy(&subpage->iomem);
2573 g_free(subpage);
2574 }
2575}
2576
Avi Kivity4346ae32012-02-10 17:00:01 +02002577static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
Avi Kivity54688b12012-02-09 17:34:32 +02002578{
2579 unsigned i;
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002580 PhysPageEntry *p;
Avi Kivity54688b12012-02-09 17:34:32 +02002581
Avi Kivityc19e8802012-02-13 20:25:31 +02002582 if (lp->ptr == PHYS_MAP_NODE_NIL) {
Avi Kivity54688b12012-02-09 17:34:32 +02002583 return;
2584 }
2585
Avi Kivityc19e8802012-02-13 20:25:31 +02002586 p = phys_map_nodes[lp->ptr];
Avi Kivity4346ae32012-02-10 17:00:01 +02002587 for (i = 0; i < L2_SIZE; ++i) {
Avi Kivity07f07b32012-02-13 20:45:32 +02002588 if (!p[i].is_leaf) {
Avi Kivity54688b12012-02-09 17:34:32 +02002589 destroy_l2_mapping(&p[i], level - 1);
Avi Kivity4346ae32012-02-10 17:00:01 +02002590 } else {
Avi Kivityc19e8802012-02-13 20:25:31 +02002591 destroy_page_desc(p[i].ptr);
Avi Kivity54688b12012-02-09 17:34:32 +02002592 }
Avi Kivity54688b12012-02-09 17:34:32 +02002593 }
Avi Kivity07f07b32012-02-13 20:45:32 +02002594 lp->is_leaf = 0;
Avi Kivityc19e8802012-02-13 20:25:31 +02002595 lp->ptr = PHYS_MAP_NODE_NIL;
Avi Kivity54688b12012-02-09 17:34:32 +02002596}
2597
2598static void destroy_all_mappings(void)
2599{
Avi Kivity3eef53d2012-02-10 14:57:31 +02002600 destroy_l2_mapping(&phys_map, P_L2_LEVELS - 1);
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002601 phys_map_nodes_reset();
Avi Kivity54688b12012-02-09 17:34:32 +02002602}
2603
Avi Kivity5312bd82012-02-12 18:32:55 +02002604static uint16_t phys_section_add(MemoryRegionSection *section)
2605{
2606 if (phys_sections_nb == phys_sections_nb_alloc) {
2607 phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
2608 phys_sections = g_renew(MemoryRegionSection, phys_sections,
2609 phys_sections_nb_alloc);
2610 }
2611 phys_sections[phys_sections_nb] = *section;
2612 return phys_sections_nb++;
2613}
2614
2615static void phys_sections_clear(void)
2616{
2617 phys_sections_nb = 0;
2618}
2619
Michael S. Tsirkin8f2498f2009-09-29 18:53:16 +02002620/* register physical memory.
 2621   For RAM, the section size must be a multiple of the target page size.
 2622   A section that does not start or end on a page boundary is routed
 2623   through a subpage container (register_subpage below); the
 2624   page-aligned middle of the section is entered directly into the
 2625   physical page map by register_multipage. */
Avi Kivity0f0cb162012-02-13 17:14:32 +02002628static void register_subpage(MemoryRegionSection *section)
2629{
2630 subpage_t *subpage;
2631 target_phys_addr_t base = section->offset_within_address_space
2632 & TARGET_PAGE_MASK;
2633 MemoryRegionSection existing = phys_page_find(base >> TARGET_PAGE_BITS);
2634 MemoryRegionSection subsection = {
2635 .offset_within_address_space = base,
2636 .size = TARGET_PAGE_SIZE,
2637 };
Avi Kivity0f0cb162012-02-13 17:14:32 +02002638 target_phys_addr_t start, end;
2639
2640 assert(existing.mr->subpage || existing.mr == &io_mem_unassigned);
2641
2642 if (!(existing.mr->subpage)) {
2643 subpage = subpage_init(base);
2644 subsection.mr = &subpage->iomem;
Avi Kivity29990972012-02-13 20:21:20 +02002645 phys_page_set(base >> TARGET_PAGE_BITS, 1,
2646 phys_section_add(&subsection));
Avi Kivity0f0cb162012-02-13 17:14:32 +02002647 } else {
2648 subpage = container_of(existing.mr, subpage_t, iomem);
2649 }
2650 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
2651 end = start + section->size;
2652 subpage_register(subpage, start, end, phys_section_add(section));
2653}
2654
2655
2656static void register_multipage(MemoryRegionSection *section)
bellard33417e72003-08-10 21:47:01 +00002657{
Avi Kivitydd811242012-01-02 12:17:03 +02002658 target_phys_addr_t start_addr = section->offset_within_address_space;
2659 ram_addr_t size = section->size;
Avi Kivity29990972012-02-13 20:21:20 +02002660 target_phys_addr_t addr;
Avi Kivity5312bd82012-02-12 18:32:55 +02002661 uint16_t section_index = phys_section_add(section);
Avi Kivitydd811242012-01-02 12:17:03 +02002662
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002663 assert(size);
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002664
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002665 addr = start_addr;
Avi Kivity29990972012-02-13 20:21:20 +02002666 phys_page_set(addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
2667 section_index);
bellard33417e72003-08-10 21:47:01 +00002668}
2669
Avi Kivity0f0cb162012-02-13 17:14:32 +02002670void cpu_register_physical_memory_log(MemoryRegionSection *section,
2671 bool readonly)
2672{
2673 MemoryRegionSection now = *section, remain = *section;
2674
2675 if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
2676 || (now.size < TARGET_PAGE_SIZE)) {
2677 now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
2678 - now.offset_within_address_space,
2679 now.size);
2680 register_subpage(&now);
2681 remain.size -= now.size;
2682 remain.offset_within_address_space += now.size;
2683 remain.offset_within_region += now.size;
2684 }
2685 now = remain;
2686 now.size &= TARGET_PAGE_MASK;
2687 if (now.size) {
2688 register_multipage(&now);
2689 remain.size -= now.size;
2690 remain.offset_within_address_space += now.size;
2691 remain.offset_within_region += now.size;
2692 }
2693 now = remain;
2694 if (now.size) {
2695 register_subpage(&now);
2696 }
2697}
2698
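/* Worked example: with 4 KiB target pages, a section covering guest
   physical [0x1200, 0x3800) is registered in three pieces by the code
   above:
     head : [0x1200, 0x2000) -> register_subpage()   (partial page)
     body : [0x2000, 0x3000) -> register_multipage() (whole pages)
     tail : [0x3000, 0x3800) -> register_subpage()   (partial page)
   Only the page-aligned body is entered directly into the phys page
   map; the partial pages dispatch through a subpage_t container. */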
2699
Anthony Liguoric227f092009-10-01 16:12:16 -05002700void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002701{
2702 if (kvm_enabled())
2703 kvm_coalesce_mmio_region(addr, size);
2704}
2705
Anthony Liguoric227f092009-10-01 16:12:16 -05002706void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002707{
2708 if (kvm_enabled())
2709 kvm_uncoalesce_mmio_region(addr, size);
2710}
2711
Sheng Yang62a27442010-01-26 19:21:16 +08002712void qemu_flush_coalesced_mmio_buffer(void)
2713{
2714 if (kvm_enabled())
2715 kvm_flush_coalesced_mmio_buffer();
2716}
2717
Marcelo Tosattic9027602010-03-01 20:25:08 -03002718#if defined(__linux__) && !defined(TARGET_S390X)
2719
2720#include <sys/vfs.h>
2721
2722#define HUGETLBFS_MAGIC 0x958458f6
2723
2724static long gethugepagesize(const char *path)
2725{
2726 struct statfs fs;
2727 int ret;
2728
2729 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002730 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002731 } while (ret != 0 && errno == EINTR);
2732
2733 if (ret != 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002734 perror(path);
2735 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002736 }
2737
2738 if (fs.f_type != HUGETLBFS_MAGIC)
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002739 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002740
2741 return fs.f_bsize;
2742}
2743
Alex Williamson04b16652010-07-02 11:13:17 -06002744static void *file_ram_alloc(RAMBlock *block,
2745 ram_addr_t memory,
2746 const char *path)
Marcelo Tosattic9027602010-03-01 20:25:08 -03002747{
2748 char *filename;
2749 void *area;
2750 int fd;
2751#ifdef MAP_POPULATE
2752 int flags;
2753#endif
2754 unsigned long hpagesize;
2755
2756 hpagesize = gethugepagesize(path);
2757 if (!hpagesize) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002758 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002759 }
2760
2761 if (memory < hpagesize) {
2762 return NULL;
2763 }
2764
2765 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2766 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2767 return NULL;
2768 }
2769
2770 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002771 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002772 }
2773
2774 fd = mkstemp(filename);
2775 if (fd < 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002776 perror("unable to create backing store for hugepages");
2777 free(filename);
2778 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002779 }
2780 unlink(filename);
2781 free(filename);
2782
2783 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2784
2785 /*
2786 * ftruncate is not supported by hugetlbfs in older
2787 * hosts, so don't bother bailing out on errors.
2788 * If anything goes wrong with it under other filesystems,
2789 * mmap will fail.
2790 */
2791 if (ftruncate(fd, memory))
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002792 perror("ftruncate");
Marcelo Tosattic9027602010-03-01 20:25:08 -03002793
2794#ifdef MAP_POPULATE
2795 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2796 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2797 * to sidestep this quirk.
2798 */
2799 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2800 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2801#else
2802 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2803#endif
2804 if (area == MAP_FAILED) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002805 perror("file_ram_alloc: can't mmap RAM pages");
2806 close(fd);
2807 return (NULL);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002808 }
Alex Williamson04b16652010-07-02 11:13:17 -06002809 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002810 return area;
2811}
2812#endif
2813
Alex Williamsond17b5282010-06-25 11:08:38 -06002814static ram_addr_t find_ram_offset(ram_addr_t size)
2815{
Alex Williamson04b16652010-07-02 11:13:17 -06002816 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06002817 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002818
2819 if (QLIST_EMPTY(&ram_list.blocks))
2820 return 0;
2821
2822 QLIST_FOREACH(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002823 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002824
2825 end = block->offset + block->length;
2826
2827 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2828 if (next_block->offset >= end) {
2829 next = MIN(next, next_block->offset);
2830 }
2831 }
2832 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06002833 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06002834 mingap = next - end;
2835 }
2836 }
Alex Williamson3e837b22011-10-31 08:54:09 -06002837
2838 if (offset == RAM_ADDR_MAX) {
2839 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2840 (uint64_t)size);
2841 abort();
2842 }
2843
Alex Williamson04b16652010-07-02 11:13:17 -06002844 return offset;
2845}
2846
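/* Example of the best-fit search above: with existing blocks at
   [0, 4M) and [8M, 12M), find_ram_offset(2M) returns 4M -- the end of
   the first block -- because the 4M..8M hole is the smallest gap that
   still fits the request. */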
2847static ram_addr_t last_ram_offset(void)
2848{
Alex Williamsond17b5282010-06-25 11:08:38 -06002849 RAMBlock *block;
2850 ram_addr_t last = 0;
2851
2852 QLIST_FOREACH(block, &ram_list.blocks, next)
2853 last = MAX(last, block->offset + block->length);
2854
2855 return last;
2856}
2857
Avi Kivityc5705a72011-12-20 15:59:12 +02002858void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
Cam Macdonell84b89d72010-07-26 18:10:57 -06002859{
2860 RAMBlock *new_block, *block;
2861
Avi Kivityc5705a72011-12-20 15:59:12 +02002862 new_block = NULL;
2863 QLIST_FOREACH(block, &ram_list.blocks, next) {
2864 if (block->offset == addr) {
2865 new_block = block;
2866 break;
2867 }
2868 }
2869 assert(new_block);
2870 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002871
2872 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2873 char *id = dev->parent_bus->info->get_dev_path(dev);
2874 if (id) {
2875 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05002876 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002877 }
2878 }
2879 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2880
2881 QLIST_FOREACH(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02002882 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06002883 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2884 new_block->idstr);
2885 abort();
2886 }
2887 }
Avi Kivityc5705a72011-12-20 15:59:12 +02002888}
2889
2890ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2891 MemoryRegion *mr)
2892{
2893 RAMBlock *new_block;
2894
2895 size = TARGET_PAGE_ALIGN(size);
2896 new_block = g_malloc0(sizeof(*new_block));
Cam Macdonell84b89d72010-07-26 18:10:57 -06002897
Avi Kivity7c637362011-12-21 13:09:49 +02002898 new_block->mr = mr;
Jun Nakajima432d2682010-08-31 16:41:25 +01002899 new_block->offset = find_ram_offset(size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002900 if (host) {
2901 new_block->host = host;
Huang Yingcd19cfa2011-03-02 08:56:19 +01002902 new_block->flags |= RAM_PREALLOC_MASK;
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002903 } else {
2904 if (mem_path) {
2905#if defined (__linux__) && !defined(TARGET_S390X)
2906 new_block->host = file_ram_alloc(new_block, size, mem_path);
2907 if (!new_block->host) {
2908 new_block->host = qemu_vmalloc(size);
Andreas Färbere78815a2010-09-25 11:26:05 +00002909 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002910 }
2911#else
2912 fprintf(stderr, "-mem-path option unsupported\n");
2913 exit(1);
2914#endif
2915 } else {
2916#if defined(TARGET_S390X) && defined(CONFIG_KVM)
Christian Borntraegerff836782011-05-10 14:49:10 +02002917 /* S390 KVM requires the topmost vma of the RAM to be smaller than
 2918           a system-defined value, which is at least 256GB. Larger systems
2919 have larger values. We put the guest between the end of data
2920 segment (system break) and this value. We use 32GB as a base to
2921 have enough room for the system break to grow. */
2922 new_block->host = mmap((void*)0x800000000, size,
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002923 PROT_EXEC|PROT_READ|PROT_WRITE,
Christian Borntraegerff836782011-05-10 14:49:10 +02002924 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
Alexander Graffb8b2732011-05-20 17:33:28 +02002925 if (new_block->host == MAP_FAILED) {
2926 fprintf(stderr, "Allocating RAM failed\n");
2927 abort();
2928 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002929#else
Jan Kiszka868bb332011-06-21 22:59:09 +02002930 if (xen_enabled()) {
Avi Kivityfce537d2011-12-18 15:48:55 +02002931 xen_ram_alloc(new_block->offset, size, mr);
Jun Nakajima432d2682010-08-31 16:41:25 +01002932 } else {
2933 new_block->host = qemu_vmalloc(size);
2934 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002935#endif
Andreas Färbere78815a2010-09-25 11:26:05 +00002936 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002937 }
2938 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06002939 new_block->length = size;
2940
2941 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2942
Anthony Liguori7267c092011-08-20 22:09:37 -05002943 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
Cam Macdonell84b89d72010-07-26 18:10:57 -06002944 last_ram_offset() >> TARGET_PAGE_BITS);
2945 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2946 0xff, size >> TARGET_PAGE_BITS);
2947
2948 if (kvm_enabled())
2949 kvm_setup_guest_memory(new_block->host, size);
2950
2951 return new_block->offset;
2952}
2953
Avi Kivityc5705a72011-12-20 15:59:12 +02002954ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
pbrook94a6b542009-04-11 17:15:54 +00002955{
Avi Kivityc5705a72011-12-20 15:59:12 +02002956 return qemu_ram_alloc_from_ptr(size, NULL, mr);
pbrook94a6b542009-04-11 17:15:54 +00002957}
bellarde9a1ab12007-02-08 23:08:38 +00002958
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002959void qemu_ram_free_from_ptr(ram_addr_t addr)
2960{
2961 RAMBlock *block;
2962
2963 QLIST_FOREACH(block, &ram_list.blocks, next) {
2964 if (addr == block->offset) {
2965 QLIST_REMOVE(block, next);
Anthony Liguori7267c092011-08-20 22:09:37 -05002966 g_free(block);
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002967 return;
2968 }
2969 }
2970}
2971
Anthony Liguoric227f092009-10-01 16:12:16 -05002972void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00002973{
Alex Williamson04b16652010-07-02 11:13:17 -06002974 RAMBlock *block;
2975
2976 QLIST_FOREACH(block, &ram_list.blocks, next) {
2977 if (addr == block->offset) {
2978 QLIST_REMOVE(block, next);
Huang Yingcd19cfa2011-03-02 08:56:19 +01002979 if (block->flags & RAM_PREALLOC_MASK) {
2980 ;
2981 } else if (mem_path) {
Alex Williamson04b16652010-07-02 11:13:17 -06002982#if defined (__linux__) && !defined(TARGET_S390X)
2983 if (block->fd) {
2984 munmap(block->host, block->length);
2985 close(block->fd);
2986 } else {
2987 qemu_vfree(block->host);
2988 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01002989#else
2990 abort();
Alex Williamson04b16652010-07-02 11:13:17 -06002991#endif
2992 } else {
2993#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2994 munmap(block->host, block->length);
2995#else
Jan Kiszka868bb332011-06-21 22:59:09 +02002996 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002997 xen_invalidate_map_cache_entry(block->host);
Jun Nakajima432d2682010-08-31 16:41:25 +01002998 } else {
2999 qemu_vfree(block->host);
3000 }
Alex Williamson04b16652010-07-02 11:13:17 -06003001#endif
3002 }
Anthony Liguori7267c092011-08-20 22:09:37 -05003003 g_free(block);
Alex Williamson04b16652010-07-02 11:13:17 -06003004 return;
3005 }
3006 }
3007
bellarde9a1ab12007-02-08 23:08:38 +00003008}
3009
Huang Yingcd19cfa2011-03-02 08:56:19 +01003010#ifndef _WIN32
3011void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
3012{
3013 RAMBlock *block;
3014 ram_addr_t offset;
3015 int flags;
3016 void *area, *vaddr;
3017
3018 QLIST_FOREACH(block, &ram_list.blocks, next) {
3019 offset = addr - block->offset;
3020 if (offset < block->length) {
3021 vaddr = block->host + offset;
3022 if (block->flags & RAM_PREALLOC_MASK) {
3023 ;
3024 } else {
3025 flags = MAP_FIXED;
3026 munmap(vaddr, length);
3027 if (mem_path) {
3028#if defined(__linux__) && !defined(TARGET_S390X)
3029 if (block->fd) {
3030#ifdef MAP_POPULATE
3031 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
3032 MAP_PRIVATE;
3033#else
3034 flags |= MAP_PRIVATE;
3035#endif
3036 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3037 flags, block->fd, offset);
3038 } else {
3039 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3040 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3041 flags, -1, 0);
3042 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01003043#else
3044 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01003045#endif
3046 } else {
3047#if defined(TARGET_S390X) && defined(CONFIG_KVM)
3048 flags |= MAP_SHARED | MAP_ANONYMOUS;
3049 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
3050 flags, -1, 0);
3051#else
3052 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3053 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3054 flags, -1, 0);
3055#endif
3056 }
3057 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00003058 fprintf(stderr, "Could not remap addr: "
3059 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01003060 length, addr);
3061 exit(1);
3062 }
3063 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
3064 }
3065 return;
3066 }
3067 }
3068}
3069#endif /* !_WIN32 */
3070
pbrookdc828ca2009-04-09 22:21:07 +00003071/* Return a host pointer to ram allocated with qemu_ram_alloc.
pbrook5579c7f2009-04-11 14:47:08 +00003072 With the exception of the softmmu code in this file, this should
3073 only be used for local memory (e.g. video ram) that the device owns,
3074 and knows it isn't going to access beyond the end of the block.
3075
3076 It should not be used for general purpose DMA.
3077 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
3078 */
Anthony Liguoric227f092009-10-01 16:12:16 -05003079void *qemu_get_ram_ptr(ram_addr_t addr)
pbrookdc828ca2009-04-09 22:21:07 +00003080{
pbrook94a6b542009-04-11 17:15:54 +00003081 RAMBlock *block;
3082
Alex Williamsonf471a172010-06-11 11:11:42 -06003083 QLIST_FOREACH(block, &ram_list.blocks, next) {
3084 if (addr - block->offset < block->length) {
Vincent Palatin7d82af32011-03-10 15:47:46 -05003085            /* Move this entry to the start of the list. */
3086 if (block != QLIST_FIRST(&ram_list.blocks)) {
3087 QLIST_REMOVE(block, next);
3088 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
3089 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003090 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003091 /* We need to check if the requested address is in the RAM
3092 * because we don't want to map the entire memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003093 * In that case just map until the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01003094 */
3095 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003096 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01003097 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003098 block->host =
3099 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01003100 }
3101 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003102 return block->host + (addr - block->offset);
3103 }
pbrook94a6b542009-04-11 17:15:54 +00003104 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003105
3106 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3107 abort();
3108
3109 return NULL;
pbrookdc828ca2009-04-09 22:21:07 +00003110}
3111
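/* A minimal usage sketch, assuming 'vram_offset' was returned earlier
   by qemu_ram_alloc() for a device-private VRAM block (both the name
   and the function below are hypothetical). This is the device-local
   pattern the comment above describes; general-purpose DMA should go
   through cpu_physical_memory_map/cpu_physical_memory_rw instead. */
#if 0 /* illustration only */
static void vram_touch_sketch(ram_addr_t vram_offset)
{
    uint8_t *vram = qemu_get_ram_ptr(vram_offset);
    vram[0] = 0xff;          /* device writes its own backing store */
    qemu_put_ram_ptr(vram);  /* balances the Xen map-cache reference */
}
#endif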
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02003112/* Return a host pointer to ram allocated with qemu_ram_alloc.
3113 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3114 */
3115void *qemu_safe_ram_ptr(ram_addr_t addr)
3116{
3117 RAMBlock *block;
3118
3119 QLIST_FOREACH(block, &ram_list.blocks, next) {
3120 if (addr - block->offset < block->length) {
Jan Kiszka868bb332011-06-21 22:59:09 +02003121 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003122 /* We need to check if the requested address is in the RAM
3123 * because we don't want to map the entire memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003124 * In that case just map until the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01003125 */
3126 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003127 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01003128 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003129 block->host =
3130 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01003131 }
3132 }
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02003133 return block->host + (addr - block->offset);
3134 }
3135 }
3136
3137 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3138 abort();
3139
3140 return NULL;
3141}
3142
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003143/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
3144 * but takes a size argument */
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003145void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003146{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003147 if (*size == 0) {
3148 return NULL;
3149 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003150 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003151 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02003152 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003153 RAMBlock *block;
3154
3155 QLIST_FOREACH(block, &ram_list.blocks, next) {
3156 if (addr - block->offset < block->length) {
3157 if (addr - block->offset + *size > block->length)
3158 *size = block->length - addr + block->offset;
3159 return block->host + (addr - block->offset);
3160 }
3161 }
3162
3163 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3164 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003165 }
3166}
3167
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003168void qemu_put_ram_ptr(void *addr)
3169{
3170 trace_qemu_put_ram_ptr(addr);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003171}
3172
Marcelo Tosattie8902612010-10-11 15:31:19 -03003173int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00003174{
pbrook94a6b542009-04-11 17:15:54 +00003175 RAMBlock *block;
3176 uint8_t *host = ptr;
3177
Jan Kiszka868bb332011-06-21 22:59:09 +02003178 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003179 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003180 return 0;
3181 }
3182
Alex Williamsonf471a172010-06-11 11:11:42 -06003183 QLIST_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003184        /* This case happens when the block is not mapped. */
3185 if (block->host == NULL) {
3186 continue;
3187 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003188 if (host - block->host < block->length) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03003189 *ram_addr = block->offset + (host - block->host);
3190 return 0;
Alex Williamsonf471a172010-06-11 11:11:42 -06003191 }
pbrook94a6b542009-04-11 17:15:54 +00003192 }
Jun Nakajima432d2682010-08-31 16:41:25 +01003193
Marcelo Tosattie8902612010-10-11 15:31:19 -03003194 return -1;
3195}
Alex Williamsonf471a172010-06-11 11:11:42 -06003196
Marcelo Tosattie8902612010-10-11 15:31:19 -03003197/* Some of the softmmu routines need to translate from a host pointer
3198 (typically a TLB entry) back to a ram offset. */
3199ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3200{
3201 ram_addr_t ram_addr;
Alex Williamsonf471a172010-06-11 11:11:42 -06003202
Marcelo Tosattie8902612010-10-11 15:31:19 -03003203 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3204 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3205 abort();
3206 }
3207 return ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00003208}
3209
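/* Sketch: the two translations above are inverses for any address that
   lies inside a registered RAM block. */
#if 0 /* illustration only */
static void ram_addr_roundtrip_sketch(ram_addr_t addr)
{
    void *host = qemu_get_ram_ptr(addr);
    assert(qemu_ram_addr_from_host_nofail(host) == addr);
}
#endif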
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003210static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
3211 unsigned size)
bellard33417e72003-08-10 21:47:01 +00003212{
pbrook67d3b952006-12-18 05:03:52 +00003213#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00003214 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
pbrook67d3b952006-12-18 05:03:52 +00003215#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003216#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003217 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00003218#endif
3219 return 0;
3220}
3221
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003222static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
3223 uint64_t val, unsigned size)
blueswir1e18231a2008-10-06 18:46:28 +00003224{
3225#ifdef DEBUG_UNASSIGNED
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003226 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
blueswir1e18231a2008-10-06 18:46:28 +00003227#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003228#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003229 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00003230#endif
3231}
3232
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003233static const MemoryRegionOps unassigned_mem_ops = {
3234 .read = unassigned_mem_read,
3235 .write = unassigned_mem_write,
3236 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00003237};
3238
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003239static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
3240 unsigned size)
3241{
3242 abort();
3243}
3244
3245static void error_mem_write(void *opaque, target_phys_addr_t addr,
3246 uint64_t value, unsigned size)
3247{
3248 abort();
3249}
3250
3251static const MemoryRegionOps error_mem_ops = {
3252 .read = error_mem_read,
3253 .write = error_mem_write,
3254 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00003255};
3256
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003257static const MemoryRegionOps rom_mem_ops = {
3258 .read = error_mem_read,
3259 .write = unassigned_mem_write,
3260 .endianness = DEVICE_NATIVE_ENDIAN,
3261};
3262
3263static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
3264 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00003265{
bellard3a7d9292005-08-21 09:26:42 +00003266 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003267 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003268 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3269#if !defined(CONFIG_USER_ONLY)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003270 tb_invalidate_phys_page_fast(ram_addr, size);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003271 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003272#endif
3273 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003274 switch (size) {
3275 case 1:
3276 stb_p(qemu_get_ram_ptr(ram_addr), val);
3277 break;
3278 case 2:
3279 stw_p(qemu_get_ram_ptr(ram_addr), val);
3280 break;
3281 case 4:
3282 stl_p(qemu_get_ram_ptr(ram_addr), val);
3283 break;
3284 default:
3285 abort();
3286 }
bellardf23db162005-08-21 19:12:28 +00003287 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003288 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00003289 /* we remove the notdirty callback only if the code has been
3290 flushed */
3291 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00003292 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00003293}
3294
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003295static const MemoryRegionOps notdirty_mem_ops = {
3296 .read = error_mem_read,
3297 .write = notdirty_mem_write,
3298 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00003299};
3300
pbrook0f459d12008-06-09 00:20:13 +00003301/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00003302static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00003303{
3304 CPUState *env = cpu_single_env;
aliguori06d55cc2008-11-18 20:24:06 +00003305 target_ulong pc, cs_base;
3306 TranslationBlock *tb;
pbrook0f459d12008-06-09 00:20:13 +00003307 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00003308 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00003309 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00003310
aliguori06d55cc2008-11-18 20:24:06 +00003311 if (env->watchpoint_hit) {
3312 /* We re-entered the check after replacing the TB. Now raise
 3313         * the debug interrupt so that it will trigger after the
3314 * current instruction. */
3315 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3316 return;
3317 }
pbrook2e70f6e2008-06-29 01:03:05 +00003318 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003319 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00003320 if ((vaddr == (wp->vaddr & len_mask) ||
3321 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00003322 wp->flags |= BP_WATCHPOINT_HIT;
3323 if (!env->watchpoint_hit) {
3324 env->watchpoint_hit = wp;
3325 tb = tb_find_pc(env->mem_io_pc);
3326 if (!tb) {
3327 cpu_abort(env, "check_watchpoint: could not find TB for "
3328 "pc=%p", (void *)env->mem_io_pc);
3329 }
Stefan Weil618ba8e2011-04-18 06:39:53 +00003330 cpu_restore_state(tb, env, env->mem_io_pc);
aliguori6e140f22008-11-18 20:37:55 +00003331 tb_phys_invalidate(tb, -1);
3332 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3333 env->exception_index = EXCP_DEBUG;
3334 } else {
3335 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3336 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3337 }
3338 cpu_resume_from_signal(env, NULL);
aliguori06d55cc2008-11-18 20:24:06 +00003339 }
aliguori6e140f22008-11-18 20:37:55 +00003340 } else {
3341 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00003342 }
3343 }
3344}
3345
pbrook6658ffb2007-03-16 23:58:11 +00003346/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3347 so these check for a hit then pass through to the normal out-of-line
3348 phys routines. */
Avi Kivity1ec9b902012-01-02 12:47:48 +02003349static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
3350 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00003351{
Avi Kivity1ec9b902012-01-02 12:47:48 +02003352 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
3353 switch (size) {
3354 case 1: return ldub_phys(addr);
3355 case 2: return lduw_phys(addr);
3356 case 4: return ldl_phys(addr);
3357 default: abort();
3358 }
pbrook6658ffb2007-03-16 23:58:11 +00003359}
3360
Avi Kivity1ec9b902012-01-02 12:47:48 +02003361static void watch_mem_write(void *opaque, target_phys_addr_t addr,
3362 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00003363{
Avi Kivity1ec9b902012-01-02 12:47:48 +02003364 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
3365 switch (size) {
 3366    case 1: stb_phys(addr, val); break;
 3367    case 2: stw_phys(addr, val); break;
 3368    case 4: stl_phys(addr, val); break;
3369 default: abort();
3370 }
pbrook6658ffb2007-03-16 23:58:11 +00003371}
3372
Avi Kivity1ec9b902012-01-02 12:47:48 +02003373static const MemoryRegionOps watch_mem_ops = {
3374 .read = watch_mem_read,
3375 .write = watch_mem_write,
3376 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00003377};
pbrook6658ffb2007-03-16 23:58:11 +00003378
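/* Sketch of how these routines come into play, assuming the debugger
   API defined earlier in this file: once a watchpoint covers a page,
   the TLB routes accesses to that page through io_mem_watch, so
   watch_mem_read/write run first and check_watchpoint() raises
   EXCP_DEBUG on a hit. */
#if 0 /* illustration only */
static void watchpoint_sketch(CPUState *env, target_ulong vaddr)
{
    cpu_watchpoint_insert(env, vaddr, 4, BP_MEM_WRITE, NULL);
    /* A guest store to vaddr now lands in watch_mem_write() above. */
}
#endif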
Avi Kivity70c68e42012-01-02 12:32:48 +02003379static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
3380 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003381{
Avi Kivity70c68e42012-01-02 12:32:48 +02003382 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003383 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02003384 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00003385#if defined(DEBUG_SUBPAGE)
3386 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3387 mmio, len, addr, idx);
3388#endif
blueswir1db7b5422007-05-26 17:36:03 +00003389
Avi Kivity5312bd82012-02-12 18:32:55 +02003390 section = &phys_sections[mmio->sub_section[idx]];
3391 addr += mmio->base;
3392 addr -= section->offset_within_address_space;
3393 addr += section->offset_within_region;
3394 return io_mem_read(section->mr->ram_addr, addr, len);
blueswir1db7b5422007-05-26 17:36:03 +00003395}
3396
Avi Kivity70c68e42012-01-02 12:32:48 +02003397static void subpage_write(void *opaque, target_phys_addr_t addr,
3398 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003399{
Avi Kivity70c68e42012-01-02 12:32:48 +02003400 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003401 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02003402 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00003403#if defined(DEBUG_SUBPAGE)
Avi Kivity70c68e42012-01-02 12:32:48 +02003404 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
3405 " idx %d value %"PRIx64"\n",
Richard Hendersonf6405242010-04-22 16:47:31 -07003406 __func__, mmio, len, addr, idx, value);
blueswir1db7b5422007-05-26 17:36:03 +00003407#endif
Richard Hendersonf6405242010-04-22 16:47:31 -07003408
Avi Kivity5312bd82012-02-12 18:32:55 +02003409 section = &phys_sections[mmio->sub_section[idx]];
3410 addr += mmio->base;
3411 addr -= section->offset_within_address_space;
3412 addr += section->offset_within_region;
3413 io_mem_write(section->mr->ram_addr, addr, value, len);
blueswir1db7b5422007-05-26 17:36:03 +00003414}
3415
Avi Kivity70c68e42012-01-02 12:32:48 +02003416static const MemoryRegionOps subpage_ops = {
3417 .read = subpage_read,
3418 .write = subpage_write,
3419 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00003420};
3421
Avi Kivityde712f92012-01-02 12:41:07 +02003422static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
3423 unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01003424{
3425 ram_addr_t raddr = addr;
3426 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02003427 switch (size) {
3428 case 1: return ldub_p(ptr);
3429 case 2: return lduw_p(ptr);
3430 case 4: return ldl_p(ptr);
3431 default: abort();
3432 }
Andreas Färber56384e82011-11-30 16:26:21 +01003433}
3434
Avi Kivityde712f92012-01-02 12:41:07 +02003435static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
3436 uint64_t value, unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01003437{
3438 ram_addr_t raddr = addr;
3439 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02003440 switch (size) {
 3441    case 1: stb_p(ptr, value); break;
 3442    case 2: stw_p(ptr, value); break;
 3443    case 4: stl_p(ptr, value); break;
3444 default: abort();
3445 }
Andreas Färber56384e82011-11-30 16:26:21 +01003446}
3447
Avi Kivityde712f92012-01-02 12:41:07 +02003448static const MemoryRegionOps subpage_ram_ops = {
3449 .read = subpage_ram_read,
3450 .write = subpage_ram_write,
3451 .endianness = DEVICE_NATIVE_ENDIAN,
Andreas Färber56384e82011-11-30 16:26:21 +01003452};
3453
Anthony Liguoric227f092009-10-01 16:12:16 -05003454static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02003455 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00003456{
3457 int idx, eidx;
3458
3459 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3460 return -1;
3461 idx = SUBPAGE_IDX(start);
3462 eidx = SUBPAGE_IDX(end);
3463#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00003464    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", __func__,
blueswir1db7b5422007-05-26 17:36:03 +00003465           mmio, start, end, idx, eidx, section);
3466#endif
Avi Kivity5312bd82012-02-12 18:32:55 +02003467 if (memory_region_is_ram(phys_sections[section].mr)) {
3468 MemoryRegionSection new_section = phys_sections[section];
3469 new_section.mr = &io_mem_subpage_ram;
3470 section = phys_section_add(&new_section);
Andreas Färber56384e82011-11-30 16:26:21 +01003471 }
blueswir1db7b5422007-05-26 17:36:03 +00003472 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02003473 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00003474 }
3475
3476 return 0;
3477}
3478
Avi Kivity0f0cb162012-02-13 17:14:32 +02003479static subpage_t *subpage_init(target_phys_addr_t base)
blueswir1db7b5422007-05-26 17:36:03 +00003480{
Anthony Liguoric227f092009-10-01 16:12:16 -05003481 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00003482
Anthony Liguori7267c092011-08-20 22:09:37 -05003483 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00003484
3485 mmio->base = base;
Avi Kivity70c68e42012-01-02 12:32:48 +02003486 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
3487 "subpage", TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02003488 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00003489#if defined(DEBUG_SUBPAGE)
aliguori1eec6142009-02-05 22:06:18 +00003490    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
 3491           mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00003492#endif
Avi Kivity0f0cb162012-02-13 17:14:32 +02003493 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
blueswir1db7b5422007-05-26 17:36:03 +00003494
3495 return mmio;
3496}
3497
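/* Worked example: with 4 KiB pages, two devices sharing the page at
   base 0x1000 -- A on [0x1000, 0x1800), B on [0x1800, 0x2000) -- end
   up registered as
     subpage_register(mmio, 0x000, 0x7ff, section_A);
     subpage_register(mmio, 0x800, 0xfff, section_B);
   and subpage_read/write then pick the section via SUBPAGE_IDX(addr).
   (section_A/section_B stand for phys_section_add() results.) */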
aliguori88715652009-02-11 15:20:58 +00003498static int get_free_io_mem_idx(void)
3499{
3500 int i;
3501
3502 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3503 if (!io_mem_used[i]) {
3504 io_mem_used[i] = 1;
3505 return i;
3506 }
Riku Voipioc6703b42009-12-03 15:56:05 +02003507    fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
aliguori88715652009-02-11 15:20:58 +00003508 return -1;
3509}
3510
bellard33417e72003-08-10 21:47:01 +00003511/* Register a MemoryRegion for I/O dispatch. If io_index is non-zero,
 3512   the corresponding io zone is modified. If it is zero, a new io zone
 3513   is allocated. The return value can be used with
 3514   cpu_register_physical_memory(); -1 is returned on error. */
Avi Kivitya621f382012-01-02 13:12:08 +02003518static int cpu_register_io_memory_fixed(int io_index, MemoryRegion *mr)
bellard33417e72003-08-10 21:47:01 +00003519{
bellard33417e72003-08-10 21:47:01 +00003520 if (io_index <= 0) {
aliguori88715652009-02-11 15:20:58 +00003521 io_index = get_free_io_mem_idx();
3522 if (io_index == -1)
3523 return io_index;
bellard33417e72003-08-10 21:47:01 +00003524 } else {
3525 if (io_index >= IO_MEM_NB_ENTRIES)
3526 return -1;
3527 }
bellardb5ff1b32005-11-26 10:38:39 +00003528
Avi Kivitya621f382012-01-02 13:12:08 +02003529 io_mem_region[io_index] = mr;
Richard Hendersonf6405242010-04-22 16:47:31 -07003530
Avi Kivity11c7ef02012-01-02 17:21:07 +02003531 return io_index;
bellard33417e72003-08-10 21:47:01 +00003532}
bellard61382a52003-10-27 21:22:23 +00003533
Avi Kivitya621f382012-01-02 13:12:08 +02003534int cpu_register_io_memory(MemoryRegion *mr)
Avi Kivity1eed09c2009-06-14 11:38:51 +03003535{
Avi Kivitya621f382012-01-02 13:12:08 +02003536 return cpu_register_io_memory_fixed(0, mr);
Avi Kivity1eed09c2009-06-14 11:38:51 +03003537}
3538
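/* Minimal registration sketch; my_ops, its callbacks and the init
   helper below are hypothetical, not part of this file. A device wraps
   its callbacks in a MemoryRegion and obtains an io index for the
   dispatch tables. */
#if 0 /* illustration only */
static uint64_t my_read(void *opaque, target_phys_addr_t addr,
                        unsigned size)
{
    return 0;   /* device registers would be decoded here */
}

static void my_write(void *opaque, target_phys_addr_t addr,
                     uint64_t val, unsigned size)
{
}

static const MemoryRegionOps my_ops = {
    .read = my_read,
    .write = my_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void my_device_init_sketch(MemoryRegion *mr, void *opaque)
{
    memory_region_init_io(mr, &my_ops, opaque, "my-dev", 0x1000);
    (void)cpu_register_io_memory(mr);
}
#endif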
Avi Kivity11c7ef02012-01-02 17:21:07 +02003539void cpu_unregister_io_memory(int io_index)
aliguori88715652009-02-11 15:20:58 +00003540{
Avi Kivitya621f382012-01-02 13:12:08 +02003541 io_mem_region[io_index] = NULL;
aliguori88715652009-02-11 15:20:58 +00003542 io_mem_used[io_index] = 0;
3543}
3544
Avi Kivity5312bd82012-02-12 18:32:55 +02003545static uint16_t dummy_section(MemoryRegion *mr)
3546{
3547 MemoryRegionSection section = {
3548 .mr = mr,
3549 .offset_within_address_space = 0,
3550 .offset_within_region = 0,
3551 .size = UINT64_MAX,
3552 };
3553
3554 return phys_section_add(&section);
3555}
3556
Avi Kivitye9179ce2009-06-14 11:38:52 +03003557static void io_mem_init(void)
3558{
3559 int i;
3560
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003561 /* Must be first: */
3562 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
3563 assert(io_mem_ram.ram_addr == 0);
3564 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
3565 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
3566 "unassigned", UINT64_MAX);
3567 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
3568 "notdirty", UINT64_MAX);
Avi Kivityde712f92012-01-02 12:41:07 +02003569 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
3570 "subpage-ram", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003571 for (i=0; i<5; i++)
3572 io_mem_used[i] = 1;
3573
Avi Kivity1ec9b902012-01-02 12:47:48 +02003574 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
3575 "watch", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003576}
3577
Avi Kivity50c1e142012-02-08 21:36:02 +02003578static void core_begin(MemoryListener *listener)
3579{
Avi Kivity54688b12012-02-09 17:34:32 +02003580 destroy_all_mappings();
Avi Kivity5312bd82012-02-12 18:32:55 +02003581 phys_sections_clear();
Avi Kivityc19e8802012-02-13 20:25:31 +02003582 phys_map.ptr = PHYS_MAP_NODE_NIL;
Avi Kivity5312bd82012-02-12 18:32:55 +02003583 phys_section_unassigned = dummy_section(&io_mem_unassigned);
Avi Kivity50c1e142012-02-08 21:36:02 +02003584}
3585
3586static void core_commit(MemoryListener *listener)
3587{
Avi Kivity117712c2012-02-12 21:23:17 +02003588 CPUState *env;
3589
3590 /* since each CPU stores ram addresses in its TLB cache, we must
3591 reset the modified entries */
3592 /* XXX: slow ! */
3593 for(env = first_cpu; env != NULL; env = env->next_cpu) {
3594 tlb_flush(env, 1);
3595 }
Avi Kivity50c1e142012-02-08 21:36:02 +02003596}
3597
Avi Kivity93632742012-02-08 16:54:16 +02003598static void core_region_add(MemoryListener *listener,
3599 MemoryRegionSection *section)
3600{
Avi Kivity4855d412012-02-08 21:16:05 +02003601 cpu_register_physical_memory_log(section, section->readonly);
Avi Kivity93632742012-02-08 16:54:16 +02003602}
3603
3604static void core_region_del(MemoryListener *listener,
3605 MemoryRegionSection *section)
3606{
Avi Kivity93632742012-02-08 16:54:16 +02003607}
3608
Avi Kivity50c1e142012-02-08 21:36:02 +02003609static void core_region_nop(MemoryListener *listener,
3610 MemoryRegionSection *section)
3611{
Avi Kivity54688b12012-02-09 17:34:32 +02003612 cpu_register_physical_memory_log(section, section->readonly);
Avi Kivity50c1e142012-02-08 21:36:02 +02003613}
3614
Avi Kivity93632742012-02-08 16:54:16 +02003615static void core_log_start(MemoryListener *listener,
3616 MemoryRegionSection *section)
3617{
3618}
3619
3620static void core_log_stop(MemoryListener *listener,
3621 MemoryRegionSection *section)
3622{
3623}
3624
3625static void core_log_sync(MemoryListener *listener,
3626 MemoryRegionSection *section)
3627{
3628}
3629
3630static void core_log_global_start(MemoryListener *listener)
3631{
3632 cpu_physical_memory_set_dirty_tracking(1);
3633}
3634
3635static void core_log_global_stop(MemoryListener *listener)
3636{
3637 cpu_physical_memory_set_dirty_tracking(0);
3638}
3639
3640static void core_eventfd_add(MemoryListener *listener,
3641 MemoryRegionSection *section,
3642 bool match_data, uint64_t data, int fd)
3643{
3644}
3645
3646static void core_eventfd_del(MemoryListener *listener,
3647 MemoryRegionSection *section,
3648 bool match_data, uint64_t data, int fd)
3649{
3650}
3651
Avi Kivity50c1e142012-02-08 21:36:02 +02003652static void io_begin(MemoryListener *listener)
3653{
3654}
3655
3656static void io_commit(MemoryListener *listener)
3657{
3658}
3659
Avi Kivity4855d412012-02-08 21:16:05 +02003660static void io_region_add(MemoryListener *listener,
3661 MemoryRegionSection *section)
3662{
3663 iorange_init(&section->mr->iorange, &memory_region_iorange_ops,
3664 section->offset_within_address_space, section->size);
3665 ioport_register(&section->mr->iorange);
3666}
3667
3668static void io_region_del(MemoryListener *listener,
3669 MemoryRegionSection *section)
3670{
3671 isa_unassign_ioport(section->offset_within_address_space, section->size);
3672}
3673
Avi Kivity50c1e142012-02-08 21:36:02 +02003674static void io_region_nop(MemoryListener *listener,
3675 MemoryRegionSection *section)
3676{
3677}
3678
Avi Kivity4855d412012-02-08 21:16:05 +02003679static void io_log_start(MemoryListener *listener,
3680 MemoryRegionSection *section)
3681{
3682}
3683
3684static void io_log_stop(MemoryListener *listener,
3685 MemoryRegionSection *section)
3686{
3687}
3688
3689static void io_log_sync(MemoryListener *listener,
3690 MemoryRegionSection *section)
3691{
3692}
3693
3694static void io_log_global_start(MemoryListener *listener)
3695{
3696}
3697
3698static void io_log_global_stop(MemoryListener *listener)
3699{
3700}
3701
3702static void io_eventfd_add(MemoryListener *listener,
3703 MemoryRegionSection *section,
3704 bool match_data, uint64_t data, int fd)
3705{
3706}
3707
3708static void io_eventfd_del(MemoryListener *listener,
3709 MemoryRegionSection *section,
3710 bool match_data, uint64_t data, int fd)
3711{
3712}
3713
Avi Kivity93632742012-02-08 16:54:16 +02003714static MemoryListener core_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02003715 .begin = core_begin,
3716 .commit = core_commit,
Avi Kivity93632742012-02-08 16:54:16 +02003717 .region_add = core_region_add,
3718 .region_del = core_region_del,
Avi Kivity50c1e142012-02-08 21:36:02 +02003719 .region_nop = core_region_nop,
Avi Kivity93632742012-02-08 16:54:16 +02003720 .log_start = core_log_start,
3721 .log_stop = core_log_stop,
3722 .log_sync = core_log_sync,
3723 .log_global_start = core_log_global_start,
3724 .log_global_stop = core_log_global_stop,
3725 .eventfd_add = core_eventfd_add,
3726 .eventfd_del = core_eventfd_del,
3727 .priority = 0,
3728};
3729
Avi Kivity4855d412012-02-08 21:16:05 +02003730static MemoryListener io_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02003731 .begin = io_begin,
3732 .commit = io_commit,
Avi Kivity4855d412012-02-08 21:16:05 +02003733 .region_add = io_region_add,
3734 .region_del = io_region_del,
Avi Kivity50c1e142012-02-08 21:36:02 +02003735 .region_nop = io_region_nop,
Avi Kivity4855d412012-02-08 21:16:05 +02003736 .log_start = io_log_start,
3737 .log_stop = io_log_stop,
3738 .log_sync = io_log_sync,
3739 .log_global_start = io_log_global_start,
3740 .log_global_stop = io_log_global_stop,
3741 .eventfd_add = io_eventfd_add,
3742 .eventfd_del = io_eventfd_del,
3743 .priority = 0,
3744};
3745
Avi Kivity62152b82011-07-26 14:26:14 +03003746static void memory_map_init(void)
3747{
Anthony Liguori7267c092011-08-20 22:09:37 -05003748 system_memory = g_malloc(sizeof(*system_memory));
Avi Kivity8417ceb2011-08-03 11:56:14 +03003749 memory_region_init(system_memory, "system", INT64_MAX);
Avi Kivity62152b82011-07-26 14:26:14 +03003750 set_system_memory_map(system_memory);
Avi Kivity309cb472011-08-08 16:09:03 +03003751
Anthony Liguori7267c092011-08-20 22:09:37 -05003752 system_io = g_malloc(sizeof(*system_io));
Avi Kivity309cb472011-08-08 16:09:03 +03003753 memory_region_init(system_io, "io", 65536);
3754 set_system_io_map(system_io);
Avi Kivity93632742012-02-08 16:54:16 +02003755
Avi Kivity4855d412012-02-08 21:16:05 +02003756 memory_listener_register(&core_memory_listener, system_memory);
3757 memory_listener_register(&io_memory_listener, system_io);
Avi Kivity62152b82011-07-26 14:26:14 +03003758}
3759
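/* Sketch of how a board wires RAM under the regions created above;
   memory_region_init_ram()'s exact signature in this tree is an
   assumption here:
     memory_region_init_ram(&ram, "board.ram", ram_size);
     memory_region_add_subregion(get_system_memory(), 0, &ram);
*/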
3760MemoryRegion *get_system_memory(void)
3761{
3762 return system_memory;
3763}
3764
Avi Kivity309cb472011-08-08 16:09:03 +03003765MemoryRegion *get_system_io(void)
3766{
3767 return system_io;
3768}
3769
pbrooke2eef172008-06-08 01:09:01 +00003770#endif /* !defined(CONFIG_USER_ONLY) */
3771
bellard13eb76e2004-01-24 15:23:36 +00003772/* physical memory access (slow version, mainly for debug) */
3773#if defined(CONFIG_USER_ONLY)
Paul Brooka68fe892010-03-01 00:08:59 +00003774int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3775 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003776{
3777 int l, flags;
3778 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00003779 void * p;
bellard13eb76e2004-01-24 15:23:36 +00003780
3781 while (len > 0) {
3782 page = addr & TARGET_PAGE_MASK;
3783 l = (page + TARGET_PAGE_SIZE) - addr;
3784 if (l > len)
3785 l = len;
3786 flags = page_get_flags(page);
3787 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00003788 return -1;
bellard13eb76e2004-01-24 15:23:36 +00003789 if (is_write) {
3790 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00003791 return -1;
bellard579a97f2007-11-11 14:26:47 +00003792 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003793 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00003794 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003795 memcpy(p, buf, l);
3796 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00003797 } else {
3798 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00003799 return -1;
bellard579a97f2007-11-11 14:26:47 +00003800 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003801 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00003802 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003803 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00003804 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00003805 }
3806 len -= l;
3807 buf += l;
3808 addr += l;
3809 }
Paul Brooka68fe892010-03-01 00:08:59 +00003810 return 0;
bellard13eb76e2004-01-24 15:23:36 +00003811}
bellard8df1cd02005-01-28 22:37:22 +00003812
#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    MemoryRegionSection section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        if (is_write) {
            if (!memory_region_is_ram(section.mr)) {
                target_phys_addr_t addr1;
                io_index = memory_region_get_ram_addr(section.mr)
                    & (IO_MEM_NB_ENTRIES - 1);
                addr1 = (addr & ~TARGET_PAGE_MASK)
                    + section.offset_within_region;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write(io_index, addr1, val, 4);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write(io_index, addr1, val, 2);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write(io_index, addr1, val, 1);
                    l = 1;
                }
            } else if (!section.readonly) {
                ram_addr_t addr1;
                addr1 = (memory_region_get_ram_addr(section.mr)
                         + section.offset_within_region)
                    | (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                qemu_put_ram_ptr(ptr);
            }
        } else {
            if (!is_ram_rom_romd(&section)) {
                target_phys_addr_t addr1;
                /* I/O case */
                io_index = memory_region_get_ram_addr(section.mr)
                    & (IO_MEM_NB_ENTRIES - 1);
                addr1 = (addr & ~TARGET_PAGE_MASK)
                    + section.offset_within_region;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read(io_index, addr1, 4);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read(io_index, addr1, 2);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read(io_index, addr1, 1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(section.mr->ram_addr
                                       + section.offset_within_region);
                memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
                qemu_put_ram_ptr(ptr);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
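
/* Example: how a device model would use the slow path above to push a
   completed request's data into guest memory.  The wrappers
   cpu_physical_memory_read()/write() used elsewhere in this file are
   just rw() with is_write fixed; the "my_dev" names are hypothetical. */
#if 0
static void my_dev_complete_request(target_phys_addr_t guest_buf,
                                    const uint8_t *data, int size)
{
    /* copies page by page, routing each page to RAM or MMIO as needed */
    cpu_physical_memory_rw(guest_buf, (uint8_t *)data, size, 1);
}
#endif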

/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    MemoryRegionSection section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        if (!is_ram_rom_romd(&section)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (memory_region_get_ram_addr(section.mr)
                     + section.offset_within_region)
                + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            qemu_put_ram_ptr(ptr);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
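
/* Example: board code loading a firmware image uses the ROM variant so
   that write-protected regions are still populated at machine init.
   The address and names here are hypothetical. */
#if 0
static void my_board_load_firmware(const uint8_t *blob, int size)
{
    cpu_physical_memory_write_rom(0xfffc0000, blob, size);
}
#endif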

typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
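
/* Example: the retry pattern the map-client list enables.  A caller
   whose cpu_physical_memory_map() attempt fails registers a callback
   and tries again once the bounce buffer has been released.  The
   MyDMAState type and function names are hypothetical. */
#if 0
typedef struct {
    target_phys_addr_t addr, len;
    int is_write;
    void *mem;
} MyDMAState;

static void start_dma(MyDMAState *s);

static void continue_dma(void *opaque)
{
    start_dma(opaque);          /* the bounce buffer was just released */
}

static void start_dma(MyDMAState *s)
{
    target_phys_addr_t plen = s->len;

    s->mem = cpu_physical_memory_map(s->addr, &plen, s->is_write);
    if (!s->mem) {
        /* resources exhausted; retry when a mapping is torn down */
        cpu_register_map_client(s, continue_dma);
        return;
    }
    /* ... transfer up to plen bytes, then cpu_physical_memory_unmap() ... */
}
#endif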

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t todo = 0;
    int l;
    target_phys_addr_t page;
    MemoryRegionSection section;
    ram_addr_t raddr = RAM_ADDR_MAX;
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        if (!(memory_region_is_ram(section.mr) && !section.readonly)) {
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_read(addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            raddr = memory_region_get_ram_addr(section.mr)
                + section.offset_within_region
                + (addr & ~TARGET_PAGE_MASK);
        }

        len -= l;
        addr += l;
        todo += l;
    }
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
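
/* Example: the complete map/transfer/unmap lifecycle for a zero-copy
   read from guest memory.  Names other than the two APIs are
   hypothetical; note that *plen can come back smaller than requested,
   so real callers must loop or fall back to the slow path. */
#if 0
static int read_guest(target_phys_addr_t addr, uint8_t *out, int size)
{
    target_phys_addr_t plen = size;
    void *mem = cpu_physical_memory_map(addr, &plen, 0 /* read */);

    if (!mem) {
        return -1;
    }
    memcpy(out, mem, plen);
    /* access_len tells unmap how much memory was actually touched */
    cpu_physical_memory_unmap(mem, plen, 0, plen);
    return plen;
}
#endif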

/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    MemoryRegionSection section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!is_ram_rom_romd(&section)) {
        /* I/O case */
        io_index = memory_region_get_ram_addr(section.mr)
            & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
        val = io_mem_read(io_index, addr, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section.mr)
                                & TARGET_PAGE_MASK)
                               + section.offset_within_region)
            + (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!is_ram_rom_romd(&section)) {
        /* I/O case */
        io_index = memory_region_get_ram_addr(section.mr)
            & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;

        /* XXX This is broken when device endian != cpu endian.
           Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = io_mem_read(io_index, addr, 4) << 32;
        val |= io_mem_read(io_index, addr + 4, 4);
#else
        val = io_mem_read(io_index, addr, 4);
        val |= io_mem_read(io_index, addr + 4, 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section.mr)
                                & TARGET_PAGE_MASK)
                               + section.offset_within_region)
            + (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
                                          enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!is_ram_rom_romd(&section)) {
        /* I/O case */
        io_index = memory_region_get_ram_addr(section.mr)
            & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
        val = io_mem_read(io_index, addr, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section.mr)
                                & TARGET_PAGE_MASK)
                               + section.offset_within_region)
            + (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned.  The ram page is not marked as dirty
   and the code inside is not invalidated.  It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    MemoryRegionSection section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section.mr) || section.readonly) {
        if (memory_region_is_ram(section.mr)) {
            io_index = io_mem_rom.ram_addr;
        } else {
            io_index = memory_region_get_ram_addr(section.mr);
        }
        addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
        io_mem_write(io_index, addr, val, 4);
    } else {
        unsigned long addr1 = (memory_region_get_ram_addr(section.mr)
                               & TARGET_PAGE_MASK)
            + section.offset_within_region
            + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
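
/* Example: the typical caller is a target MMU walk setting an
   accessed/dirty flag inside a guest PTE.  Using the _notdirty variant
   means software that relies on the dirty bits to detect modified PTEs
   does not see the emulator's own bookkeeping update.  PTE_ACCESSED
   and the helper below are hypothetical. */
#if 0
#define PTE_ACCESSED 0x20

static void pte_set_accessed(target_phys_addr_t pte_addr, uint32_t pte)
{
    stl_phys_notdirty(pte_addr, pte | PTE_ACCESSED);
}
#endif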

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    MemoryRegionSection section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section.mr) || section.readonly) {
        if (memory_region_is_ram(section.mr)) {
            io_index = io_mem_rom.ram_addr;
        } else {
            io_index = memory_region_get_ram_addr(section.mr)
                & (IO_MEM_NB_ENTRIES - 1);
        }
        addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write(io_index, addr, val >> 32, 4);
        io_mem_write(io_index, addr + 4, (uint32_t)val, 4);
#else
        io_mem_write(io_index, addr, (uint32_t)val, 4);
        io_mem_write(io_index, addr + 4, val >> 32, 4);
#endif
    } else {
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section.mr)
                                & TARGET_PAGE_MASK)
                               + section.offset_within_region)
            + (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    MemoryRegionSection section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section.mr) || section.readonly) {
        if (memory_region_is_ram(section.mr)) {
            io_index = io_mem_rom.ram_addr;
        } else {
            io_index = memory_region_get_ram_addr(section.mr)
                & (IO_MEM_NB_ENTRIES - 1);
        }
        addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(io_index, addr, val, 4);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section.mr) & TARGET_PAGE_MASK)
            + section.offset_within_region
            + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    MemoryRegionSection section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section.mr) || section.readonly) {
        if (memory_region_is_ram(section.mr)) {
            io_index = io_mem_rom.ram_addr;
        } else {
            io_index = memory_region_get_ram_addr(section.mr)
                & (IO_MEM_NB_ENTRIES - 1);
        }
        addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(io_index, addr, val, 2);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section.mr) & TARGET_PAGE_MASK)
            + section.offset_within_region + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}
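
/* Example: the fixed-endian accessors let device models read guest
   structures without endianness #ifdefs.  A device whose descriptors
   are defined to be little-endian in guest memory could do the
   following; the field offset is hypothetical. */
#if 0
static uint16_t desc_read_flags(target_phys_addr_t desc_base)
{
    return lduw_le_phys(desc_base + 2);   /* byte-swapped on BE targets */
}
#endif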

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif
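
/* Example: the gdb stub goes through this function so that guest
   memory is read and patched via the CPU's current MMU translation.
   Sketch only; "pc" stands in for the target-specific program counter
   field of env. */
#if 0
    uint8_t insn[4];
    if (cpu_memory_rw_debug(env, pc, insn, sizeof(insn), 0) < 0) {
        /* no physical page mapped at pc */
    }
#endif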

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred. */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn. */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch. */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen. */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok. */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB. */
    cpu_resume_from_signal(env, NULL);
}

#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ?
                    (double) (code_gen_ptr - code_gen_buffer)
                        / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
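
/* Example: this is what the monitor's "info jit" command prints; it
   can also be called directly, since fprintf matches the
   fprintf_function signature: */
#if 0
    dump_exec_info(stderr, fprintf);
#endif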

/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
   is the offset relative to phys_ram_base */
tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    void *p;

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1);
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
        ldub_code(addr);
    }
    pd = env1->tlb_table[mmu_idx][page_index].addr_code & ~TARGET_PAGE_MASK;
    if (pd != io_mem_ram.ram_addr && pd != io_mem_rom.ram_addr
        && !io_mem_region[pd]->rom_device) {
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC)
        cpu_unassigned_access(env1, addr, 0, 1, 0, 4);
#else
        cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x"
                  TARGET_FMT_lx "\n", addr);
#endif
    }
    p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif
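
/* The SHIFT/#include pairs above stamp out one code-access load helper
   per operand size (SHIFT 0..3 -> 1, 2, 4, 8 bytes), each renamed via
   MMUSUFFIX.  Roughly (sketch only; the actual identifiers are glued
   together by macros inside softmmu_template.h): */
#if 0
uint8_t  __ldb_cmmu(target_ulong addr, int mmu_idx);   /* SHIFT 0 */
uint16_t __ldw_cmmu(target_ulong addr, int mmu_idx);   /* SHIFT 1 */
uint32_t __ldl_cmmu(target_ulong addr, int mmu_idx);   /* SHIFT 2 */
uint64_t __ldq_cmmu(target_ulong addr, int mmu_idx);   /* SHIFT 3 */
#endif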