/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump.  ARM and Sparc64
   have limited branch ranges (possibly also PPC), so place it in a
   section close to the code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to decide when to build a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

#define P_L2_LEVELS \
    (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
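
/*
 * Worked example (illustrative only): on a 64-bit user-mode host with
 * L1_MAP_ADDR_SPACE_BITS == 64, TARGET_PAGE_BITS == 12 and L2_BITS == 10,
 * V_L1_BITS_REM == (64 - 12) % 10 == 2, which is < 4, so the remainder is
 * folded into the top level:
 *     V_L1_BITS  == 2 + 10 == 12      (l1_map has 4096 entries)
 *     V_L1_SHIFT == 64 - 12 - 12 == 40
 * leaving four further levels of 10 bits each below the top table:
 *     12 (L1) + 4 * 10 (lower levels) + 12 (page offset) == 64 bits.
 */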

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageEntry PhysPageEntry;

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
    /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};
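
/*
 * Packing note (illustrative): a PhysPageEntry fits in one uint16_t,
 * conceptually a 1-bit tag plus a 15-bit payload (the exact bit layout is
 * the compiler's choice).  PHYS_MAP_NODE_NIL, defined just below, equals
 * 0x7fff, the largest representable ptr value, and is reserved as the
 * "empty" marker; e.g. { .is_leaf = 1, .ptr = 3 } denotes phys_sections[3],
 * while { .is_leaf = 0, .ptr = 3 } denotes the interior node
 * phys_map_nodes[3].
 */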

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to MemoryRegionSections.  */
static PhysPageEntry phys_map = { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };

static void io_mem_init(void);
static void memory_map_init(void);

/* io memory support */
MemoryRegion *io_mem_region[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static MemoryRegion io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
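
/*
 * Rounding example (illustrative): with a 4 KiB page size, a call such as
 * map_exec((void *)0x12345, 0x10) rounds start down to 0x12000 and end
 * (0x12355) up to 0x13000, so every page touched by the region becomes
 * readable, writable and executable.
 */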

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
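
/*
 * Lookup sketch (illustrative): index is a page number, i.e.
 * addr >> TARGET_PAGE_BITS.  With the example split above
 * (V_L1_BITS == 12, V_L1_SHIFT == 40, L2_BITS == 10), page_find(index)
 * walks
 *     l1_map[(index >> 40) & 0xfff]   -> level-2 table,
 *     table[(index >> 30) & 0x3ff]    -> level-3 table,
 * and so on down to the bottom PageDesc array, finally returning
 * &pd[index & 0x3ff].  With alloc == 0, any missing intermediate table
 * terminates the walk with NULL instead of being created.
 */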

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}


static void phys_page_set_level(PhysPageEntry *lp, target_phys_addr_t *index,
                                target_phys_addr_t *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    target_phys_addr_t step = (target_phys_addr_t)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(target_phys_addr_t index, target_phys_addr_t nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
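
#if 0
/* Usage sketch (illustrative, not built): phys_page_set() takes page
   counts, not byte counts.  Registering a 1 MiB region at guest physical
   address 0x200000 with 4 KiB target pages would look like this, where
   section_index is a hypothetical index into phys_sections[]: */
phys_page_set(0x200000 >> TARGET_PAGE_BITS,   /* first page index (0x200) */
              0x100000 >> TARGET_PAGE_BITS,   /* number of pages  (0x100) */
              section_index);
#endif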

static MemoryRegionSection *phys_page_find(target_phys_addr_t index)
{
    PhysPageEntry lp = phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}

static target_phys_addr_t section_addr(MemoryRegionSection *section,
                                       target_phys_addr_t addr)
{
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    return addr;
}
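
/*
 * Worked example (illustrative): for a section covering guest physical
 * addresses [0x8000, 0x9000) whose backing MemoryRegion starts at region
 * offset 0x400, section_addr(section, 0x8010) yields
 * 0x8010 - 0x8000 + 0x400 == 0x410, i.e. the matching offset within the
 * region.
 */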

static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode.  This will change once a dedicated libc is used.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Keep the buffer no bigger than 16MB to branch between blocks */
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
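
/*
 * Sizing note (illustrative): code_gen_buffer_max_size leaves one
 * worst-case translation (TCG_MAX_OP_SIZE * OPC_BUF_SIZE bytes) of slack
 * at the end of the buffer, so tb_alloc() never starts a TB whose
 * generated code could run off the end; code_gen_max_blocks likewise caps
 * the TB descriptor array at buffer_size / CODE_GEN_AVG_BLOCK_SIZE entries.
 */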

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}
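
/*
 * Reclaim example (illustrative): after
 *     tb = tb_gen_code(env, pc, cs_base, flags, 1);
 *     ...execute the block once...
 *     tb_free(tb);
 * the generated-code pointer is wound back only because tb is still the
 * most recently allocated block; freeing any older TB is deliberately a
 * no-op.
 */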

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
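
/*
 * Encoding note (illustrative): the per-page TB lists store a tag in the
 * two low-order bits of each TranslationBlock pointer, which is why the
 * walkers mask with ~3.  For a TB spanning two pages, page_next[0] threads
 * it through the first page's list and page_next[1] through the second's;
 * e.g. first_tb == (TranslationBlock *)((long)tb | 1) reads as "tb, linked
 * via its page_next[1] field".  The jump lists below reuse the same
 * scheme, with tag value 2 marking the owning TB at the head of the
 * circular list.
 */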

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
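
/*
 * Worked example (illustrative): set_bits(tab, 3, 7) marks bits 3..9.
 * The range straddles a byte boundary, so the first byte is OR-ed with
 * 0xff << 3 == 0xf8, start advances to 8, no whole 0xff bytes remain,
 * and the trailing partial byte is OR-ed with ~(0xff << (10 & 7)) == 0x03.
 */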

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
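
#if 0
/* Call sketch (illustrative, not built): callers normally derive the
   translation key from the current CPU state first, then translate: */
cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
tb = tb_gen_code(env, pc, cs_base, flags, 0);  /* 0: default cflags */
#endif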

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
1223
bellard9fa3e852004-01-04 18:06:42 +00001224#if !defined(CONFIG_SOFTMMU)
Paul Brook41c1b1c2010-03-12 16:54:58 +00001225static void tb_invalidate_phys_page(tb_page_addr_t addr,
bellardd720b932004-04-25 17:57:43 +00001226 unsigned long pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00001227{
aliguori6b917542008-11-18 19:46:41 +00001228 TranslationBlock *tb;
bellard9fa3e852004-01-04 18:06:42 +00001229 PageDesc *p;
aliguori6b917542008-11-18 19:46:41 +00001230 int n;
bellardd720b932004-04-25 17:57:43 +00001231#ifdef TARGET_HAS_PRECISE_SMC
aliguori6b917542008-11-18 19:46:41 +00001232 TranslationBlock *current_tb = NULL;
bellardd720b932004-04-25 17:57:43 +00001233 CPUState *env = cpu_single_env;
aliguori6b917542008-11-18 19:46:41 +00001234 int current_tb_modified = 0;
1235 target_ulong current_pc = 0;
1236 target_ulong current_cs_base = 0;
1237 int current_flags = 0;
bellardd720b932004-04-25 17:57:43 +00001238#endif
bellard9fa3e852004-01-04 18:06:42 +00001239
1240 addr &= TARGET_PAGE_MASK;
1241 p = page_find(addr >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001242 if (!p)
bellardfd6ce8f2003-05-14 19:00:11 +00001243 return;
1244 tb = p->first_tb;
bellardd720b932004-04-25 17:57:43 +00001245#ifdef TARGET_HAS_PRECISE_SMC
1246 if (tb && pc != 0) {
1247 current_tb = tb_find_pc(pc);
1248 }
1249#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001250 while (tb != NULL) {
bellard9fa3e852004-01-04 18:06:42 +00001251 n = (long)tb & 3;
1252 tb = (TranslationBlock *)((long)tb & ~3);
bellardd720b932004-04-25 17:57:43 +00001253#ifdef TARGET_HAS_PRECISE_SMC
1254 if (current_tb == tb &&
pbrook2e70f6e2008-06-29 01:03:05 +00001255 (current_tb->cflags & CF_COUNT_MASK) != 1) {
bellardd720b932004-04-25 17:57:43 +00001256 /* If we are modifying the current TB, we must stop
1257 its execution. We could be more precise by checking
1258 that the modification is after the current PC, but it
1259 would require a specialized function to partially
1260 restore the CPU state */
ths3b46e622007-09-17 08:09:54 +00001261
bellardd720b932004-04-25 17:57:43 +00001262 current_tb_modified = 1;
Stefan Weil618ba8e2011-04-18 06:39:53 +00001263 cpu_restore_state(current_tb, env, pc);
aliguori6b917542008-11-18 19:46:41 +00001264 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1265 &current_flags);
bellardd720b932004-04-25 17:57:43 +00001266 }
1267#endif /* TARGET_HAS_PRECISE_SMC */
bellard9fa3e852004-01-04 18:06:42 +00001268 tb_phys_invalidate(tb, addr);
1269 tb = tb->page_next[n];
bellardfd6ce8f2003-05-14 19:00:11 +00001270 }
1271 p->first_tb = NULL;
bellardd720b932004-04-25 17:57:43 +00001272#ifdef TARGET_HAS_PRECISE_SMC
1273 if (current_tb_modified) {
1274 /* we generate a block containing just the instruction
1275 modifying the memory. It will ensure that it cannot modify
1276 itself */
bellardea1c1802004-06-14 18:56:36 +00001277 env->current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001278 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
bellardd720b932004-04-25 17:57:43 +00001279 cpu_resume_from_signal(env, puc);
1280 }
1281#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001282}
bellard9fa3e852004-01-04 18:06:42 +00001283#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done. */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
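
/* Illustrative sketch (not part of the original file): tb_find_pc() relies
   on tbs[] being sorted by tc_ptr, which holds because TBs are carved out
   of code_gen_buffer in allocation order. A typical caller maps a host PC
   taken from a signal context back to precise guest state; host_pc below is
   a made-up value for the example. */
#if 0
static void example_map_host_pc(CPUState *env, unsigned long host_pc)
{
    TranslationBlock *tb = tb_find_pc(host_pc);

    if (tb) {
        /* restore the guest CPU state for the instruction at host_pc */
        cpu_restore_state(tb, env, host_pc);
    }
}
#endif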

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    ram_addr_t ram_addr;
    MemoryRegionSection *section;

    addr = cpu_get_phys_page_debug(env, pc);
    section = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!(memory_region_is_ram(section->mr)
          || (section->mr->rom_device && section->mr->readable))) {
        return;
    }
    ram_addr = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
        + section_addr(section, addr);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint. */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
        len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint. */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference. */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints. */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif
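
/* Illustrative sketch (not part of the original file): how a debug stub
   might drive the watchpoint API above. The guest address 0x1000 and the
   4-byte length are made-up assumptions; the length must be a power of two
   and the address aligned to it, as checked in cpu_watchpoint_insert(). */
#if 0
static void example_watch_guest_word(CPUState *env)
{
    CPUWatchpoint *wp;

    if (cpu_watchpoint_insert(env, 0x1000, 4,
                              BP_MEM_WRITE | BP_GDB, &wp) < 0) {
        return; /* rejected: bad length or alignment */
    }
    /* ... guest writes to [0x1000,0x1004) now trap via the TLB ... */
    cpu_watchpoint_remove_by_ref(env, wp);
}
#endif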

/* Add a breakpoint. */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint. */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference. */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

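/* Illustrative sketch (not part of the original file): a GDB stub typically
   tags its breakpoints with BP_GDB and clears them all on detach via the
   mask form of removal. The pc value is an assumption for the example. */
#if 0
static void example_gdb_breakpoint(CPUState *env, target_ulong pc)
{
    cpu_breakpoint_insert(env, pc, BP_GDB, NULL);
    /* ... the CPU loop returns EXCP_DEBUG when the guest reaches pc ... */
    cpu_breakpoint_remove_all(env, BP_GDB); /* detach: drop only GDB's */
}
#endif
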
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

/* enable or disable low level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif defined(_WIN32)
        /* Win32 doesn't support line-buffering, so use unbuffered output. */
        setvbuf(logfile, NULL, _IONBF, 0);
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}

#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
        return;
    }

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu_unlink_tb(env);
    }
}

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *env, int mask)
{
    env->interrupt_request |= mask;
    cpu_unlink_tb(env);
}
#endif /* CONFIG_USER_ONLY */

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}

const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
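
/* Illustrative sketch (not part of the original file): these two helpers
   back the "-d" command line option; a driver would parse the user string
   and enable logging like so. The "in_asm,exec" string is an example value. */
#if 0
static void example_enable_logging(void)
{
    int mask = cpu_str_to_log_mask("in_asm,exec");

    if (!mask) {
        fprintf(stderr, "unknown log item\n");
        return;
    }
    cpu_set_log(mask); /* opens logfilename on first use */
}
#endif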

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page. */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

static CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read = -1,
    .addr_write = -1,
    .addr_code = -1,
    .addend = -1,
};

/* NOTE:
 * If flush_global is true (the usual case), flush all tlb entries.
 * If flush_global is false, flush (at least) all tlb entries not
 * marked global.
 *
 * Since QEMU doesn't currently implement a global/not-global flag
 * for tlb entries, at the moment tlb_flush() will also flush all
 * tlb entries in the flush_global == false case. This is OK because
 * CPU architectures generally permit an implementation to drop
 * entries from the TLB at any time, so flushing more entries than
 * required is only an efficiency issue, not a correctness issue.
 */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
        }
    }

    memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        *tlb_entry = s_cputlb_empty_entry;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* Check if we need to flush due to large pages. */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        tlb_flush(env, 1);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

    tlb_flush_jmp_cache(env, addr);
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}

/* Note: start and end must be within the same ram block. */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for(i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
        }
    }
}

int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;
    void *p;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
            + tlb_entry->addend);
        ram_addr = qemu_ram_addr_from_host_nofail(p);
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    int mmu_idx;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
    }
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
}
Paul Brookd4c430a2010-03-17 02:14:28 +00002137/* Our TLB does not support large pages, so remember the area covered by
2138 large pages and trigger a full TLB flush if these are invalidated. */
2139static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2140 target_ulong size)
2141{
2142 target_ulong mask = ~(size - 1);
2143
2144 if (env->tlb_flush_addr == (target_ulong)-1) {
2145 env->tlb_flush_addr = vaddr & mask;
2146 env->tlb_flush_mask = mask;
2147 return;
2148 }
2149 /* Extend the existing region to include the new page.
2150 This is a compromise between unnecessary flushes and the cost
2151 of maintaining a full variable size TLB. */
2152 mask &= env->tlb_flush_mask;
2153 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2154 mask <<= 1;
2155 }
2156 env->tlb_flush_addr &= mask;
2157 env->tlb_flush_mask = mask;
2158}
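
/* Illustrative worked example (not part of the original file): with 32-bit
   addresses, a 2 MB page at 0x00200000 records addr=0x00200000,
   mask=0xffe00000. A later 2 MB page at 0x00600000 differs in bit 22, so
   the loop above widens the mask until one region covers both:
   addr=0x00000000, mask=0xff800000, i.e. an 8 MB region. Any
   tlb_flush_page() inside that region then falls back to a full flush. */
#if 0
static void example_large_page_mask(void)
{
    target_ulong addr = 0x00200000, vaddr = 0x00600000;
    target_ulong mask = ~(target_ulong)(0x200000 - 1); /* 0xffe00000 */

    while (((addr ^ vaddr) & mask) != 0) {
        mask <<= 1;     /* widen the region until it covers both pages */
    }
    addr &= mask;       /* now addr == 0x00000000, mask == 0xff800000 */
}
#endif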

static bool is_ram_rom(MemoryRegionSection *s)
{
    return memory_region_is_ram(s->mr);
}

static bool is_romd(MemoryRegionSection *s)
{
    MemoryRegion *mr = s->mr;

    return mr->rom_device && mr->readable;
}

static bool is_ram_rom_romd(MemoryRegionSection *s)
{
    return is_ram_rom(s) || is_romd(s);
}

/* Add a new TLB entry. At most one entry for a given virtual address
   is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
   supplied size is only used by tlb_flush_page. */
void tlb_set_page(CPUState *env, target_ulong vaddr,
                  target_phys_addr_t paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    unsigned long addend;
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    target_phys_addr_t iotlb;

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }
    section = phys_page_find(paddr >> TARGET_PAGE_BITS);
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
           " prot=%x idx=%d\n",
           vaddr, paddr, prot, mmu_idx);
#endif

    address = vaddr;
    if (!is_ram_rom_romd(section)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    if (is_ram_rom_romd(section)) {
        addend = (unsigned long)memory_region_get_ram_ptr(section->mr)
            + section_addr(section, paddr);
    } else {
        addend = 0;
    }
    if (is_ram_rom(section)) {
        /* Normal RAM. */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + section_addr(section, paddr);
        if (!section->readonly)
            iotlb |= phys_section_notdirty;
        else
            iotlb |= phys_section_rom;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region. This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address. */
        iotlb = section - phys_sections;
        iotlb += section_addr(section, paddr);
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines. */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                address |= TLB_MMIO;
                break;
            }
        }
    }

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section->mr) && section->readonly)
            || is_romd(section)) {
            /* Write access calls the I/O callback. */
            te->addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section->mr)
                   && !cpu_physical_memory_is_dirty(
                           section->mr->ram_addr
                           + section_addr(section, paddr))) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */

struct walk_memory_regions_data
{
    walk_memory_regions_fn fn;
    void *priv;
    unsigned long start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   abi_ulong end, int new_prot)
{
    if (data->start != -1ul) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1ul);
    data->prot = new_prot;

    return 0;
}

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                         (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    unsigned long i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}

static int dump_region(void *priv, abi_ulong start,
                       abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
                   " "TARGET_ABI_FMT_lx" %c%c%c\n",
                   start, end, end - start,
                   ((prot & PAGE_READ) ? 'r' : '-'),
                   ((prot & PAGE_WRITE) ? 'w' : '-'),
                   ((prot & PAGE_EXEC) ? 'x' : '-'));

    return (0);
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
                   "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held. */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid. */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside. */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid. */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around. */
        return -1;
    }

    end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p)
            return -1;
        if (!(p->flags & PAGE_VALID))
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            return 0;
        }
    }
    return 0;
}
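
/* Illustrative sketch (not part of the original file): user-mode syscall
   emulation would validate a guest buffer before touching it, e.g. when
   implementing read(2). The names example_do_read, guest_buf and count are
   assumptions for the example. */
#if 0
static int example_do_read(target_ulong guest_buf, target_ulong count)
{
    if (page_check_range(guest_buf, count, PAGE_WRITE) < 0) {
        return -EFAULT; /* buffer not writable in the guest's view */
    }
    /* ... safe to copy the syscall result into the guest buffer ... */
    return 0;
}
#endif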

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok. */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */
2553
pbrooke2eef172008-06-08 01:09:01 +00002554#if !defined(CONFIG_USER_ONLY)
pbrook8da3ff12008-12-01 18:59:50 +00002555
Paul Brookc04b2b72010-03-01 03:31:14 +00002556#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2557typedef struct subpage_t {
Avi Kivity70c68e42012-01-02 12:32:48 +02002558 MemoryRegion iomem;
Paul Brookc04b2b72010-03-01 03:31:14 +00002559 target_phys_addr_t base;
Avi Kivity5312bd82012-02-12 18:32:55 +02002560 uint16_t sub_section[TARGET_PAGE_SIZE];
Paul Brookc04b2b72010-03-01 03:31:14 +00002561} subpage_t;
2562
Anthony Liguoric227f092009-10-01 16:12:16 -05002563static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002564 uint16_t section);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002565static subpage_t *subpage_init(target_phys_addr_t base);
Avi Kivity5312bd82012-02-12 18:32:55 +02002566static void destroy_page_desc(uint16_t section_index)
Avi Kivity54688b12012-02-09 17:34:32 +02002567{
Avi Kivity5312bd82012-02-12 18:32:55 +02002568 MemoryRegionSection *section = &phys_sections[section_index];
2569 MemoryRegion *mr = section->mr;
Avi Kivity54688b12012-02-09 17:34:32 +02002570
2571 if (mr->subpage) {
2572 subpage_t *subpage = container_of(mr, subpage_t, iomem);
2573 memory_region_destroy(&subpage->iomem);
2574 g_free(subpage);
2575 }
2576}
2577
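/* Recursively walk one level of the physical page map, destroying the
   page descriptors of the leaf entries and marking the node empty. */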
static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
        if (!p[i].is_leaf) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].ptr);
        }
    }
    lp->is_leaf = 0;
    lp->ptr = PHYS_MAP_NODE_NIL;
}

static void destroy_all_mappings(void)
{
    destroy_l2_mapping(&phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}

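/* Append a copy of *section to the phys_sections table, doubling the
   allocation when it is full, and return the new entry's index. */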
static uint16_t phys_section_add(MemoryRegionSection *section)
{
    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}

static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}

/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   Sections that do not start or end on a page boundary are dispatched
   through the subpage machinery below. */
static void register_subpage(MemoryRegionSection *section)
{
    subpage_t *subpage;
    target_phys_addr_t base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    };
    target_phys_addr_t start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(base);
        subsection.mr = &subpage->iomem;
        phys_page_set(base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + section->size;
    subpage_register(subpage, start, end, phys_section_add(section));
}


static void register_multipage(MemoryRegionSection *section)
{
    target_phys_addr_t start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    target_phys_addr_t addr;
    uint16_t section_index = phys_section_add(section);

    assert(size);

    addr = start_addr;
    phys_page_set(addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
                  section_index);
}

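/* Register a memory region section in the physical page map.  The range
   is split into an unaligned head, a page-aligned middle that is mapped
   in bulk, and an unaligned tail; head and tail go through subpages. */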
void cpu_register_physical_memory_log(MemoryRegionSection *section,
                                      bool readonly)
{
    MemoryRegionSection now = *section, remain = *section;

    if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
        || (now.size < TARGET_PAGE_SIZE)) {
        now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space,
                       now.size);
        register_subpage(&now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    now = remain;
    now.size &= TARGET_PAGE_MASK;
    if (now.size) {
        register_multipage(&now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    now = remain;
    if (now.size) {
        register_subpage(&now);
    }
}


void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

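/* Back a new RAM block with an unlinked temporary file created on a
   hugetlbfs mount, so that guest RAM comes from huge pages.  Returns
   the mmap'ed area, or NULL if huge pages cannot be used. */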
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    unlink(filename);
    free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    block->fd = fd;
    return area;
}
#endif

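/* Find a free range of guest ram_addr_t space large enough for 'size',
   preferring the smallest gap between existing blocks (best fit). */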
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    if (QLIST_EMPTY(&ram_list.blocks))
        return 0;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QLIST_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

static ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QLIST_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
        char *id = dev->parent_bus->info->get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
}

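/* Allocate a new RAM block of 'size' bytes.  If 'host' is non-NULL the
   caller-provided memory is used and flagged RAM_PREALLOC_MASK;
   otherwise backing comes from -mem-path, Xen, or plain host memory.
   Returns the block's offset in the ram_addr_t space. */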
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
            /* S390 KVM requires the topmost vma of the RAM to be smaller
               than a system-defined value, which is at least 256GB.
               Larger systems have larger values.  We put the guest between
               the end of the data segment (system break) and this value.
               We use 32GB as a base to have enough room for the system
               break to grow. */
            new_block->host = mmap((void*)0x800000000, size,
                                   PROT_EXEC|PROT_READ|PROT_WRITE,
                                   MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
            if (new_block->host == MAP_FAILED) {
                fprintf(stderr, "Allocating RAM failed\n");
                abort();
            }
#else
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
#endif
            qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
        }
    }
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            g_free(block);
            return;
        }
    }
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            g_free(block);
            return;
        }
    }
}

#ifndef _WIN32
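/* Throw away the host mapping of [addr, addr + length) within a RAM
   block and recreate it at the same virtual address, using the same
   kind of backing (file, anonymous or shared) the block was originally
   allocated with. */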
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            /* Move this entry to the start of the list.  */
            if (block != QLIST_FIRST(&ram_list.blocks)) {
                QLIST_REMOVE(block, next);
                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
            }
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * Same as qemu_get_ram_ptr but avoids reordering the ram blocks.
 */
void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to guest RAM.  Similar to qemu_get_ram_ptr
 * but takes a size argument. */
void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QLIST_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);
}

int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* This case appears when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}

static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
#endif
    return 0;
}

static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
#endif
}

static const MemoryRegionOps unassigned_mem_ops = {
    .read = unassigned_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
                               unsigned size)
{
    abort();
}

static void error_mem_write(void *opaque, target_phys_addr_t addr,
                            uint64_t value, unsigned size)
{
    abort();
}

static const MemoryRegionOps error_mem_ops = {
    .read = error_mem_read,
    .write = error_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const MemoryRegionOps rom_mem_ops = {
    .read = error_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

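/* Write handler for pages whose dirty bits are still clear: translated
   code for the page is invalidated first, then the write is performed
   and the dirty flags are set.  Once the page is fully dirty, the slow
   path is removed from the TLB. */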
static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static const MemoryRegionOps notdirty_mem_ops = {
    .read = error_mem_read,
    .write = notdirty_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(env);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, target_phys_addr_t addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(addr, val);
        break;
    case 2:
        stw_phys(addr, val);
        break;
    case 4:
        stl_phys(addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

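/* Subpage handlers: when a page is covered by more than one region, the
   per-byte sub_section table maps the offset within the page to the
   section that actually handles it, and the access is forwarded there. */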
static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
                             unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    return io_mem_read(section->mr->ram_addr, addr, len);
}

static void subpage_write(void *opaque, target_phys_addr_t addr,
                          uint64_t value, unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx
           " idx %d value %"PRIx64"\n",
           __func__, mmio, len, addr, idx, value);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    io_mem_write(section->mr->ram_addr, addr, value, len);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
                                 unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return ldub_p(ptr);
    case 2: return lduw_p(ptr);
    case 4: return ldl_p(ptr);
    default: abort();
    }
}

static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
                              uint64_t value, unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return stb_p(ptr, value);
    case 2: return stw_p(ptr, value);
    case 4: return stl_p(ptr, value);
    default: abort();
    }
}

static const MemoryRegionOps subpage_ram_ops = {
    .read = subpage_ram_read,
    .write = subpage_ram_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    if (memory_region_is_ram(phys_sections[section].mr)) {
        MemoryRegionSection new_section = phys_sections[section];
        new_section.mr = &io_mem_subpage_ram;
        section = phys_section_add(&new_section);
    }
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

static subpage_t *subpage_init(target_phys_addr_t base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->base = base;
    memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);

    return mmio;
}

static int get_free_io_mem_idx(void)
{
    int i;

    for (i = 0; i < IO_MEM_NB_ENTRIES; i++)
        if (!io_mem_used[i]) {
            io_mem_used[i] = 1;
            return i;
        }
    fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
    return -1;
}

/* Register a MemoryRegion as an io zone.  If io_index is non-zero, the
   corresponding io zone is modified.  If it is zero, a new io zone is
   allocated.  The return value can be used with
   cpu_register_physical_memory().  (-1) is returned on error. */
static int cpu_register_io_memory_fixed(int io_index, MemoryRegion *mr)
{
    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    io_mem_region[io_index] = mr;

    return io_index;
}

int cpu_register_io_memory(MemoryRegion *mr)
{
    return cpu_register_io_memory_fixed(0, mr);
}

void cpu_unregister_io_memory(int io_index)
{
    io_mem_region[io_index] = NULL;
    io_mem_used[io_index] = 0;
}

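/* Build a section that spans the entire address space for one of the
   fixed io_mem_* regions and return its phys_sections index. */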
static uint16_t dummy_section(MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = UINT64_MAX,
    };

    return phys_section_add(&section);
}

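/* Translate a section-encoded io address (section index in the low
   bits, page offset in the high bits) into an io handler address built
   from the section's MemoryRegion ram_addr. */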
target_phys_addr_t section_to_ioaddr(target_phys_addr_t section_io_addr)
{
    MemoryRegionSection *section;

    section = &phys_sections[section_io_addr & ~TARGET_PAGE_MASK];
    return (section_io_addr & TARGET_PAGE_MASK)
        | (section->mr->ram_addr & ~TARGET_PAGE_MASK);
}

static void io_mem_init(void)
{
    int i;

    /* Must be first: */
    memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
    assert(io_mem_ram.ram_addr == 0);
    memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
                          "subpage-ram", UINT64_MAX);
    for (i = 0; i < 5; i++)
        io_mem_used[i] = 1;

    memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}

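/* Core memory listener: rebuilds the physical page map on every
   topology change.  'begin' drops all old mappings and re-creates the
   fixed sections; 'commit' then flushes every CPU's TLB, since TLB
   entries cache ram addresses. */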
static void core_begin(MemoryListener *listener)
{
    destroy_all_mappings();
    phys_sections_clear();
    phys_map.ptr = PHYS_MAP_NODE_NIL;
    phys_section_unassigned = dummy_section(&io_mem_unassigned);
    phys_section_notdirty = dummy_section(&io_mem_notdirty);
    phys_section_rom = dummy_section(&io_mem_rom);
    phys_section_watch = dummy_section(&io_mem_watch);
}

static void core_commit(MemoryListener *listener)
{
    CPUState *env;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}

static void core_region_add(MemoryListener *listener,
                            MemoryRegionSection *section)
{
    cpu_register_physical_memory_log(section, section->readonly);
}

static void core_region_del(MemoryListener *listener,
                            MemoryRegionSection *section)
{
}

static void core_region_nop(MemoryListener *listener,
                            MemoryRegionSection *section)
{
    cpu_register_physical_memory_log(section, section->readonly);
}

static void core_log_start(MemoryListener *listener,
                           MemoryRegionSection *section)
{
}

static void core_log_stop(MemoryListener *listener,
                          MemoryRegionSection *section)
{
}

static void core_log_sync(MemoryListener *listener,
                          MemoryRegionSection *section)
{
}

static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(1);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(0);
}

static void core_eventfd_add(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool match_data, uint64_t data, int fd)
{
}

static void core_eventfd_del(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool match_data, uint64_t data, int fd)
{
}

static void io_begin(MemoryListener *listener)
{
}

static void io_commit(MemoryListener *listener)
{
}

static void io_region_add(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);

    mrio->mr = section->mr;
    mrio->offset = section->offset_within_region;
    iorange_init(&mrio->iorange, &memory_region_iorange_ops,
                 section->offset_within_address_space, section->size);
    ioport_register(&mrio->iorange);
}

static void io_region_del(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    isa_unassign_ioport(section->offset_within_address_space, section->size);
}

static void io_region_nop(MemoryListener *listener,
                          MemoryRegionSection *section)
{
}

static void io_log_start(MemoryListener *listener,
                         MemoryRegionSection *section)
{
}

static void io_log_stop(MemoryListener *listener,
                        MemoryRegionSection *section)
{
}

static void io_log_sync(MemoryListener *listener,
                        MemoryRegionSection *section)
{
}

static void io_log_global_start(MemoryListener *listener)
{
}

static void io_log_global_stop(MemoryListener *listener)
{
}

static void io_eventfd_add(MemoryListener *listener,
                           MemoryRegionSection *section,
                           bool match_data, uint64_t data, int fd)
{
}

static void io_eventfd_del(MemoryListener *listener,
                           MemoryRegionSection *section,
                           bool match_data, uint64_t data, int fd)
{
}

static MemoryListener core_memory_listener = {
    .begin = core_begin,
    .commit = core_commit,
    .region_add = core_region_add,
    .region_del = core_region_del,
    .region_nop = core_region_nop,
    .log_start = core_log_start,
    .log_stop = core_log_stop,
    .log_sync = core_log_sync,
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
    .eventfd_add = core_eventfd_add,
    .eventfd_del = core_eventfd_del,
    .priority = 0,
};

static MemoryListener io_memory_listener = {
    .begin = io_begin,
    .commit = io_commit,
    .region_add = io_region_add,
    .region_del = io_region_del,
    .region_nop = io_region_nop,
    .log_start = io_log_start,
    .log_stop = io_log_stop,
    .log_sync = io_log_sync,
    .log_global_start = io_log_global_start,
    .log_global_stop = io_log_global_stop,
    .eventfd_add = io_eventfd_add,
    .eventfd_del = io_eventfd_del,
    .priority = 0,
};

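/* Create the root "system" and "io" address spaces and attach the
   listeners that keep the physical page map and the ioport table in
   sync with them. */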
static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, "system", INT64_MAX);
    set_system_memory_map(system_memory);

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init(system_io, "io", 65536);
    set_system_io_map(system_io);

    memory_listener_register(&core_memory_listener, system_memory);
    memory_listener_register(&io_memory_listener, system_io);
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
Anthony Liguoric227f092009-10-01 16:12:16 -05003838void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
bellard13eb76e2004-01-24 15:23:36 +00003839 int len, int is_write)
3840{
3841 int l, io_index;
3842 uint8_t *ptr;
3843 uint32_t val;
Anthony Liguoric227f092009-10-01 16:12:16 -05003844 target_phys_addr_t page;
Avi Kivityf3705d52012-03-08 16:16:34 +02003845 MemoryRegionSection *section;
ths3b46e622007-09-17 08:09:54 +00003846
bellard13eb76e2004-01-24 15:23:36 +00003847 while (len > 0) {
3848 page = addr & TARGET_PAGE_MASK;
3849 l = (page + TARGET_PAGE_SIZE) - addr;
3850 if (l > len)
3851 l = len;
Avi Kivity06ef3522012-02-13 16:11:22 +02003852 section = phys_page_find(page >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003853
bellard13eb76e2004-01-24 15:23:36 +00003854 if (is_write) {
Avi Kivityf3705d52012-03-08 16:16:34 +02003855 if (!memory_region_is_ram(section->mr)) {
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003856 target_phys_addr_t addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02003857 io_index = memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003858 & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf3705d52012-03-08 16:16:34 +02003859 addr1 = section_addr(section, addr);
bellard6a00d602005-11-21 23:25:50 +00003860 /* XXX: could force cpu_single_env to NULL to avoid
3861 potential bugs */
aurel326c2934d2009-02-18 21:37:17 +00003862 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003863 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003864 val = ldl_p(buf);
Avi Kivityacbbec52011-11-21 12:27:03 +02003865 io_mem_write(io_index, addr1, val, 4);
bellard13eb76e2004-01-24 15:23:36 +00003866 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003867 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003868 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003869 val = lduw_p(buf);
Avi Kivityacbbec52011-11-21 12:27:03 +02003870 io_mem_write(io_index, addr1, val, 2);
bellard13eb76e2004-01-24 15:23:36 +00003871 l = 2;
3872 } else {
bellard1c213d12005-09-03 10:49:04 +00003873 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003874 val = ldub_p(buf);
Avi Kivityacbbec52011-11-21 12:27:03 +02003875 io_mem_write(io_index, addr1, val, 1);
bellard13eb76e2004-01-24 15:23:36 +00003876 l = 1;
3877 }
Avi Kivityf3705d52012-03-08 16:16:34 +02003878 } else if (!section->readonly) {
Anthony PERARD8ca56922011-07-15 04:32:53 +00003879 ram_addr_t addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02003880 addr1 = memory_region_get_ram_addr(section->mr)
3881 + section_addr(section, addr);
bellard13eb76e2004-01-24 15:23:36 +00003882 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003883 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00003884 memcpy(ptr, buf, l);
bellard3a7d9292005-08-21 09:26:42 +00003885 if (!cpu_physical_memory_is_dirty(addr1)) {
3886 /* invalidate code */
3887 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3888 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003889 cpu_physical_memory_set_dirty_flags(
3890 addr1, (0xff & ~CODE_DIRTY_FLAG));
bellard3a7d9292005-08-21 09:26:42 +00003891 }
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003892 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00003893 }
3894 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02003895 if (!is_ram_rom_romd(section)) {
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003896 target_phys_addr_t addr1;
bellard13eb76e2004-01-24 15:23:36 +00003897 /* I/O case */
Avi Kivityf3705d52012-03-08 16:16:34 +02003898 io_index = memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003899 & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf3705d52012-03-08 16:16:34 +02003900 addr1 = section_addr(section, addr);
aurel326c2934d2009-02-18 21:37:17 +00003901 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003902 /* 32 bit read access */
Avi Kivityacbbec52011-11-21 12:27:03 +02003903 val = io_mem_read(io_index, addr1, 4);
bellardc27004e2005-01-03 23:35:10 +00003904 stl_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003905 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003906 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003907 /* 16 bit read access */
Avi Kivityacbbec52011-11-21 12:27:03 +02003908 val = io_mem_read(io_index, addr1, 2);
bellardc27004e2005-01-03 23:35:10 +00003909 stw_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003910 l = 2;
3911 } else {
bellard1c213d12005-09-03 10:49:04 +00003912 /* 8 bit read access */
Avi Kivityacbbec52011-11-21 12:27:03 +02003913 val = io_mem_read(io_index, addr1, 1);
bellardc27004e2005-01-03 23:35:10 +00003914 stb_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003915 l = 1;
3916 }
3917 } else {
3918 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02003919 ptr = qemu_get_ram_ptr(section->mr->ram_addr)
3920 + section_addr(section, addr);
3921 memcpy(buf, ptr, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003922 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00003923 }
3924 }
3925 len -= l;
3926 buf += l;
3927 addr += l;
3928 }
3929}
bellard8df1cd02005-01-28 22:37:22 +00003930
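/* Illustrative sketch, not part of the original file: exercising
   cpu_physical_memory_rw() from device or test code. The 0x1000 guest
   address and the payload are invented; the function splits the access at
   page boundaries and routes each fragment either to RAM or to the owning
   device's io_mem handlers exactly as implemented above. */
static void example_poke_guest_ram(void)
{
    uint8_t out[4] = { 0xde, 0xad, 0xbe, 0xef };
    uint8_t in[4];

    cpu_physical_memory_rw(0x1000, out, sizeof(out), 1);  /* write */
    cpu_physical_memory_rw(0x1000, in, sizeof(in), 0);    /* read back */
}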
bellardd0ecd2a2006-04-23 17:14:48 +00003931/* used for ROM loading: can write to both RAM and ROM */
Anthony Liguoric227f092009-10-01 16:12:16 -05003932void cpu_physical_memory_write_rom(target_phys_addr_t addr,
bellardd0ecd2a2006-04-23 17:14:48 +00003933 const uint8_t *buf, int len)
3934{
3935 int l;
3936 uint8_t *ptr;
Anthony Liguoric227f092009-10-01 16:12:16 -05003937 target_phys_addr_t page;
Avi Kivityf3705d52012-03-08 16:16:34 +02003938 MemoryRegionSection *section;
ths3b46e622007-09-17 08:09:54 +00003939
bellardd0ecd2a2006-04-23 17:14:48 +00003940 while (len > 0) {
3941 page = addr & TARGET_PAGE_MASK;
3942 l = (page + TARGET_PAGE_SIZE) - addr;
3943 if (l > len)
3944 l = len;
Avi Kivity06ef3522012-02-13 16:11:22 +02003945 section = phys_page_find(page >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003946
Avi Kivityf3705d52012-03-08 16:16:34 +02003947 if (!is_ram_rom_romd(section)) {
bellardd0ecd2a2006-04-23 17:14:48 +00003948 /* do nothing */
3949 } else {
3950 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02003951 addr1 = memory_region_get_ram_addr(section->mr)
3952 + section_addr(section, addr);
bellardd0ecd2a2006-04-23 17:14:48 +00003953 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003954 ptr = qemu_get_ram_ptr(addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00003955 memcpy(ptr, buf, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003956 qemu_put_ram_ptr(ptr);
bellardd0ecd2a2006-04-23 17:14:48 +00003957 }
3958 len -= l;
3959 buf += l;
3960 addr += l;
3961 }
3962}
3963
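/* Illustrative sketch, not part of the original file: loading a firmware
   blob with cpu_physical_memory_write_rom(). Unlike the generic _rw() path
   above, it does not respect the readonly flag, so it can seed ROM regions
   at startup; the 0xfffc0000 reset-vector address is invented. */
static void example_load_firmware(const uint8_t *blob, int blob_len)
{
    cpu_physical_memory_write_rom(0xfffc0000, blob, blob_len);
}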
aliguori6d16c2f2009-01-22 16:59:11 +00003964typedef struct {
3965 void *buffer;
Anthony Liguoric227f092009-10-01 16:12:16 -05003966 target_phys_addr_t addr;
3967 target_phys_addr_t len;
aliguori6d16c2f2009-01-22 16:59:11 +00003968} BounceBuffer;
3969
3970static BounceBuffer bounce;
3971
aliguoriba223c22009-01-22 16:59:16 +00003972typedef struct MapClient {
3973 void *opaque;
3974 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00003975 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00003976} MapClient;
3977
Blue Swirl72cf2d42009-09-12 07:36:22 +00003978static QLIST_HEAD(map_client_list, MapClient) map_client_list
3979 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003980
3981void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3982{
Anthony Liguori7267c092011-08-20 22:09:37 -05003983 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00003984
3985 client->opaque = opaque;
3986 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003987 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00003988 return client;
3989}
3990
3991void cpu_unregister_map_client(void *_client)
3992{
3993 MapClient *client = (MapClient *)_client;
3994
Blue Swirl72cf2d42009-09-12 07:36:22 +00003995 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05003996 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00003997}
3998
3999static void cpu_notify_map_clients(void)
4000{
4001 MapClient *client;
4002
Blue Swirl72cf2d42009-09-12 07:36:22 +00004003 while (!QLIST_EMPTY(&map_client_list)) {
4004 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00004005 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09004006 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00004007 }
4008}
4009
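/* Illustrative sketch, not part of the original file: the retry protocol
   around the single bounce buffer. When cpu_physical_memory_map() below
   fails because the bounce buffer is busy, a caller can register a callback
   that fires once the buffer is released; as cpu_notify_map_clients() above
   shows, the client is unregistered automatically before the callback runs.
   ExampleDma and example_dma_retry() are invented names. */
typedef struct ExampleDma {
    target_phys_addr_t addr;
    target_phys_addr_t len;
} ExampleDma;

static void example_dma_retry(void *opaque)
{
    ExampleDma *dma = opaque;

    (void)dma; /* retry cpu_physical_memory_map(dma->addr, &dma->len, ...) */
}

static void example_dma_defer(ExampleDma *dma)
{
    /* the returned handle could be kept for cpu_unregister_map_client() */
    cpu_register_map_client(dma, example_dma_retry);
}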
aliguori6d16c2f2009-01-22 16:59:11 +00004010/* Map a physical memory region into a host virtual address.
4011 * May map a subset of the requested range, given by and returned in *plen.
4012 * May return NULL if resources needed to perform the mapping are exhausted.
4013 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00004014 * Use cpu_register_map_client() to know when retrying the map operation is
4015 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00004016 */
Anthony Liguoric227f092009-10-01 16:12:16 -05004017void *cpu_physical_memory_map(target_phys_addr_t addr,
4018 target_phys_addr_t *plen,
aliguori6d16c2f2009-01-22 16:59:11 +00004019 int is_write)
4020{
Anthony Liguoric227f092009-10-01 16:12:16 -05004021 target_phys_addr_t len = *plen;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01004022 target_phys_addr_t todo = 0;
aliguori6d16c2f2009-01-22 16:59:11 +00004023 int l;
Anthony Liguoric227f092009-10-01 16:12:16 -05004024 target_phys_addr_t page;
Avi Kivityf3705d52012-03-08 16:16:34 +02004025 MemoryRegionSection *section;
Anthony PERARDf15fbc42011-07-20 08:17:42 +00004026 ram_addr_t raddr = RAM_ADDR_MAX;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01004027 ram_addr_t rlen;
4028 void *ret;
aliguori6d16c2f2009-01-22 16:59:11 +00004029
4030 while (len > 0) {
4031 page = addr & TARGET_PAGE_MASK;
4032 l = (page + TARGET_PAGE_SIZE) - addr;
4033 if (l > len)
4034 l = len;
Avi Kivity06ef3522012-02-13 16:11:22 +02004035 section = phys_page_find(page >> TARGET_PAGE_BITS);
aliguori6d16c2f2009-01-22 16:59:11 +00004036
Avi Kivityf3705d52012-03-08 16:16:34 +02004037 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01004038 if (todo || bounce.buffer) {
aliguori6d16c2f2009-01-22 16:59:11 +00004039 break;
4040 }
4041 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
4042 bounce.addr = addr;
4043 bounce.len = l;
4044 if (!is_write) {
Stefan Weil54f7b4a2011-04-10 18:23:39 +02004045 cpu_physical_memory_read(addr, bounce.buffer, l);
aliguori6d16c2f2009-01-22 16:59:11 +00004046 }
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01004047
4048 *plen = l;
4049 return bounce.buffer;
aliguori6d16c2f2009-01-22 16:59:11 +00004050 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01004051 if (!todo) {
Avi Kivityf3705d52012-03-08 16:16:34 +02004052 raddr = memory_region_get_ram_addr(section->mr)
4053 + section_addr(section, addr);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01004054 }
aliguori6d16c2f2009-01-22 16:59:11 +00004055
4056 len -= l;
4057 addr += l;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01004058 todo += l;
aliguori6d16c2f2009-01-22 16:59:11 +00004059 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01004060 rlen = todo;
4061 ret = qemu_ram_ptr_length(raddr, &rlen);
4062 *plen = rlen;
4063 return ret;
aliguori6d16c2f2009-01-22 16:59:11 +00004064}
4065
4066/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
4067 * Will also mark the memory as dirty if is_write == 1. access_len gives
4068 * the amount of memory that was actually read or written by the caller.
4069 */
Anthony Liguoric227f092009-10-01 16:12:16 -05004070void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
4071 int is_write, target_phys_addr_t access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00004072{
4073 if (buffer != bounce.buffer) {
4074 if (is_write) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03004075 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00004076 while (access_len) {
4077 unsigned l;
4078 l = TARGET_PAGE_SIZE;
4079 if (l > access_len)
4080 l = access_len;
4081 if (!cpu_physical_memory_is_dirty(addr1)) {
4082 /* invalidate code */
4083 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
4084 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09004085 cpu_physical_memory_set_dirty_flags(
4086 addr1, (0xff & ~CODE_DIRTY_FLAG));
aliguori6d16c2f2009-01-22 16:59:11 +00004087 }
4088 addr1 += l;
4089 access_len -= l;
4090 }
4091 }
Jan Kiszka868bb332011-06-21 22:59:09 +02004092 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02004093 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01004094 }
aliguori6d16c2f2009-01-22 16:59:11 +00004095 return;
4096 }
4097 if (is_write) {
4098 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
4099 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00004100 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00004101 bounce.buffer = NULL;
aliguoriba223c22009-01-22 16:59:16 +00004102 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00004103}
bellardd0ecd2a2006-04-23 17:14:48 +00004104
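/* Illustrative sketch, not part of the original file: the canonical
   map/modify/unmap pattern for zero-copy DMA. When the target is RAM the
   returned pointer aliases guest memory directly; otherwise the bounce
   buffer above is handed out and the data is copied on unmap. The guest
   address and length come from the (invented) caller. */
static void example_dma_write(target_phys_addr_t addr,
                              const uint8_t *data,
                              target_phys_addr_t len)
{
    target_phys_addr_t plen = len;
    void *host = cpu_physical_memory_map(addr, &plen, 1 /* is_write */);

    if (!host) {
        return; /* resources exhausted: defer via cpu_register_map_client() */
    }
    memcpy(host, data, plen);             /* plen may be smaller than len */
    cpu_physical_memory_unmap(host, plen, 1, plen);
}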
bellard8df1cd02005-01-28 22:37:22 +00004105/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004106static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
4107 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00004108{
4109 int io_index;
4110 uint8_t *ptr;
4111 uint32_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02004112 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00004113
Avi Kivity06ef3522012-02-13 16:11:22 +02004114 section = phys_page_find(addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00004115
Avi Kivityf3705d52012-03-08 16:16:34 +02004116 if (!is_ram_rom_romd(section)) {
bellard8df1cd02005-01-28 22:37:22 +00004117 /* I/O case */
Avi Kivityf3705d52012-03-08 16:16:34 +02004118 io_index = memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02004119 & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf3705d52012-03-08 16:16:34 +02004120 addr = section_addr(section, addr);
Avi Kivityacbbec52011-11-21 12:27:03 +02004121 val = io_mem_read(io_index, addr, 4);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004122#if defined(TARGET_WORDS_BIGENDIAN)
4123 if (endian == DEVICE_LITTLE_ENDIAN) {
4124 val = bswap32(val);
4125 }
4126#else
4127 if (endian == DEVICE_BIG_ENDIAN) {
4128 val = bswap32(val);
4129 }
4130#endif
bellard8df1cd02005-01-28 22:37:22 +00004131 } else {
4132 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02004133 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02004134 & TARGET_PAGE_MASK)
Avi Kivityf3705d52012-03-08 16:16:34 +02004135 + section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004136 switch (endian) {
4137 case DEVICE_LITTLE_ENDIAN:
4138 val = ldl_le_p(ptr);
4139 break;
4140 case DEVICE_BIG_ENDIAN:
4141 val = ldl_be_p(ptr);
4142 break;
4143 default:
4144 val = ldl_p(ptr);
4145 break;
4146 }
bellard8df1cd02005-01-28 22:37:22 +00004147 }
4148 return val;
4149}
4150
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004151uint32_t ldl_phys(target_phys_addr_t addr)
4152{
4153 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4154}
4155
4156uint32_t ldl_le_phys(target_phys_addr_t addr)
4157{
4158 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4159}
4160
4161uint32_t ldl_be_phys(target_phys_addr_t addr)
4162{
4163 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
4164}
4165
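/* Illustrative sketch, not part of the original file: the three ldl
   flavours differ only in the byte order they assume for the data at addr.
   A device model whose register file is defined as little-endian can use
   ldl_le_phys() and read the same value on any host or target, while
   ldl_phys() follows the target's native order. The register base is an
   invented parameter. */
static uint32_t example_read_le_register(target_phys_addr_t reg_base)
{
    return ldl_le_phys(reg_base);   /* explicit little-endian 32-bit load */
}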
bellard84b7b8e2005-11-28 21:19:04 +00004166/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004167static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
4168 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00004169{
4170 int io_index;
4171 uint8_t *ptr;
4172 uint64_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02004173 MemoryRegionSection *section;
bellard84b7b8e2005-11-28 21:19:04 +00004174
Avi Kivity06ef3522012-02-13 16:11:22 +02004175 section = phys_page_find(addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00004176
Avi Kivityf3705d52012-03-08 16:16:34 +02004177 if (!is_ram_rom_romd(section)) {
bellard84b7b8e2005-11-28 21:19:04 +00004178 /* I/O case */
Avi Kivityf3705d52012-03-08 16:16:34 +02004179 io_index = memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02004180 & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf3705d52012-03-08 16:16:34 +02004181 addr = section_addr(section, addr);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004182
4183        /* XXX: This is broken when the device endianness differs from the
4184           CPU endianness. Fix this and add a check of the "endian" variable. */
bellard84b7b8e2005-11-28 21:19:04 +00004185#ifdef TARGET_WORDS_BIGENDIAN
Avi Kivityacbbec52011-11-21 12:27:03 +02004186 val = io_mem_read(io_index, addr, 4) << 32;
4187 val |= io_mem_read(io_index, addr + 4, 4);
bellard84b7b8e2005-11-28 21:19:04 +00004188#else
Avi Kivityacbbec52011-11-21 12:27:03 +02004189 val = io_mem_read(io_index, addr, 4);
4190 val |= io_mem_read(io_index, addr + 4, 4) << 32;
bellard84b7b8e2005-11-28 21:19:04 +00004191#endif
4192 } else {
4193 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02004194 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02004195 & TARGET_PAGE_MASK)
Avi Kivityf3705d52012-03-08 16:16:34 +02004196 + section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004197 switch (endian) {
4198 case DEVICE_LITTLE_ENDIAN:
4199 val = ldq_le_p(ptr);
4200 break;
4201 case DEVICE_BIG_ENDIAN:
4202 val = ldq_be_p(ptr);
4203 break;
4204 default:
4205 val = ldq_p(ptr);
4206 break;
4207 }
bellard84b7b8e2005-11-28 21:19:04 +00004208 }
4209 return val;
4210}
4211
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004212uint64_t ldq_phys(target_phys_addr_t addr)
4213{
4214 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4215}
4216
4217uint64_t ldq_le_phys(target_phys_addr_t addr)
4218{
4219 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4220}
4221
4222uint64_t ldq_be_phys(target_phys_addr_t addr)
4223{
4224 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
4225}
4226
bellardaab33092005-10-30 20:48:42 +00004227/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05004228uint32_t ldub_phys(target_phys_addr_t addr)
bellardaab33092005-10-30 20:48:42 +00004229{
4230 uint8_t val;
4231 cpu_physical_memory_read(addr, &val, 1);
4232 return val;
4233}
4234
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004235/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004236static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
4237 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00004238{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004239 int io_index;
4240 uint8_t *ptr;
4241 uint64_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02004242 MemoryRegionSection *section;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004243
Avi Kivity06ef3522012-02-13 16:11:22 +02004244 section = phys_page_find(addr >> TARGET_PAGE_BITS);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004245
Avi Kivityf3705d52012-03-08 16:16:34 +02004246 if (!is_ram_rom_romd(section)) {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004247 /* I/O case */
Avi Kivityf3705d52012-03-08 16:16:34 +02004248 io_index = memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02004249 & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf3705d52012-03-08 16:16:34 +02004250 addr = section_addr(section, addr);
Avi Kivityacbbec52011-11-21 12:27:03 +02004251 val = io_mem_read(io_index, addr, 2);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004252#if defined(TARGET_WORDS_BIGENDIAN)
4253 if (endian == DEVICE_LITTLE_ENDIAN) {
4254 val = bswap16(val);
4255 }
4256#else
4257 if (endian == DEVICE_BIG_ENDIAN) {
4258 val = bswap16(val);
4259 }
4260#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004261 } else {
4262 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02004263 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02004264 & TARGET_PAGE_MASK)
Avi Kivityf3705d52012-03-08 16:16:34 +02004265 + section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004266 switch (endian) {
4267 case DEVICE_LITTLE_ENDIAN:
4268 val = lduw_le_p(ptr);
4269 break;
4270 case DEVICE_BIG_ENDIAN:
4271 val = lduw_be_p(ptr);
4272 break;
4273 default:
4274 val = lduw_p(ptr);
4275 break;
4276 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004277 }
4278 return val;
bellardaab33092005-10-30 20:48:42 +00004279}
4280
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004281uint32_t lduw_phys(target_phys_addr_t addr)
4282{
4283 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4284}
4285
4286uint32_t lduw_le_phys(target_phys_addr_t addr)
4287{
4288 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4289}
4290
4291uint32_t lduw_be_phys(target_phys_addr_t addr)
4292{
4293 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
4294}
4295
bellard8df1cd02005-01-28 22:37:22 +00004296/* warning: addr must be aligned. The ram page is not marked as dirty
4297 and the code inside is not invalidated. It is useful if the dirty
4298 bits are used to track modified PTEs */
Anthony Liguoric227f092009-10-01 16:12:16 -05004299void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
bellard8df1cd02005-01-28 22:37:22 +00004300{
4301 int io_index;
4302 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02004303 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00004304
Avi Kivity06ef3522012-02-13 16:11:22 +02004305 section = phys_page_find(addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00004306
Avi Kivityf3705d52012-03-08 16:16:34 +02004307 if (!memory_region_is_ram(section->mr) || section->readonly) {
4308 if (memory_region_is_ram(section->mr)) {
Avi Kivity06ef3522012-02-13 16:11:22 +02004309 io_index = io_mem_rom.ram_addr;
4310 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02004311 io_index = memory_region_get_ram_addr(section->mr);
Avi Kivity06ef3522012-02-13 16:11:22 +02004312 }
Avi Kivityf3705d52012-03-08 16:16:34 +02004313 addr = section_addr(section, addr);
Avi Kivityacbbec52011-11-21 12:27:03 +02004314 io_mem_write(io_index, addr, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00004315 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02004316 unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02004317 & TARGET_PAGE_MASK)
Avi Kivityf3705d52012-03-08 16:16:34 +02004318 + section_addr(section, addr);
pbrook5579c7f2009-04-11 14:47:08 +00004319 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00004320 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00004321
4322 if (unlikely(in_migration)) {
4323 if (!cpu_physical_memory_is_dirty(addr1)) {
4324 /* invalidate code */
4325 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4326 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09004327 cpu_physical_memory_set_dirty_flags(
4328 addr1, (0xff & ~CODE_DIRTY_FLAG));
aliguori74576192008-10-06 14:02:03 +00004329 }
4330 }
bellard8df1cd02005-01-28 22:37:22 +00004331 }
4332}
4333
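/* Illustrative sketch, not part of the original file: the notdirty store is
   meant for TLB/MMU helpers that update guest page tables behind the
   guest's back. Rewriting a PTE with plain stl_phys() would invalidate
   translated code and set dirty bits; stl_phys_notdirty() leaves the dirty
   bitmap alone (except while migrating) so accessed/dirty bookkeeping stays
   invisible. The flat 32-bit PTE layout and the 0x20 accessed bit are
   invented for illustration. */
static void example_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    stl_phys_notdirty(pte_addr, pte | 0x20);
}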
Anthony Liguoric227f092009-10-01 16:12:16 -05004334void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
j_mayerbc98a7e2007-04-04 07:55:12 +00004335{
4336 int io_index;
4337 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02004338 MemoryRegionSection *section;
j_mayerbc98a7e2007-04-04 07:55:12 +00004339
Avi Kivity06ef3522012-02-13 16:11:22 +02004340 section = phys_page_find(addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00004341
Avi Kivityf3705d52012-03-08 16:16:34 +02004342 if (!memory_region_is_ram(section->mr) || section->readonly) {
4343 if (memory_region_is_ram(section->mr)) {
Avi Kivity06ef3522012-02-13 16:11:22 +02004344 io_index = io_mem_rom.ram_addr;
4345 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02004346 io_index = memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02004347 & (IO_MEM_NB_ENTRIES - 1);
4348 }
Avi Kivityf3705d52012-03-08 16:16:34 +02004349 addr = section_addr(section, addr);
j_mayerbc98a7e2007-04-04 07:55:12 +00004350#ifdef TARGET_WORDS_BIGENDIAN
Avi Kivityacbbec52011-11-21 12:27:03 +02004351 io_mem_write(io_index, addr, val >> 32, 4);
4352 io_mem_write(io_index, addr + 4, (uint32_t)val, 4);
j_mayerbc98a7e2007-04-04 07:55:12 +00004353#else
Avi Kivityacbbec52011-11-21 12:27:03 +02004354 io_mem_write(io_index, addr, (uint32_t)val, 4);
4355 io_mem_write(io_index, addr + 4, val >> 32, 4);
j_mayerbc98a7e2007-04-04 07:55:12 +00004356#endif
4357 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02004358 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02004359 & TARGET_PAGE_MASK)
Avi Kivityf3705d52012-03-08 16:16:34 +02004360 + section_addr(section, addr));
j_mayerbc98a7e2007-04-04 07:55:12 +00004361 stq_p(ptr, val);
4362 }
4363}
4364
bellard8df1cd02005-01-28 22:37:22 +00004365/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004366static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
4367 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00004368{
4369 int io_index;
4370 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02004371 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00004372
Avi Kivity06ef3522012-02-13 16:11:22 +02004373 section = phys_page_find(addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00004374
Avi Kivityf3705d52012-03-08 16:16:34 +02004375 if (!memory_region_is_ram(section->mr) || section->readonly) {
4376 if (memory_region_is_ram(section->mr)) {
Avi Kivity06ef3522012-02-13 16:11:22 +02004377 io_index = io_mem_rom.ram_addr;
4378 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02004379 io_index = memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02004380 & (IO_MEM_NB_ENTRIES - 1);
4381 }
Avi Kivityf3705d52012-03-08 16:16:34 +02004382 addr = section_addr(section, addr);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004383#if defined(TARGET_WORDS_BIGENDIAN)
4384 if (endian == DEVICE_LITTLE_ENDIAN) {
4385 val = bswap32(val);
4386 }
4387#else
4388 if (endian == DEVICE_BIG_ENDIAN) {
4389 val = bswap32(val);
4390 }
4391#endif
Avi Kivityacbbec52011-11-21 12:27:03 +02004392 io_mem_write(io_index, addr, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00004393 } else {
4394 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02004395 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
4396 + section_addr(section, addr);
bellard8df1cd02005-01-28 22:37:22 +00004397 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00004398 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004399 switch (endian) {
4400 case DEVICE_LITTLE_ENDIAN:
4401 stl_le_p(ptr, val);
4402 break;
4403 case DEVICE_BIG_ENDIAN:
4404 stl_be_p(ptr, val);
4405 break;
4406 default:
4407 stl_p(ptr, val);
4408 break;
4409 }
bellard3a7d9292005-08-21 09:26:42 +00004410 if (!cpu_physical_memory_is_dirty(addr1)) {
4411 /* invalidate code */
4412 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4413 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09004414 cpu_physical_memory_set_dirty_flags(addr1,
4415 (0xff & ~CODE_DIRTY_FLAG));
bellard3a7d9292005-08-21 09:26:42 +00004416 }
bellard8df1cd02005-01-28 22:37:22 +00004417 }
4418}
4419
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004420void stl_phys(target_phys_addr_t addr, uint32_t val)
4421{
4422 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4423}
4424
4425void stl_le_phys(target_phys_addr_t addr, uint32_t val)
4426{
4427 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4428}
4429
4430void stl_be_phys(target_phys_addr_t addr, uint32_t val)
4431{
4432 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4433}
4434
bellardaab33092005-10-30 20:48:42 +00004435/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05004436void stb_phys(target_phys_addr_t addr, uint32_t val)
bellardaab33092005-10-30 20:48:42 +00004437{
4438 uint8_t v = val;
4439 cpu_physical_memory_write(addr, &v, 1);
4440}
4441
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004442/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004443static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
4444 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00004445{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004446 int io_index;
4447 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02004448 MemoryRegionSection *section;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004449
Avi Kivity06ef3522012-02-13 16:11:22 +02004450 section = phys_page_find(addr >> TARGET_PAGE_BITS);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004451
Avi Kivityf3705d52012-03-08 16:16:34 +02004452 if (!memory_region_is_ram(section->mr) || section->readonly) {
4453 if (memory_region_is_ram(section->mr)) {
Avi Kivity06ef3522012-02-13 16:11:22 +02004454 io_index = io_mem_rom.ram_addr;
4455 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02004456 io_index = memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02004457 & (IO_MEM_NB_ENTRIES - 1);
4458 }
Avi Kivityf3705d52012-03-08 16:16:34 +02004459 addr = section_addr(section, addr);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004460#if defined(TARGET_WORDS_BIGENDIAN)
4461 if (endian == DEVICE_LITTLE_ENDIAN) {
4462 val = bswap16(val);
4463 }
4464#else
4465 if (endian == DEVICE_BIG_ENDIAN) {
4466 val = bswap16(val);
4467 }
4468#endif
Avi Kivityacbbec52011-11-21 12:27:03 +02004469 io_mem_write(io_index, addr, val, 2);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004470 } else {
4471 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02004472 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
4473 + section_addr(section, addr);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004474 /* RAM case */
4475 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004476 switch (endian) {
4477 case DEVICE_LITTLE_ENDIAN:
4478 stw_le_p(ptr, val);
4479 break;
4480 case DEVICE_BIG_ENDIAN:
4481 stw_be_p(ptr, val);
4482 break;
4483 default:
4484 stw_p(ptr, val);
4485 break;
4486 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004487 if (!cpu_physical_memory_is_dirty(addr1)) {
4488 /* invalidate code */
4489 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4490 /* set dirty bit */
4491 cpu_physical_memory_set_dirty_flags(addr1,
4492 (0xff & ~CODE_DIRTY_FLAG));
4493 }
4494 }
bellardaab33092005-10-30 20:48:42 +00004495}
4496
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004497void stw_phys(target_phys_addr_t addr, uint32_t val)
4498{
4499 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4500}
4501
4502void stw_le_phys(target_phys_addr_t addr, uint32_t val)
4503{
4504 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4505}
4506
4507void stw_be_phys(target_phys_addr_t addr, uint32_t val)
4508{
4509 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4510}
4511
bellardaab33092005-10-30 20:48:42 +00004512/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05004513void stq_phys(target_phys_addr_t addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00004514{
4515 val = tswap64(val);
Stefan Weil71d2b722011-03-26 21:06:56 +01004516 cpu_physical_memory_write(addr, &val, 8);
bellardaab33092005-10-30 20:48:42 +00004517}
4518
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004519void stq_le_phys(target_phys_addr_t addr, uint64_t val)
4520{
4521 val = cpu_to_le64(val);
4522 cpu_physical_memory_write(addr, &val, 8);
4523}
4524
4525void stq_be_phys(target_phys_addr_t addr, uint64_t val)
4526{
4527 val = cpu_to_be64(val);
4528 cpu_physical_memory_write(addr, &val, 8);
4529}
4530
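/* Illustrative sketch, not part of the original file: pairing the
   endian-explicit stores with a device layout. A virtio-style descriptor
   that is specified as little-endian regardless of the guest can be filled
   with the _le_ variants; the byte swap then happens only on big-endian
   targets. The descriptor field offsets are invented. */
static void example_write_le_desc(target_phys_addr_t desc,
                                  uint64_t gpa, uint32_t dlen)
{
    stq_le_phys(desc + 0, gpa);    /* 64-bit buffer address, little-endian */
    stl_le_phys(desc + 8, dlen);   /* 32-bit length, little-endian */
}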
aliguori5e2972f2009-03-28 17:51:36 +00004531/* virtual memory access for debug (includes writing to ROM) */
ths5fafdf22007-09-16 21:08:06 +00004532int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00004533 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00004534{
4535 int l;
Anthony Liguoric227f092009-10-01 16:12:16 -05004536 target_phys_addr_t phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00004537 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00004538
4539 while (len > 0) {
4540 page = addr & TARGET_PAGE_MASK;
4541 phys_addr = cpu_get_phys_page_debug(env, page);
4542 /* if no physical page mapped, return an error */
4543 if (phys_addr == -1)
4544 return -1;
4545 l = (page + TARGET_PAGE_SIZE) - addr;
4546 if (l > len)
4547 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00004548 phys_addr += (addr & ~TARGET_PAGE_MASK);
aliguori5e2972f2009-03-28 17:51:36 +00004549 if (is_write)
4550 cpu_physical_memory_write_rom(phys_addr, buf, l);
4551 else
aliguori5e2972f2009-03-28 17:51:36 +00004552 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
bellard13eb76e2004-01-24 15:23:36 +00004553 len -= l;
4554 buf += l;
4555 addr += l;
4556 }
4557 return 0;
4558}
Paul Brooka68fe892010-03-01 00:08:59 +00004559#endif
bellard13eb76e2004-01-24 15:23:36 +00004560
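/* Illustrative sketch, not part of the original file: how a gdbstub-style
   debugger reads guest *virtual* memory. In the softmmu build,
   cpu_memory_rw_debug() above translates each page through
   cpu_get_phys_page_debug(), so it works on addresses that are not in the
   TLB, and its write path uses the ROM-tolerant helper so breakpoints can
   be patched even into ROM. */
static int example_debug_read(CPUState *env, target_ulong vaddr,
                              uint8_t *buf, int len)
{
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0 /* read */);
}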
pbrook2e70f6e2008-06-29 01:03:05 +00004561/* in deterministic execution mode, instructions that perform device I/O
4562   must be at the end of the TB */
4563void cpu_io_recompile(CPUState *env, void *retaddr)
4564{
4565 TranslationBlock *tb;
4566 uint32_t n, cflags;
4567 target_ulong pc, cs_base;
4568 uint64_t flags;
4569
4570 tb = tb_find_pc((unsigned long)retaddr);
4571 if (!tb) {
4572 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
4573 retaddr);
4574 }
4575 n = env->icount_decr.u16.low + tb->icount;
Stefan Weil618ba8e2011-04-18 06:39:53 +00004576 cpu_restore_state(tb, env, (unsigned long)retaddr);
pbrook2e70f6e2008-06-29 01:03:05 +00004577 /* Calculate how many instructions had been executed before the fault
thsbf20dc02008-06-30 17:22:19 +00004578 occurred. */
pbrook2e70f6e2008-06-29 01:03:05 +00004579 n = n - env->icount_decr.u16.low;
4580 /* Generate a new TB ending on the I/O insn. */
4581 n++;
4582 /* On MIPS and SH, delay slot instructions can only be restarted if
4583 they were already the first instruction in the TB. If this is not
thsbf20dc02008-06-30 17:22:19 +00004584 the first instruction in a TB then re-execute the preceding
pbrook2e70f6e2008-06-29 01:03:05 +00004585 branch. */
4586#if defined(TARGET_MIPS)
4587 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4588 env->active_tc.PC -= 4;
4589 env->icount_decr.u16.low++;
4590 env->hflags &= ~MIPS_HFLAG_BMASK;
4591 }
4592#elif defined(TARGET_SH4)
4593 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4594 && n > 1) {
4595 env->pc -= 2;
4596 env->icount_decr.u16.low++;
4597 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4598 }
4599#endif
4600 /* This should never happen. */
4601 if (n > CF_COUNT_MASK)
4602 cpu_abort(env, "TB too big during recompile");
4603
4604 cflags = n | CF_LAST_IO;
4605 pc = tb->pc;
4606 cs_base = tb->cs_base;
4607 flags = tb->flags;
4608 tb_phys_invalidate(tb, -1);
4609 /* FIXME: In theory this could raise an exception. In practice
4610 we have already translated the block once so it's probably ok. */
4611 tb_gen_code(env, pc, cs_base, flags, cflags);
thsbf20dc02008-06-30 17:22:19 +00004612 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
pbrook2e70f6e2008-06-29 01:03:05 +00004613 the first in the TB) then we end up generating a whole new TB and
4614 repeating the fault, which is horribly inefficient.
4615 Better would be to execute just this insn uncached, or generate a
4616 second new TB. */
4617 cpu_resume_from_signal(env, NULL);
4618}
4619
Paul Brookb3755a92010-03-12 16:54:58 +00004620#if !defined(CONFIG_USER_ONLY)
4621
Stefan Weil055403b2010-10-22 23:03:32 +02004622void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
bellarde3db7222005-01-26 22:00:47 +00004623{
4624 int i, target_code_size, max_target_code_size;
4625 int direct_jmp_count, direct_jmp2_count, cross_page;
4626 TranslationBlock *tb;
ths3b46e622007-09-17 08:09:54 +00004627
bellarde3db7222005-01-26 22:00:47 +00004628 target_code_size = 0;
4629 max_target_code_size = 0;
4630 cross_page = 0;
4631 direct_jmp_count = 0;
4632 direct_jmp2_count = 0;
4633 for(i = 0; i < nb_tbs; i++) {
4634 tb = &tbs[i];
4635 target_code_size += tb->size;
4636 if (tb->size > max_target_code_size)
4637 max_target_code_size = tb->size;
4638 if (tb->page_addr[1] != -1)
4639 cross_page++;
4640 if (tb->tb_next_offset[0] != 0xffff) {
4641 direct_jmp_count++;
4642 if (tb->tb_next_offset[1] != 0xffff) {
4643 direct_jmp2_count++;
4644 }
4645 }
4646 }
4647 /* XXX: avoid using doubles ? */
bellard57fec1f2008-02-01 10:50:11 +00004648 cpu_fprintf(f, "Translation buffer state:\n");
Stefan Weil055403b2010-10-22 23:03:32 +02004649 cpu_fprintf(f, "gen code size %td/%ld\n",
bellard26a5f132008-05-28 12:30:31 +00004650 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4651 cpu_fprintf(f, "TB count %d/%d\n",
4652 nb_tbs, code_gen_max_blocks);
ths5fafdf22007-09-16 21:08:06 +00004653 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
bellarde3db7222005-01-26 22:00:47 +00004654 nb_tbs ? target_code_size / nb_tbs : 0,
4655 max_target_code_size);
Stefan Weil055403b2010-10-22 23:03:32 +02004656 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
bellarde3db7222005-01-26 22:00:47 +00004657 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4658 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
ths5fafdf22007-09-16 21:08:06 +00004659 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4660 cross_page,
bellarde3db7222005-01-26 22:00:47 +00004661 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4662 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
ths5fafdf22007-09-16 21:08:06 +00004663 direct_jmp_count,
bellarde3db7222005-01-26 22:00:47 +00004664 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4665 direct_jmp2_count,
4666 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
bellard57fec1f2008-02-01 10:50:11 +00004667 cpu_fprintf(f, "\nStatistics:\n");
bellarde3db7222005-01-26 22:00:47 +00004668 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4669 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4670 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
bellardb67d9a52008-05-23 09:57:34 +00004671 tcg_dump_info(f, cpu_fprintf);
bellarde3db7222005-01-26 22:00:47 +00004672}
4673
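/* Illustrative sketch, not part of the original file: dump_exec_info()
   only needs a FILE* plus a matching fprintf-like callback, so plain
   fprintf is enough for ad-hoc debugging; the monitor's "info jit" command
   passes its own cpu_fprintf instead. */
static void example_dump_jit_stats(void)
{
    dump_exec_info(stderr, fprintf);
}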
Avi Kivityd39e8222012-01-01 23:35:10 +02004674/* NOTE: this function can trigger an exception */
4675/* NOTE2: the returned address is not exactly the physical address: it
4676 is the offset relative to phys_ram_base */
4677tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong addr)
4678{
4679 int mmu_idx, page_index, pd;
4680 void *p;
4681
4682 page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
4683 mmu_idx = cpu_mmu_index(env1);
4684 if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
4685 (addr & TARGET_PAGE_MASK))) {
4686 ldub_code(addr);
4687 }
Avi Kivityce5d64c2012-03-08 18:50:18 +02004688 pd = env1->iotlb[mmu_idx][page_index] & ~TARGET_PAGE_MASK;
Avi Kivity0e0df1e2012-01-02 00:32:15 +02004689 if (pd != io_mem_ram.ram_addr && pd != io_mem_rom.ram_addr
Avi Kivity06ef3522012-02-13 16:11:22 +02004690 && !io_mem_region[pd]->rom_device) {
Avi Kivityd39e8222012-01-01 23:35:10 +02004691#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC)
4692 cpu_unassigned_access(env1, addr, 0, 1, 0, 4);
4693#else
4694 cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
4695#endif
4696 }
4697 p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
4698 return qemu_ram_addr_from_host_nofail(p);
4699}
4700
Benjamin Herrenschmidt82afa582012-01-10 01:35:11 +00004701/*
4702 * A helper function for the _utterly broken_ virtio device model to find out if
4703 * it's running on a big-endian machine. Don't do this at home, kids!
4704 */
4705bool virtio_is_big_endian(void);
4706bool virtio_is_big_endian(void)
4707{
4708#if defined(TARGET_WORDS_BIGENDIAN)
4709 return true;
4710#else
4711 return false;
4712#endif
4713}
4714
bellard61382a52003-10-27 21:22:23 +00004715#define MMUSUFFIX _cmmu
Blue Swirl39171492011-09-21 18:13:16 +00004716#undef GETPC
bellard61382a52003-10-27 21:22:23 +00004717#define GETPC() NULL
4718#define env cpu_single_env
bellardb769d8f2004-10-03 15:07:13 +00004719#define SOFTMMU_CODE_ACCESS
bellard61382a52003-10-27 21:22:23 +00004720
4721#define SHIFT 0
4722#include "softmmu_template.h"
4723
4724#define SHIFT 1
4725#include "softmmu_template.h"
4726
4727#define SHIFT 2
4728#include "softmmu_template.h"
4729
4730#define SHIFT 3
4731#include "softmmu_template.h"
4732
4733#undef env
4734
4735#endif