/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump.  ARM and Sparc64
   have limited branch ranges (possibly also PPC) so place it in a
   section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

#define P_L2_LEVELS \
    (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
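
/* Worked example of the arithmetic above (a hypothetical configuration,
   not one fixed by this file): with L1_MAP_ADDR_SPACE_BITS = 64 and
   TARGET_PAGE_BITS = 12 there are 52 index bits; 52 % 10 leaves only
   2 bits for the top level, and since 2 < 4 the top level is widened
   to 2 + 10 = 12 bits, giving V_L1_SIZE = 4096 entries and
   V_L1_SHIFT = 52 - 12 = 40. */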

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

typedef struct PhysPageEntry PhysPageEntry;

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;

struct PhysPageEntry {
    union {
        uint16_t leaf; /* index into phys_sections */
        PhysPageEntry *node;
    } u;
};

/* This is a multi-level map on the physical address space.
   The bottom level holds uint16_t indices into phys_sections.  */
static PhysPageEntry phys_map;
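
/* A lookup consumes the page index L2_BITS (10 bits) at a time, starting
   from the most significant end; interior levels follow u.node, and the
   final level's u.leaf holds the phys_sections index (see
   phys_page_find_alloc() below). */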

static void io_mem_init(void);
static void memory_map_init(void);

/* io memory support */
MemoryRegion *io_mem_region[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static MemoryRegion io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;

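/* Mark a buffer as executable.  mprotect() operates on whole host pages,
   so the POSIX variant rounds [addr, addr + size) outward to page
   boundaries before changing the protection. */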
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static uint16_t *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageEntry *lp, *p;
    int i, j;

    lp = &phys_map;

    /* Level 1..N.  */
    for (i = P_L2_LEVELS - 1; i >= 0; i--) {
        if (lp->u.node == NULL) {
            if (!alloc) {
                return NULL;
            }
            lp->u.node = p = g_malloc0(sizeof(PhysPageEntry) * L2_SIZE);
            if (i == 0) {
                for (j = 0; j < L2_SIZE; j++) {
                    p[j].u.leaf = phys_section_unassigned;
                }
            }
        }
        lp = &lp->u.node[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    return &lp->u.leaf;
}

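/* Resolve a target physical page index to a PhysPageDesc.  Pages with no
   mapping resolve to phys_section_unassigned.  For RAM (and ROM device)
   sections the offset within the region is folded into phys_offset;
   pure I/O pages keep region_offset so the handler can locate the access
   within its MemoryRegion. */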
static inline PhysPageDesc phys_page_find(target_phys_addr_t index)
{
    uint16_t *p = phys_page_find_alloc(index, 0);
    uint16_t s_index = phys_section_unassigned;
    MemoryRegionSection *section;
    PhysPageDesc pd;

    if (p) {
        s_index = *p;
    }
    section = &phys_sections[s_index];
    index <<= TARGET_PAGE_BITS;
    assert(section->offset_within_address_space <= index
           && index <= section->offset_within_address_space + section->size - 1);
    pd.phys_offset = section->mr->ram_addr;
    pd.region_offset = (index - section->offset_within_address_space)
        + section->offset_within_region;
    if (memory_region_is_ram(section->mr)) {
        pd.phys_offset += pd.region_offset;
        pd.region_offset = 0;
    } else if (section->mr->rom_device) {
        pd.phys_offset += pd.region_offset;
    }
    if (section->readonly) {
        pd.phys_offset |= io_mem_rom.ram_addr;
    }
    return pd;
}

static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
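
/* In user-mode emulation the mmap lock protects the page tables against a
   concurrent mmap()/munmap(); system emulation has no such race, so the
   lock compiles away to a no-op here. */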
#define mmap_lock() do { } while (0)
#define mmap_unlock() do { } while (0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode.  This will change when a dedicated libc is used.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Keep the buffer no bigger than 16MB to branch between blocks */
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));
    }

    memset(tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof(void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

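/* The TB lists below store state in the low two bits of each pointer:
   values 0 and 1 select which of the (up to) two pages, or which jump
   slot, the link belongs to inside the pointed-to TB, and the value 2
   marks the head of a circular jump list.  Hence the recurring
   "(long)tb & 3" / "(long)tb & ~3" unpacking. */
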
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for (;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for (;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the per-CPU jump caches */
    h = tb_jmp_cache_hash_func(tb->pc);
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for (;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

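/* The code bitmap marks, one bit per byte, which bytes of a guest page
   are covered by translated code.  A write that hits no set bit can then
   skip TB invalidation entirely (see tb_invalidate_phys_page_fast()). */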
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size +
                             CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
             addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

bellard9fa3e852004-01-04 18:06:42 +00001290/* add a new TB and link it to the physical page tables. phys_page2 is
1291 (-1) to indicate that only one page contains the TB. */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001292void tb_link_page(TranslationBlock *tb,
1293 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
bellardd4e81642003-05-25 16:46:15 +00001294{
bellard9fa3e852004-01-04 18:06:42 +00001295 unsigned int h;
1296 TranslationBlock **ptb;
1297
pbrookc8a706f2008-06-02 16:16:42 +00001298 /* Grab the mmap lock to stop another thread invalidating this TB
1299 before we are done. */
1300 mmap_lock();
bellard9fa3e852004-01-04 18:06:42 +00001301 /* add in the physical hash table */
1302 h = tb_phys_hash_func(phys_pc);
1303 ptb = &tb_phys_hash[h];
1304 tb->phys_hash_next = *ptb;
1305 *ptb = tb;
bellardfd6ce8f2003-05-14 19:00:11 +00001306
1307 /* add in the page list */
bellard9fa3e852004-01-04 18:06:42 +00001308 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1309 if (phys_page2 != -1)
1310 tb_alloc_page(tb, 1, phys_page2);
1311 else
1312 tb->page_addr[1] = -1;
bellard9fa3e852004-01-04 18:06:42 +00001313
bellardd4e81642003-05-25 16:46:15 +00001314 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1315 tb->jmp_next[0] = NULL;
1316 tb->jmp_next[1] = NULL;
1317
1318 /* init original jump addresses */
1319 if (tb->tb_next_offset[0] != 0xffff)
1320 tb_reset_jump(tb, 0);
1321 if (tb->tb_next_offset[1] != 0xffff)
1322 tb_reset_jump(tb, 1);
bellard8a40a182005-11-20 10:35:40 +00001323
1324#ifdef DEBUG_TB_CHECK
1325 tb_page_check();
1326#endif
pbrookc8a706f2008-06-02 16:16:42 +00001327 mmap_unlock();
bellardfd6ce8f2003-05-14 19:00:11 +00001328}
1329
bellarda513fe12003-05-27 23:29:48 +00001330/* find the TB 'tb' such that tb->tc_ptr <= tc_ptr <
1331   (tb + 1)->tc_ptr. Return NULL if not found */
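/* tc_ptr is a host address inside the code generation buffer,
   e.g. the host PC captured at a fault in generated code. */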
1332TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1333{
1334 int m_min, m_max, m;
1335 unsigned long v;
1336 TranslationBlock *tb;
1337
1338 if (nb_tbs <= 0)
1339 return NULL;
1340 if (tc_ptr < (unsigned long)code_gen_buffer ||
1341 tc_ptr >= (unsigned long)code_gen_ptr)
1342 return NULL;
1343 /* binary search (cf Knuth) */
1344 m_min = 0;
1345 m_max = nb_tbs - 1;
1346 while (m_min <= m_max) {
1347 m = (m_min + m_max) >> 1;
1348 tb = &tbs[m];
1349 v = (unsigned long)tb->tc_ptr;
1350 if (v == tc_ptr)
1351 return tb;
1352 else if (tc_ptr < v) {
1353 m_max = m - 1;
1354 } else {
1355 m_min = m + 1;
1356 }
ths5fafdf22007-09-16 21:08:06 +00001357 }
bellarda513fe12003-05-27 23:29:48 +00001358 return &tbs[m_max];
1359}
bellard75012672003-06-21 13:11:07 +00001360
bellardea041c02003-06-25 16:16:50 +00001361static void tb_reset_jump_recursive(TranslationBlock *tb);
1362
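/* Each TB can directly jump to at most two others. jmp_first heads a
   list of all TBs that jump to this one, threaded through their
   jmp_next[] slots; the low two bits of each pointer encode which
   slot is used, and the value 2 marks the end of the list (pointing
   back at the target TB itself). */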
1363static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1364{
1365 TranslationBlock *tb1, *tb_next, **ptb;
1366 unsigned int n1;
1367
1368 tb1 = tb->jmp_next[n];
1369 if (tb1 != NULL) {
1370 /* find head of list */
1371 for(;;) {
1372 n1 = (long)tb1 & 3;
1373 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1374 if (n1 == 2)
1375 break;
1376 tb1 = tb1->jmp_next[n1];
1377 }
1378        /* we are now sure that tb jumps to tb1 */
1379 tb_next = tb1;
1380
1381 /* remove tb from the jmp_first list */
1382 ptb = &tb_next->jmp_first;
1383 for(;;) {
1384 tb1 = *ptb;
1385 n1 = (long)tb1 & 3;
1386 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1387 if (n1 == n && tb1 == tb)
1388 break;
1389 ptb = &tb1->jmp_next[n1];
1390 }
1391 *ptb = tb->jmp_next[n];
1392 tb->jmp_next[n] = NULL;
ths3b46e622007-09-17 08:09:54 +00001393
bellardea041c02003-06-25 16:16:50 +00001394 /* suppress the jump to next tb in generated code */
1395 tb_reset_jump(tb, n);
1396
bellard01243112004-01-04 15:48:17 +00001397        /* suppress jumps in the tb to which we could have jumped */
bellardea041c02003-06-25 16:16:50 +00001398 tb_reset_jump_recursive(tb_next);
1399 }
1400}
1401
1402static void tb_reset_jump_recursive(TranslationBlock *tb)
1403{
1404 tb_reset_jump_recursive2(tb, 0);
1405 tb_reset_jump_recursive2(tb, 1);
1406}
1407
bellard1fddef42005-04-17 19:16:13 +00001408#if defined(TARGET_HAS_ICE)
Paul Brook94df27f2010-02-28 23:47:45 +00001409#if defined(CONFIG_USER_ONLY)
1410static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1411{
1412 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1413}
1414#else
bellardd720b932004-04-25 17:57:43 +00001415static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1416{
Anthony Liguoric227f092009-10-01 16:12:16 -05001417 target_phys_addr_t addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00001418 target_ulong pd;
Anthony Liguoric227f092009-10-01 16:12:16 -05001419 ram_addr_t ram_addr;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02001420 PhysPageDesc p;
bellardd720b932004-04-25 17:57:43 +00001421
pbrookc2f07f82006-04-08 17:14:56 +00001422 addr = cpu_get_phys_page_debug(env, pc);
1423 p = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02001424 pd = p.phys_offset;
pbrookc2f07f82006-04-08 17:14:56 +00001425 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
pbrook706cd4b2006-04-08 17:36:21 +00001426 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
bellardd720b932004-04-25 17:57:43 +00001427}
bellardc27004e2005-01-03 23:35:10 +00001428#endif
Paul Brook94df27f2010-02-28 23:47:45 +00001429#endif /* TARGET_HAS_ICE */
bellardd720b932004-04-25 17:57:43 +00001430
Paul Brookc527ee82010-03-01 03:31:14 +00001431#if defined(CONFIG_USER_ONLY)
1432void cpu_watchpoint_remove_all(CPUState *env, int mask)
1434{
1435}
1436
1437int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1438 int flags, CPUWatchpoint **watchpoint)
1439{
1440 return -ENOSYS;
1441}
1442#else
pbrook6658ffb2007-03-16 23:58:11 +00001443/* Add a watchpoint. */
aliguoria1d1bb32008-11-18 20:07:32 +00001444int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1445 int flags, CPUWatchpoint **watchpoint)
pbrook6658ffb2007-03-16 23:58:11 +00001446{
aliguorib4051332008-11-18 20:14:20 +00001447 target_ulong len_mask = ~(len - 1);
aliguoric0ce9982008-11-25 22:13:57 +00001448 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001449
aliguorib4051332008-11-18 20:14:20 +00001450 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
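    /* e.g. len = 4 gives len_mask = ~3, so an unaligned addr such as
       0x1003 fails the (addr & ~len_mask) test below since 0x1003 & 3 != 0 */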
1451 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1452 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1453 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1454 return -EINVAL;
1455 }
Anthony Liguori7267c092011-08-20 22:09:37 -05001456 wp = g_malloc(sizeof(*wp));
pbrook6658ffb2007-03-16 23:58:11 +00001457
aliguoria1d1bb32008-11-18 20:07:32 +00001458 wp->vaddr = addr;
aliguorib4051332008-11-18 20:14:20 +00001459 wp->len_mask = len_mask;
aliguoria1d1bb32008-11-18 20:07:32 +00001460 wp->flags = flags;
1461
aliguori2dc9f412008-11-18 20:56:59 +00001462 /* keep all GDB-injected watchpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001463 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001464 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001465 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001466 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001467
pbrook6658ffb2007-03-16 23:58:11 +00001468 tlb_flush_page(env, addr);
aliguoria1d1bb32008-11-18 20:07:32 +00001469
1470 if (watchpoint)
1471 *watchpoint = wp;
1472 return 0;
pbrook6658ffb2007-03-16 23:58:11 +00001473}
1474
aliguoria1d1bb32008-11-18 20:07:32 +00001475/* Remove a specific watchpoint. */
1476int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1477 int flags)
pbrook6658ffb2007-03-16 23:58:11 +00001478{
aliguorib4051332008-11-18 20:14:20 +00001479 target_ulong len_mask = ~(len - 1);
aliguoria1d1bb32008-11-18 20:07:32 +00001480 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001481
Blue Swirl72cf2d42009-09-12 07:36:22 +00001482 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001483 if (addr == wp->vaddr && len_mask == wp->len_mask
aliguori6e140f22008-11-18 20:37:55 +00001484 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
aliguoria1d1bb32008-11-18 20:07:32 +00001485 cpu_watchpoint_remove_by_ref(env, wp);
pbrook6658ffb2007-03-16 23:58:11 +00001486 return 0;
1487 }
1488 }
aliguoria1d1bb32008-11-18 20:07:32 +00001489 return -ENOENT;
pbrook6658ffb2007-03-16 23:58:11 +00001490}
1491
aliguoria1d1bb32008-11-18 20:07:32 +00001492/* Remove a specific watchpoint by reference. */
1493void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1494{
Blue Swirl72cf2d42009-09-12 07:36:22 +00001495 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
edgar_igl7d03f822008-05-17 18:58:29 +00001496
aliguoria1d1bb32008-11-18 20:07:32 +00001497 tlb_flush_page(env, watchpoint->vaddr);
1498
Anthony Liguori7267c092011-08-20 22:09:37 -05001499 g_free(watchpoint);
edgar_igl7d03f822008-05-17 18:58:29 +00001500}
1501
aliguoria1d1bb32008-11-18 20:07:32 +00001502/* Remove all matching watchpoints. */
1503void cpu_watchpoint_remove_all(CPUState *env, int mask)
1504{
aliguoric0ce9982008-11-25 22:13:57 +00001505 CPUWatchpoint *wp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001506
Blue Swirl72cf2d42009-09-12 07:36:22 +00001507 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001508 if (wp->flags & mask)
1509 cpu_watchpoint_remove_by_ref(env, wp);
aliguoric0ce9982008-11-25 22:13:57 +00001510 }
aliguoria1d1bb32008-11-18 20:07:32 +00001511}
Paul Brookc527ee82010-03-01 03:31:14 +00001512#endif
aliguoria1d1bb32008-11-18 20:07:32 +00001513
1514/* Add a breakpoint. */
1515int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1516 CPUBreakpoint **breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001517{
bellard1fddef42005-04-17 19:16:13 +00001518#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001519 CPUBreakpoint *bp;
ths3b46e622007-09-17 08:09:54 +00001520
Anthony Liguori7267c092011-08-20 22:09:37 -05001521 bp = g_malloc(sizeof(*bp));
aliguoria1d1bb32008-11-18 20:07:32 +00001522
1523 bp->pc = pc;
1524 bp->flags = flags;
1525
aliguori2dc9f412008-11-18 20:56:59 +00001526 /* keep all GDB-injected breakpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001527 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001528 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001529 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001530 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001531
1532 breakpoint_invalidate(env, pc);
1533
1534 if (breakpoint)
1535 *breakpoint = bp;
1536 return 0;
1537#else
1538 return -ENOSYS;
1539#endif
1540}
1541
1542/* Remove a specific breakpoint. */
1543int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1544{
1545#if defined(TARGET_HAS_ICE)
1546 CPUBreakpoint *bp;
1547
Blue Swirl72cf2d42009-09-12 07:36:22 +00001548 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00001549 if (bp->pc == pc && bp->flags == flags) {
1550 cpu_breakpoint_remove_by_ref(env, bp);
bellard4c3a88a2003-07-26 12:06:08 +00001551 return 0;
aliguoria1d1bb32008-11-18 20:07:32 +00001552 }
bellard4c3a88a2003-07-26 12:06:08 +00001553 }
aliguoria1d1bb32008-11-18 20:07:32 +00001554 return -ENOENT;
bellard4c3a88a2003-07-26 12:06:08 +00001555#else
aliguoria1d1bb32008-11-18 20:07:32 +00001556 return -ENOSYS;
bellard4c3a88a2003-07-26 12:06:08 +00001557#endif
1558}
1559
aliguoria1d1bb32008-11-18 20:07:32 +00001560/* Remove a specific breakpoint by reference. */
1561void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001562{
bellard1fddef42005-04-17 19:16:13 +00001563#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001564 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
bellardd720b932004-04-25 17:57:43 +00001565
aliguoria1d1bb32008-11-18 20:07:32 +00001566 breakpoint_invalidate(env, breakpoint->pc);
1567
Anthony Liguori7267c092011-08-20 22:09:37 -05001568 g_free(breakpoint);
aliguoria1d1bb32008-11-18 20:07:32 +00001569#endif
1570}
1571
1572/* Remove all matching breakpoints. */
1573void cpu_breakpoint_remove_all(CPUState *env, int mask)
1574{
1575#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001576 CPUBreakpoint *bp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001577
Blue Swirl72cf2d42009-09-12 07:36:22 +00001578 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001579 if (bp->flags & mask)
1580 cpu_breakpoint_remove_by_ref(env, bp);
aliguoric0ce9982008-11-25 22:13:57 +00001581 }
bellard4c3a88a2003-07-26 12:06:08 +00001582#endif
1583}
1584
bellardc33a3462003-07-29 20:50:33 +00001585/* enable or disable single step mode. EXCP_DEBUG is returned by the
1586 CPU loop after each instruction */
1587void cpu_single_step(CPUState *env, int enabled)
1588{
bellard1fddef42005-04-17 19:16:13 +00001589#if defined(TARGET_HAS_ICE)
bellardc33a3462003-07-29 20:50:33 +00001590 if (env->singlestep_enabled != enabled) {
1591 env->singlestep_enabled = enabled;
aliguorie22a25c2009-03-12 20:12:48 +00001592 if (kvm_enabled())
1593 kvm_update_guest_debug(env, 0);
1594 else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01001595 /* must flush all the translated code to avoid inconsistencies */
aliguorie22a25c2009-03-12 20:12:48 +00001596 /* XXX: only flush what is necessary */
1597 tb_flush(env);
1598 }
bellardc33a3462003-07-29 20:50:33 +00001599 }
1600#endif
1601}
1602
bellard34865132003-10-05 14:28:56 +00001603/* enable or disable low-level logging */
1604void cpu_set_log(int log_flags)
1605{
1606 loglevel = log_flags;
1607 if (loglevel && !logfile) {
pbrook11fcfab2007-07-01 18:21:11 +00001608 logfile = fopen(logfilename, log_append ? "a" : "w");
bellard34865132003-10-05 14:28:56 +00001609 if (!logfile) {
1610 perror(logfilename);
1611 _exit(1);
1612 }
bellard9fa3e852004-01-04 18:06:42 +00001613#if !defined(CONFIG_SOFTMMU)
1614        /* avoid glibc's mmap() usage by setting a buffer "by hand" */
1615 {
blueswir1b55266b2008-09-20 08:07:15 +00001616 static char logfile_buf[4096];
bellard9fa3e852004-01-04 18:06:42 +00001617 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1618 }
Stefan Weildaf767b2011-12-03 22:32:37 +01001619#elif defined(_WIN32)
1620 /* Win32 doesn't support line-buffering, so use unbuffered output. */
1621 setvbuf(logfile, NULL, _IONBF, 0);
1622#else
bellard34865132003-10-05 14:28:56 +00001623 setvbuf(logfile, NULL, _IOLBF, 0);
bellard9fa3e852004-01-04 18:06:42 +00001624#endif
pbrooke735b912007-06-30 13:53:24 +00001625 log_append = 1;
1626 }
1627 if (!loglevel && logfile) {
1628 fclose(logfile);
1629 logfile = NULL;
bellard34865132003-10-05 14:28:56 +00001630 }
1631}
1632
1633void cpu_set_log_filename(const char *filename)
1634{
1635 logfilename = strdup(filename);
pbrooke735b912007-06-30 13:53:24 +00001636 if (logfile) {
1637 fclose(logfile);
1638 logfile = NULL;
1639 }
1640 cpu_set_log(loglevel);
bellard34865132003-10-05 14:28:56 +00001641}
bellardc33a3462003-07-29 20:50:33 +00001642
aurel323098dba2009-03-07 21:28:24 +00001643static void cpu_unlink_tb(CPUState *env)
bellardea041c02003-06-25 16:16:50 +00001644{
pbrookd5975362008-06-07 20:50:51 +00001645 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1646 problem and hope the cpu will stop of its own accord. For userspace
1647       emulation this isn't as bad as it sounds, since signals are
1648       used primarily to interrupt blocking syscalls. */
aurel323098dba2009-03-07 21:28:24 +00001649 TranslationBlock *tb;
Anthony Liguoric227f092009-10-01 16:12:16 -05001650 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
aurel323098dba2009-03-07 21:28:24 +00001651
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001652 spin_lock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001653 tb = env->current_tb;
1654 /* if the cpu is currently executing code, we must unlink it and
1655 all the potentially executing TB */
Riku Voipiof76cfe52009-12-04 15:16:30 +02001656 if (tb) {
aurel323098dba2009-03-07 21:28:24 +00001657 env->current_tb = NULL;
1658 tb_reset_jump_recursive(tb);
aurel323098dba2009-03-07 21:28:24 +00001659 }
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001660 spin_unlock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001661}
1662
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001663#ifndef CONFIG_USER_ONLY
aurel323098dba2009-03-07 21:28:24 +00001664/* mask must never be zero, except for A20 change call */
Jan Kiszkaec6959d2011-04-13 01:32:56 +02001665static void tcg_handle_interrupt(CPUState *env, int mask)
aurel323098dba2009-03-07 21:28:24 +00001666{
1667 int old_mask;
1668
1669 old_mask = env->interrupt_request;
1670 env->interrupt_request |= mask;
1671
aliguori8edac962009-04-24 18:03:45 +00001672 /*
1673 * If called from iothread context, wake the target cpu in
1674     * case it is halted.
1675 */
Jan Kiszkab7680cb2011-03-12 17:43:51 +01001676 if (!qemu_cpu_is_self(env)) {
aliguori8edac962009-04-24 18:03:45 +00001677 qemu_cpu_kick(env);
1678 return;
1679 }
aliguori8edac962009-04-24 18:03:45 +00001680
pbrook2e70f6e2008-06-29 01:03:05 +00001681 if (use_icount) {
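        /* setting the high half of icount_decr makes the decrementer
           go negative, causing the execution loop to leave the
           current TB at the next icount check */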
pbrook266910c2008-07-09 15:31:50 +00001682 env->icount_decr.u16.high = 0xffff;
pbrook2e70f6e2008-06-29 01:03:05 +00001683 if (!can_do_io(env)
aurel32be214e62009-03-06 21:48:00 +00001684 && (mask & ~old_mask) != 0) {
pbrook2e70f6e2008-06-29 01:03:05 +00001685 cpu_abort(env, "Raised interrupt while not in I/O function");
1686 }
pbrook2e70f6e2008-06-29 01:03:05 +00001687 } else {
aurel323098dba2009-03-07 21:28:24 +00001688 cpu_unlink_tb(env);
bellardea041c02003-06-25 16:16:50 +00001689 }
1690}
1691
Jan Kiszkaec6959d2011-04-13 01:32:56 +02001692CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1693
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001694#else /* CONFIG_USER_ONLY */
1695
1696void cpu_interrupt(CPUState *env, int mask)
1697{
1698 env->interrupt_request |= mask;
1699 cpu_unlink_tb(env);
1700}
1701#endif /* CONFIG_USER_ONLY */
1702
bellardb54ad042004-05-20 13:42:52 +00001703void cpu_reset_interrupt(CPUState *env, int mask)
1704{
1705 env->interrupt_request &= ~mask;
1706}
1707
aurel323098dba2009-03-07 21:28:24 +00001708void cpu_exit(CPUState *env)
1709{
1710 env->exit_request = 1;
1711 cpu_unlink_tb(env);
1712}
1713
blueswir1c7cd6a32008-10-02 18:27:46 +00001714const CPULogItem cpu_log_items[] = {
ths5fafdf22007-09-16 21:08:06 +00001715 { CPU_LOG_TB_OUT_ASM, "out_asm",
bellardf193c792004-03-21 17:06:25 +00001716 "show generated host assembly code for each compiled TB" },
1717 { CPU_LOG_TB_IN_ASM, "in_asm",
1718 "show target assembly code for each compiled TB" },
ths5fafdf22007-09-16 21:08:06 +00001719 { CPU_LOG_TB_OP, "op",
bellard57fec1f2008-02-01 10:50:11 +00001720 "show micro ops for each compiled TB" },
bellardf193c792004-03-21 17:06:25 +00001721 { CPU_LOG_TB_OP_OPT, "op_opt",
blueswir1e01a1152008-03-14 17:37:11 +00001722 "show micro ops "
1723#ifdef TARGET_I386
1724 "before eflags optimization and "
bellardf193c792004-03-21 17:06:25 +00001725#endif
blueswir1e01a1152008-03-14 17:37:11 +00001726 "after liveness analysis" },
bellardf193c792004-03-21 17:06:25 +00001727 { CPU_LOG_INT, "int",
1728 "show interrupts/exceptions in short format" },
1729 { CPU_LOG_EXEC, "exec",
1730 "show trace before each executed TB (lots of logs)" },
bellard9fddaa02004-05-21 12:59:32 +00001731 { CPU_LOG_TB_CPU, "cpu",
thse91c8a72007-06-03 13:35:16 +00001732 "show CPU state before block translation" },
bellardf193c792004-03-21 17:06:25 +00001733#ifdef TARGET_I386
1734 { CPU_LOG_PCALL, "pcall",
1735 "show protected mode far calls/returns/exceptions" },
aliguorieca1bdf2009-01-26 19:54:31 +00001736 { CPU_LOG_RESET, "cpu_reset",
1737 "show CPU state before CPU resets" },
bellardf193c792004-03-21 17:06:25 +00001738#endif
bellard8e3a9fd2004-10-09 17:32:58 +00001739#ifdef DEBUG_IOPORT
bellardfd872592004-05-12 19:11:15 +00001740 { CPU_LOG_IOPORT, "ioport",
1741 "show all i/o ports accesses" },
bellard8e3a9fd2004-10-09 17:32:58 +00001742#endif
bellardf193c792004-03-21 17:06:25 +00001743 { 0, NULL, NULL },
1744};
1745
1746static int cmp1(const char *s1, int n, const char *s2)
1747{
1748 if (strlen(s2) != n)
1749 return 0;
1750 return memcmp(s1, s2, n) == 0;
1751}
ths3b46e622007-09-17 08:09:54 +00001752
bellardf193c792004-03-21 17:06:25 +00001753/* takes a comma-separated list of log masks. Returns 0 on error. */
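/* e.g. cpu_str_to_log_mask("in_asm,op") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_OP; "all" selects every mask
   listed in cpu_log_items[]. */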
1754int cpu_str_to_log_mask(const char *str)
1755{
blueswir1c7cd6a32008-10-02 18:27:46 +00001756 const CPULogItem *item;
bellardf193c792004-03-21 17:06:25 +00001757 int mask;
1758 const char *p, *p1;
1759
1760 p = str;
1761 mask = 0;
1762 for(;;) {
1763 p1 = strchr(p, ',');
1764 if (!p1)
1765 p1 = p + strlen(p);
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001766        if (cmp1(p, p1 - p, "all")) {
1767 for(item = cpu_log_items; item->mask != 0; item++) {
1768 mask |= item->mask;
1769 }
1770 } else {
1771 for(item = cpu_log_items; item->mask != 0; item++) {
1772 if (cmp1(p, p1 - p, item->name))
1773 goto found;
1774 }
1775 return 0;
bellardf193c792004-03-21 17:06:25 +00001776 }
bellardf193c792004-03-21 17:06:25 +00001777 found:
1778 mask |= item->mask;
1779 if (*p1 != ',')
1780 break;
1781 p = p1 + 1;
1782 }
1783 return mask;
1784}
bellardea041c02003-06-25 16:16:50 +00001785
bellard75012672003-06-21 13:11:07 +00001786void cpu_abort(CPUState *env, const char *fmt, ...)
1787{
1788 va_list ap;
pbrook493ae1f2007-11-23 16:53:59 +00001789 va_list ap2;
bellard75012672003-06-21 13:11:07 +00001790
1791 va_start(ap, fmt);
pbrook493ae1f2007-11-23 16:53:59 +00001792 va_copy(ap2, ap);
bellard75012672003-06-21 13:11:07 +00001793 fprintf(stderr, "qemu: fatal: ");
1794 vfprintf(stderr, fmt, ap);
1795 fprintf(stderr, "\n");
1796#ifdef TARGET_I386
bellard7fe48482004-10-09 18:08:01 +00001797 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1798#else
1799 cpu_dump_state(env, stderr, fprintf, 0);
bellard75012672003-06-21 13:11:07 +00001800#endif
aliguori93fcfe32009-01-15 22:34:14 +00001801 if (qemu_log_enabled()) {
1802 qemu_log("qemu: fatal: ");
1803 qemu_log_vprintf(fmt, ap2);
1804 qemu_log("\n");
j_mayerf9373292007-09-29 12:18:20 +00001805#ifdef TARGET_I386
aliguori93fcfe32009-01-15 22:34:14 +00001806 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
j_mayerf9373292007-09-29 12:18:20 +00001807#else
aliguori93fcfe32009-01-15 22:34:14 +00001808 log_cpu_state(env, 0);
j_mayerf9373292007-09-29 12:18:20 +00001809#endif
aliguori31b1a7b2009-01-15 22:35:09 +00001810 qemu_log_flush();
aliguori93fcfe32009-01-15 22:34:14 +00001811 qemu_log_close();
balrog924edca2007-06-10 14:07:13 +00001812 }
pbrook493ae1f2007-11-23 16:53:59 +00001813 va_end(ap2);
j_mayerf9373292007-09-29 12:18:20 +00001814 va_end(ap);
Riku Voipiofd052bf2010-01-25 14:30:49 +02001815#if defined(CONFIG_USER_ONLY)
1816 {
1817 struct sigaction act;
1818 sigfillset(&act.sa_mask);
1819 act.sa_handler = SIG_DFL;
1820 sigaction(SIGABRT, &act, NULL);
1821 }
1822#endif
bellard75012672003-06-21 13:11:07 +00001823 abort();
1824}
1825
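/* Clone a CPU state: copy the whole CPUState, preserve the chaining
   fields of the copy, and re-insert the original's break/watchpoints
   so they get fresh bookkeeping in the new environment. */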
thsc5be9f02007-02-28 20:20:53 +00001826CPUState *cpu_copy(CPUState *env)
1827{
ths01ba9812007-12-09 02:22:57 +00001828 CPUState *new_env = cpu_init(env->cpu_model_str);
thsc5be9f02007-02-28 20:20:53 +00001829 CPUState *next_cpu = new_env->next_cpu;
1830 int cpu_index = new_env->cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001831#if defined(TARGET_HAS_ICE)
1832 CPUBreakpoint *bp;
1833 CPUWatchpoint *wp;
1834#endif
1835
thsc5be9f02007-02-28 20:20:53 +00001836 memcpy(new_env, env, sizeof(CPUState));
aliguori5a38f082009-01-15 20:16:51 +00001837
1838 /* Preserve chaining and index. */
thsc5be9f02007-02-28 20:20:53 +00001839 new_env->next_cpu = next_cpu;
1840 new_env->cpu_index = cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001841
1842 /* Clone all break/watchpoints.
1843 Note: Once we support ptrace with hw-debug register access, make sure
1844 BP_CPU break/watchpoints are handled correctly on clone. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00001845    QTAILQ_INIT(&new_env->breakpoints);
1846    QTAILQ_INIT(&new_env->watchpoints);
aliguori5a38f082009-01-15 20:16:51 +00001847#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001848 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001849 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1850 }
Blue Swirl72cf2d42009-09-12 07:36:22 +00001851 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001852 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1853 wp->flags, NULL);
1854 }
1855#endif
1856
thsc5be9f02007-02-28 20:20:53 +00001857 return new_env;
1858}
1859
bellard01243112004-01-04 15:48:17 +00001860#if !defined(CONFIG_USER_ONLY)
1861
edgar_igl5c751e92008-05-06 08:44:21 +00001862static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1863{
1864 unsigned int i;
1865
1866 /* Discard jump cache entries for any tb which might potentially
1867 overlap the flushed page. */
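    /* A TB can span two pages, so entries hashed under the preceding
       page may also reach into the flushed one. */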
1868 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1869 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001870 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001871
1872 i = tb_jmp_cache_hash_page(addr);
1873 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001874 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001875}
1876
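/* an empty TLB entry: the all-ones addresses can never match a
   page-aligned lookup, so slots reset to this value always miss */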
Igor Kovalenko08738982009-07-12 02:15:40 +04001877static CPUTLBEntry s_cputlb_empty_entry = {
1878 .addr_read = -1,
1879 .addr_write = -1,
1880 .addr_code = -1,
1881 .addend = -1,
1882};
1883
Peter Maydell771124e2012-01-17 13:23:13 +00001884/* NOTE:
1885 * If flush_global is true (the usual case), flush all tlb entries.
1886 * If flush_global is false, flush (at least) all tlb entries not
1887 * marked global.
1888 *
1889 * Since QEMU doesn't currently implement a global/not-global flag
1890 * for tlb entries, at the moment tlb_flush() will also flush all
1891 * tlb entries in the flush_global == false case. This is OK because
1892 * CPU architectures generally permit an implementation to drop
1893 * entries from the TLB at any time, so flushing more entries than
1894 * required is only an efficiency issue, not a correctness issue.
1895 */
bellardee8b7022004-02-03 23:35:10 +00001896void tlb_flush(CPUState *env, int flush_global)
bellard33417e72003-08-10 21:47:01 +00001897{
bellard33417e72003-08-10 21:47:01 +00001898 int i;
bellard01243112004-01-04 15:48:17 +00001899
bellard9fa3e852004-01-04 18:06:42 +00001900#if defined(DEBUG_TLB)
1901 printf("tlb_flush:\n");
1902#endif
bellard01243112004-01-04 15:48:17 +00001903 /* must reset current TB so that interrupts cannot modify the
1904 links while we are modifying them */
1905 env->current_tb = NULL;
1906
bellard33417e72003-08-10 21:47:01 +00001907 for(i = 0; i < CPU_TLB_SIZE; i++) {
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001908 int mmu_idx;
1909 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
Igor Kovalenko08738982009-07-12 02:15:40 +04001910 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001911 }
bellard33417e72003-08-10 21:47:01 +00001912 }
bellard9fa3e852004-01-04 18:06:42 +00001913
bellard8a40a182005-11-20 10:35:40 +00001914 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
bellard9fa3e852004-01-04 18:06:42 +00001915
Paul Brookd4c430a2010-03-17 02:14:28 +00001916 env->tlb_flush_addr = -1;
1917 env->tlb_flush_mask = 0;
bellarde3db7222005-01-26 22:00:47 +00001918 tlb_flush_count++;
bellard33417e72003-08-10 21:47:01 +00001919}
1920
bellard274da6b2004-05-20 21:56:27 +00001921static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
bellard61382a52003-10-27 21:22:23 +00001922{
ths5fafdf22007-09-16 21:08:06 +00001923 if (addr == (tlb_entry->addr_read &
bellard84b7b8e2005-11-28 21:19:04 +00001924 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00001925 addr == (tlb_entry->addr_write &
bellard84b7b8e2005-11-28 21:19:04 +00001926 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00001927 addr == (tlb_entry->addr_code &
bellard84b7b8e2005-11-28 21:19:04 +00001928 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
Igor Kovalenko08738982009-07-12 02:15:40 +04001929 *tlb_entry = s_cputlb_empty_entry;
bellard84b7b8e2005-11-28 21:19:04 +00001930 }
bellard61382a52003-10-27 21:22:23 +00001931}
1932
bellard2e126692004-04-25 21:28:44 +00001933void tlb_flush_page(CPUState *env, target_ulong addr)
bellard33417e72003-08-10 21:47:01 +00001934{
bellard8a40a182005-11-20 10:35:40 +00001935 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001936 int mmu_idx;
bellard01243112004-01-04 15:48:17 +00001937
bellard9fa3e852004-01-04 18:06:42 +00001938#if defined(DEBUG_TLB)
bellard108c49b2005-07-24 12:55:09 +00001939 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
bellard9fa3e852004-01-04 18:06:42 +00001940#endif
Paul Brookd4c430a2010-03-17 02:14:28 +00001941 /* Check if we need to flush due to large pages. */
1942 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
1943#if defined(DEBUG_TLB)
1944 printf("tlb_flush_page: forced full flush ("
1945 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
1946 env->tlb_flush_addr, env->tlb_flush_mask);
1947#endif
1948 tlb_flush(env, 1);
1949 return;
1950 }
bellard01243112004-01-04 15:48:17 +00001951 /* must reset current TB so that interrupts cannot modify the
1952 links while we are modifying them */
1953 env->current_tb = NULL;
bellard33417e72003-08-10 21:47:01 +00001954
bellard61382a52003-10-27 21:22:23 +00001955 addr &= TARGET_PAGE_MASK;
bellard33417e72003-08-10 21:47:01 +00001956 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001957 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1958 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
bellard01243112004-01-04 15:48:17 +00001959
edgar_igl5c751e92008-05-06 08:44:21 +00001960 tlb_flush_jmp_cache(env, addr);
bellard9fa3e852004-01-04 18:06:42 +00001961}
1962
bellard9fa3e852004-01-04 18:06:42 +00001963/* update the TLBs so that writes to code in the physical page
1964   'ram_addr' can be detected */
Anthony Liguoric227f092009-10-01 16:12:16 -05001965static void tlb_protect_code(ram_addr_t ram_addr)
bellard61382a52003-10-27 21:22:23 +00001966{
ths5fafdf22007-09-16 21:08:06 +00001967 cpu_physical_memory_reset_dirty(ram_addr,
bellard6a00d602005-11-21 23:25:50 +00001968 ram_addr + TARGET_PAGE_SIZE,
1969 CODE_DIRTY_FLAG);
bellard9fa3e852004-01-04 18:06:42 +00001970}
1971
bellard9fa3e852004-01-04 18:06:42 +00001972/* update the TLB so that writes in physical page 'ram_addr' are no longer
bellard3a7d9292005-08-21 09:26:42 +00001973   tested for self-modifying code */
Anthony Liguoric227f092009-10-01 16:12:16 -05001974static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
bellard3a7d9292005-08-21 09:26:42 +00001975 target_ulong vaddr)
bellard9fa3e852004-01-04 18:06:42 +00001976{
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001977 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
bellard1ccde1c2004-02-06 19:46:14 +00001978}
1979
ths5fafdf22007-09-16 21:08:06 +00001980static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
bellard1ccde1c2004-02-06 19:46:14 +00001981 unsigned long start, unsigned long length)
1982{
1983 unsigned long addr;
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001984 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
bellard84b7b8e2005-11-28 21:19:04 +00001985 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
bellard1ccde1c2004-02-06 19:46:14 +00001986 if ((addr - start) < length) {
pbrook0f459d12008-06-09 00:20:13 +00001987 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
bellard1ccde1c2004-02-06 19:46:14 +00001988 }
1989 }
1990}
1991
pbrook5579c7f2009-04-11 14:47:08 +00001992/* Note: start and end must be within the same ram block. */
Anthony Liguoric227f092009-10-01 16:12:16 -05001993void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
bellard0a962c02005-02-10 22:00:27 +00001994 int dirty_flags)
bellard1ccde1c2004-02-06 19:46:14 +00001995{
1996 CPUState *env;
bellard4f2ac232004-04-26 19:44:02 +00001997 unsigned long length, start1;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001998 int i;
bellard1ccde1c2004-02-06 19:46:14 +00001999
2000 start &= TARGET_PAGE_MASK;
2001 end = TARGET_PAGE_ALIGN(end);
2002
2003 length = end - start;
2004 if (length == 0)
2005 return;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002006 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00002007
bellard1ccde1c2004-02-06 19:46:14 +00002008 /* we modify the TLB cache so that the dirty bit will be set again
2009 when accessing the range */
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02002010 start1 = (unsigned long)qemu_safe_ram_ptr(start);
Stefan Weila57d23e2011-04-30 22:49:26 +02002011 /* Check that we don't span multiple blocks - this breaks the
pbrook5579c7f2009-04-11 14:47:08 +00002012 address comparisons below. */
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02002013 if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
pbrook5579c7f2009-04-11 14:47:08 +00002014 != (end - 1) - start) {
2015 abort();
2016 }
2017
bellard6a00d602005-11-21 23:25:50 +00002018 for(env = first_cpu; env != NULL; env = env->next_cpu) {
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002019 int mmu_idx;
2020 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2021 for(i = 0; i < CPU_TLB_SIZE; i++)
2022 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2023 start1, length);
2024 }
bellard6a00d602005-11-21 23:25:50 +00002025 }
bellard1ccde1c2004-02-06 19:46:14 +00002026}
2027
aliguori74576192008-10-06 14:02:03 +00002028int cpu_physical_memory_set_dirty_tracking(int enable)
2029{
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002030 int ret = 0;
aliguori74576192008-10-06 14:02:03 +00002031 in_migration = enable;
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002032 return ret;
aliguori74576192008-10-06 14:02:03 +00002033}
2034
bellard3a7d9292005-08-21 09:26:42 +00002035static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2036{
Anthony Liguoric227f092009-10-01 16:12:16 -05002037 ram_addr_t ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00002038 void *p;
bellard3a7d9292005-08-21 09:26:42 +00002039
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002040 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
pbrook5579c7f2009-04-11 14:47:08 +00002041 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2042 + tlb_entry->addend);
Marcelo Tosattie8902612010-10-11 15:31:19 -03002043 ram_addr = qemu_ram_addr_from_host_nofail(p);
bellard3a7d9292005-08-21 09:26:42 +00002044 if (!cpu_physical_memory_is_dirty(ram_addr)) {
pbrook0f459d12008-06-09 00:20:13 +00002045 tlb_entry->addr_write |= TLB_NOTDIRTY;
bellard3a7d9292005-08-21 09:26:42 +00002046 }
2047 }
2048}
2049
2050/* update the TLB according to the current state of the dirty bits */
2051void cpu_tlb_update_dirty(CPUState *env)
2052{
2053 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002054 int mmu_idx;
2055 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2056 for(i = 0; i < CPU_TLB_SIZE; i++)
2057 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2058 }
bellard3a7d9292005-08-21 09:26:42 +00002059}
2060
pbrook0f459d12008-06-09 00:20:13 +00002061static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002062{
pbrook0f459d12008-06-09 00:20:13 +00002063 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2064 tlb_entry->addr_write = vaddr;
bellard1ccde1c2004-02-06 19:46:14 +00002065}
2066
pbrook0f459d12008-06-09 00:20:13 +00002067/* update the TLB corresponding to virtual page vaddr
2068 so that it is no longer dirty */
2069static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002070{
bellard1ccde1c2004-02-06 19:46:14 +00002071 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002072 int mmu_idx;
bellard1ccde1c2004-02-06 19:46:14 +00002073
pbrook0f459d12008-06-09 00:20:13 +00002074 vaddr &= TARGET_PAGE_MASK;
bellard1ccde1c2004-02-06 19:46:14 +00002075 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002076 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2077 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
bellard9fa3e852004-01-04 18:06:42 +00002078}
2079
Paul Brookd4c430a2010-03-17 02:14:28 +00002080/* Our TLB does not support large pages, so remember the area covered by
2081 large pages and trigger a full TLB flush if these are invalidated. */
2082static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2083 target_ulong size)
2084{
2085 target_ulong mask = ~(size - 1);
2086
2087 if (env->tlb_flush_addr == (target_ulong)-1) {
2088 env->tlb_flush_addr = vaddr & mask;
2089 env->tlb_flush_mask = mask;
2090 return;
2091 }
2092 /* Extend the existing region to include the new page.
2093 This is a compromise between unnecessary flushes and the cost
2094 of maintaining a full variable size TLB. */
2095 mask &= env->tlb_flush_mask;
2096 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2097 mask <<= 1;
2098 }
2099 env->tlb_flush_addr &= mask;
2100 env->tlb_flush_mask = mask;
2101}
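/* e.g. after a 1 MB page is added at vaddr 0x100000, tlb_flush_addr
   is 0x100000 and tlb_flush_mask is ~0xfffff, so tlb_flush_page()
   turns into a full flush for any address in that 1 MB region */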
2102
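/* the low bits of a page descriptor hold the io handler index; these
   helpers test whether it refers to plain RAM/ROM or to a ROM device
   (romd) that is currently directly readable */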
Avi Kivity1d393fa2012-01-01 21:15:42 +02002103static bool is_ram_rom(ram_addr_t pd)
2104{
2105 pd &= ~TARGET_PAGE_MASK;
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002106 return pd == io_mem_ram.ram_addr || pd == io_mem_rom.ram_addr;
Avi Kivity1d393fa2012-01-01 21:15:42 +02002107}
2108
Avi Kivity75c578d2012-01-02 15:40:52 +02002109static bool is_romd(ram_addr_t pd)
2110{
2111 MemoryRegion *mr;
2112
2113 pd &= ~TARGET_PAGE_MASK;
Avi Kivity11c7ef02012-01-02 17:21:07 +02002114 mr = io_mem_region[pd];
Avi Kivity75c578d2012-01-02 15:40:52 +02002115 return mr->rom_device && mr->readable;
2116}
2117
Avi Kivity1d393fa2012-01-01 21:15:42 +02002118static bool is_ram_rom_romd(ram_addr_t pd)
2119{
Avi Kivity75c578d2012-01-02 15:40:52 +02002120 return is_ram_rom(pd) || is_romd(pd);
Avi Kivity1d393fa2012-01-01 21:15:42 +02002121}
2122
Paul Brookd4c430a2010-03-17 02:14:28 +00002123/* Add a new TLB entry. At most one entry for a given virtual address
2124   is permitted. Only a single TARGET_PAGE_SIZE region is mapped; the
2125 supplied size is only used by tlb_flush_page. */
2126void tlb_set_page(CPUState *env, target_ulong vaddr,
2127 target_phys_addr_t paddr, int prot,
2128 int mmu_idx, target_ulong size)
bellard9fa3e852004-01-04 18:06:42 +00002129{
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02002130 PhysPageDesc p;
bellard4f2ac232004-04-26 19:44:02 +00002131 unsigned long pd;
bellard9fa3e852004-01-04 18:06:42 +00002132 unsigned int index;
bellard4f2ac232004-04-26 19:44:02 +00002133 target_ulong address;
pbrook0f459d12008-06-09 00:20:13 +00002134 target_ulong code_address;
Paul Brook355b1942010-04-05 00:28:53 +01002135 unsigned long addend;
bellard84b7b8e2005-11-28 21:19:04 +00002136 CPUTLBEntry *te;
aliguoria1d1bb32008-11-18 20:07:32 +00002137 CPUWatchpoint *wp;
Anthony Liguoric227f092009-10-01 16:12:16 -05002138 target_phys_addr_t iotlb;
bellard9fa3e852004-01-04 18:06:42 +00002139
Paul Brookd4c430a2010-03-17 02:14:28 +00002140 assert(size >= TARGET_PAGE_SIZE);
2141 if (size != TARGET_PAGE_SIZE) {
2142 tlb_add_large_page(env, vaddr, size);
2143 }
bellard92e873b2004-05-21 14:52:29 +00002144 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02002145 pd = p.phys_offset;
bellard9fa3e852004-01-04 18:06:42 +00002146#if defined(DEBUG_TLB)
Stefan Weil7fd3f492010-09-30 22:39:51 +02002147 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
2148 " prot=%x idx=%d pd=0x%08lx\n",
2149 vaddr, paddr, prot, mmu_idx, pd);
bellard9fa3e852004-01-04 18:06:42 +00002150#endif
2151
pbrook0f459d12008-06-09 00:20:13 +00002152 address = vaddr;
Avi Kivity1d393fa2012-01-01 21:15:42 +02002153 if (!is_ram_rom_romd(pd)) {
pbrook0f459d12008-06-09 00:20:13 +00002154 /* IO memory case (romd handled later) */
2155 address |= TLB_MMIO;
2156 }
pbrook5579c7f2009-04-11 14:47:08 +00002157 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
Avi Kivity1d393fa2012-01-01 21:15:42 +02002158 if (is_ram_rom(pd)) {
pbrook0f459d12008-06-09 00:20:13 +00002159 /* Normal RAM. */
2160 iotlb = pd & TARGET_PAGE_MASK;
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002161 if ((pd & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr)
2162 iotlb |= io_mem_notdirty.ram_addr;
pbrook0f459d12008-06-09 00:20:13 +00002163 else
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002164 iotlb |= io_mem_rom.ram_addr;
pbrook0f459d12008-06-09 00:20:13 +00002165 } else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002166 /* IO handlers are currently passed a physical address.
pbrook0f459d12008-06-09 00:20:13 +00002167 It would be nice to pass an offset from the base address
2168 of that region. This would avoid having to special case RAM,
2169 and avoid full address decoding in every device.
2170 We can't use the high bits of pd for this because
2171 IO_MEM_ROMD uses these as a ram address. */
pbrook8da3ff12008-12-01 18:59:50 +00002172 iotlb = (pd & ~TARGET_PAGE_MASK);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02002173 iotlb += p.region_offset;
pbrook0f459d12008-06-09 00:20:13 +00002174 }
pbrook6658ffb2007-03-16 23:58:11 +00002175
pbrook0f459d12008-06-09 00:20:13 +00002176 code_address = address;
2177 /* Make accesses to pages with watchpoints go via the
2178 watchpoint trap routines. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00002179 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00002180 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
Jun Koibf298f82010-05-06 14:36:59 +09002181 /* Avoid trapping reads of pages with a write breakpoint. */
2182 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
Avi Kivity1ec9b902012-01-02 12:47:48 +02002183 iotlb = io_mem_watch.ram_addr + paddr;
Jun Koibf298f82010-05-06 14:36:59 +09002184 address |= TLB_MMIO;
2185 break;
2186 }
pbrook6658ffb2007-03-16 23:58:11 +00002187 }
pbrook0f459d12008-06-09 00:20:13 +00002188 }
balrogd79acba2007-06-26 20:01:13 +00002189
pbrook0f459d12008-06-09 00:20:13 +00002190 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2191 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2192 te = &env->tlb_table[mmu_idx][index];
2193 te->addend = addend - vaddr;
2194 if (prot & PAGE_READ) {
2195 te->addr_read = address;
2196 } else {
2197 te->addr_read = -1;
2198 }
edgar_igl5c751e92008-05-06 08:44:21 +00002199
pbrook0f459d12008-06-09 00:20:13 +00002200 if (prot & PAGE_EXEC) {
2201 te->addr_code = code_address;
2202 } else {
2203 te->addr_code = -1;
2204 }
2205 if (prot & PAGE_WRITE) {
Avi Kivity75c578d2012-01-02 15:40:52 +02002206 if ((pd & ~TARGET_PAGE_MASK) == io_mem_rom.ram_addr || is_romd(pd)) {
pbrook0f459d12008-06-09 00:20:13 +00002207 /* Write access calls the I/O callback. */
2208 te->addr_write = address | TLB_MMIO;
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002209 } else if ((pd & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr &&
pbrook0f459d12008-06-09 00:20:13 +00002210 !cpu_physical_memory_is_dirty(pd)) {
2211 te->addr_write = address | TLB_NOTDIRTY;
bellard84b7b8e2005-11-28 21:19:04 +00002212 } else {
pbrook0f459d12008-06-09 00:20:13 +00002213 te->addr_write = address;
bellard9fa3e852004-01-04 18:06:42 +00002214 }
pbrook0f459d12008-06-09 00:20:13 +00002215 } else {
2216 te->addr_write = -1;
bellard9fa3e852004-01-04 18:06:42 +00002217 }
bellard9fa3e852004-01-04 18:06:42 +00002218}
2219
bellard01243112004-01-04 15:48:17 +00002220#else
2221
bellardee8b7022004-02-03 23:35:10 +00002222void tlb_flush(CPUState *env, int flush_global)
bellard01243112004-01-04 15:48:17 +00002223{
2224}
2225
bellard2e126692004-04-25 21:28:44 +00002226void tlb_flush_page(CPUState *env, target_ulong addr)
bellard01243112004-01-04 15:48:17 +00002227{
2228}
2229
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002230/*
2231 * Walks guest process memory "regions" one by one
2232 * and calls callback function 'fn' for each region.
2233 */
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002234
2235struct walk_memory_regions_data
bellard9fa3e852004-01-04 18:06:42 +00002236{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002237 walk_memory_regions_fn fn;
2238 void *priv;
2239 unsigned long start;
2240 int prot;
2241};
bellard9fa3e852004-01-04 18:06:42 +00002242
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002243static int walk_memory_regions_end(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00002244 abi_ulong end, int new_prot)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002245{
2246 if (data->start != -1ul) {
2247 int rc = data->fn(data->priv, data->start, end, data->prot);
2248 if (rc != 0) {
2249 return rc;
bellard9fa3e852004-01-04 18:06:42 +00002250 }
bellard33417e72003-08-10 21:47:01 +00002251 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002252
2253 data->start = (new_prot ? end : -1ul);
2254 data->prot = new_prot;
2255
2256 return 0;
2257}
2258
2259static int walk_memory_regions_1(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00002260 abi_ulong base, int level, void **lp)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002261{
Paul Brookb480d9b2010-03-12 23:23:29 +00002262 abi_ulong pa;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002263 int i, rc;
2264
2265 if (*lp == NULL) {
2266 return walk_memory_regions_end(data, base, 0);
2267 }
2268
2269 if (level == 0) {
2270 PageDesc *pd = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00002271 for (i = 0; i < L2_SIZE; ++i) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002272 int prot = pd[i].flags;
2273
2274 pa = base | (i << TARGET_PAGE_BITS);
2275 if (prot != data->prot) {
2276 rc = walk_memory_regions_end(data, pa, prot);
2277 if (rc != 0) {
2278 return rc;
2279 }
2280 }
2281 }
2282 } else {
2283 void **pp = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00002284 for (i = 0; i < L2_SIZE; ++i) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002285 pa = base | ((abi_ulong)i <<
2286 (TARGET_PAGE_BITS + L2_BITS * level));
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002287 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2288 if (rc != 0) {
2289 return rc;
2290 }
2291 }
2292 }
2293
2294 return 0;
2295}
2296
2297int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2298{
2299 struct walk_memory_regions_data data;
2300 unsigned long i;
2301
2302 data.fn = fn;
2303 data.priv = priv;
2304 data.start = -1ul;
2305 data.prot = 0;
2306
2307 for (i = 0; i < V_L1_SIZE; i++) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002308 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002309 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2310 if (rc != 0) {
2311 return rc;
2312 }
2313 }
2314
2315 return walk_memory_regions_end(&data, 0, 0);
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002316}
2317
Paul Brookb480d9b2010-03-12 23:23:29 +00002318static int dump_region(void *priv, abi_ulong start,
2319 abi_ulong end, unsigned long prot)
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002320{
2321 FILE *f = (FILE *)priv;
2322
Paul Brookb480d9b2010-03-12 23:23:29 +00002323 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2324 " "TARGET_ABI_FMT_lx" %c%c%c\n",
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002325 start, end, end - start,
2326 ((prot & PAGE_READ) ? 'r' : '-'),
2327 ((prot & PAGE_WRITE) ? 'w' : '-'),
2328 ((prot & PAGE_EXEC) ? 'x' : '-'));
2329
2330 return (0);
2331}
2332
2333/* dump memory mappings */
2334void page_dump(FILE *f)
2335{
2336 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2337 "start", "end", "size", "prot");
2338 walk_memory_regions(f, dump_region);
bellard33417e72003-08-10 21:47:01 +00002339}
2340
pbrook53a59602006-03-25 19:31:22 +00002341int page_get_flags(target_ulong address)
bellard33417e72003-08-10 21:47:01 +00002342{
bellard9fa3e852004-01-04 18:06:42 +00002343 PageDesc *p;
2344
2345 p = page_find(address >> TARGET_PAGE_BITS);
bellard33417e72003-08-10 21:47:01 +00002346 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00002347 return 0;
2348 return p->flags;
bellard33417e72003-08-10 21:47:01 +00002349}
2350
Richard Henderson376a7902010-03-10 15:57:04 -08002351/* Modify the flags of a page and invalidate the code if necessary.
2352   The flag PAGE_WRITE_ORG is set automatically depending
2353 on PAGE_WRITE. The mmap_lock should already be held. */
pbrook53a59602006-03-25 19:31:22 +00002354void page_set_flags(target_ulong start, target_ulong end, int flags)
bellard9fa3e852004-01-04 18:06:42 +00002355{
Richard Henderson376a7902010-03-10 15:57:04 -08002356 target_ulong addr, len;
bellard9fa3e852004-01-04 18:06:42 +00002357
Richard Henderson376a7902010-03-10 15:57:04 -08002358 /* This function should never be called with addresses outside the
2359 guest address space. If this assert fires, it probably indicates
2360 a missing call to h2g_valid. */
Paul Brookb480d9b2010-03-12 23:23:29 +00002361#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2362 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002363#endif
2364 assert(start < end);
2365
bellard9fa3e852004-01-04 18:06:42 +00002366 start = start & TARGET_PAGE_MASK;
2367 end = TARGET_PAGE_ALIGN(end);
Richard Henderson376a7902010-03-10 15:57:04 -08002368
2369 if (flags & PAGE_WRITE) {
bellard9fa3e852004-01-04 18:06:42 +00002370 flags |= PAGE_WRITE_ORG;
Richard Henderson376a7902010-03-10 15:57:04 -08002371 }
2372
2373 for (addr = start, len = end - start;
2374 len != 0;
2375 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2376 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2377
2378 /* If the write protection bit is set, then we invalidate
2379 the code inside. */
ths5fafdf22007-09-16 21:08:06 +00002380 if (!(p->flags & PAGE_WRITE) &&
bellard9fa3e852004-01-04 18:06:42 +00002381 (flags & PAGE_WRITE) &&
2382 p->first_tb) {
bellardd720b932004-04-25 17:57:43 +00002383 tb_invalidate_phys_page(addr, 0, NULL);
bellard9fa3e852004-01-04 18:06:42 +00002384 }
2385 p->flags = flags;
2386 }
bellard9fa3e852004-01-04 18:06:42 +00002387}
2388
ths3d97b402007-11-02 19:02:07 +00002389int page_check_range(target_ulong start, target_ulong len, int flags)
2390{
2391 PageDesc *p;
2392 target_ulong end;
2393 target_ulong addr;
2394
Richard Henderson376a7902010-03-10 15:57:04 -08002395 /* This function should never be called with addresses outside the
2396 guest address space. If this assert fires, it probably indicates
2397 a missing call to h2g_valid. */
Blue Swirl338e9e62010-03-13 09:48:08 +00002398#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2399 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002400#endif
2401
Richard Henderson3e0650a2010-03-29 10:54:42 -07002402 if (len == 0) {
2403 return 0;
2404 }
Richard Henderson376a7902010-03-10 15:57:04 -08002405 if (start + len - 1 < start) {
2406 /* We've wrapped around. */
balrog55f280c2008-10-28 10:24:11 +00002407 return -1;
Richard Henderson376a7902010-03-10 15:57:04 -08002408 }
balrog55f280c2008-10-28 10:24:11 +00002409
ths3d97b402007-11-02 19:02:07 +00002410    end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
2411 start = start & TARGET_PAGE_MASK;
2412
Richard Henderson376a7902010-03-10 15:57:04 -08002413 for (addr = start, len = end - start;
2414 len != 0;
2415 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
ths3d97b402007-11-02 19:02:07 +00002416 p = page_find(addr >> TARGET_PAGE_BITS);
2417        if (!p)
2418            return -1;
2419        if (!(p->flags & PAGE_VALID))
2420            return -1;
2421
bellarddae32702007-11-14 10:51:00 +00002422 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
ths3d97b402007-11-02 19:02:07 +00002423 return -1;
bellarddae32702007-11-14 10:51:00 +00002424 if (flags & PAGE_WRITE) {
2425 if (!(p->flags & PAGE_WRITE_ORG))
2426 return -1;
2427 /* unprotect the page if it was put read-only because it
2428 contains translated code */
2429 if (!(p->flags & PAGE_WRITE)) {
2430 if (!page_unprotect(addr, 0, NULL))
2431 return -1;
2432 }
2433 return 0;
2434 }
ths3d97b402007-11-02 19:02:07 +00002435 }
2436 return 0;
2437}
2438
bellard9fa3e852004-01-04 18:06:42 +00002439/* called from signal handler: invalidate the code and unprotect the
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002440 page. Return TRUE if the fault was successfully handled. */
pbrook53a59602006-03-25 19:31:22 +00002441int page_unprotect(target_ulong address, unsigned long pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00002442{
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002443 unsigned int prot;
2444 PageDesc *p;
pbrook53a59602006-03-25 19:31:22 +00002445 target_ulong host_start, host_end, addr;
bellard9fa3e852004-01-04 18:06:42 +00002446
pbrookc8a706f2008-06-02 16:16:42 +00002447 /* Technically this isn't safe inside a signal handler. However we
2448 know this only ever happens in a synchronous SEGV handler, so in
2449 practice it seems to be ok. */
2450 mmap_lock();
2451
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002452 p = page_find(address >> TARGET_PAGE_BITS);
2453 if (!p) {
pbrookc8a706f2008-06-02 16:16:42 +00002454 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002455 return 0;
pbrookc8a706f2008-06-02 16:16:42 +00002456 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002457
bellard9fa3e852004-01-04 18:06:42 +00002458 /* if the page was really writable, then we change its
2459 protection back to writable */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002460 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2461 host_start = address & qemu_host_page_mask;
2462 host_end = host_start + qemu_host_page_size;
2463
2464 prot = 0;
2465 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2466 p = page_find(addr >> TARGET_PAGE_BITS);
2467 p->flags |= PAGE_WRITE;
2468 prot |= p->flags;
2469
bellard9fa3e852004-01-04 18:06:42 +00002470 /* and since the content will be modified, we must invalidate
2471 the corresponding translated code. */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002472 tb_invalidate_phys_page(addr, pc, puc);
bellard9fa3e852004-01-04 18:06:42 +00002473#ifdef DEBUG_TB_CHECK
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002474 tb_invalidate_check(addr);
bellard9fa3e852004-01-04 18:06:42 +00002475#endif
bellard9fa3e852004-01-04 18:06:42 +00002476 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002477 mprotect((void *)g2h(host_start), qemu_host_page_size,
2478 prot & PAGE_BITS);
2479
2480 mmap_unlock();
2481 return 1;
bellard9fa3e852004-01-04 18:06:42 +00002482 }
pbrookc8a706f2008-06-02 16:16:42 +00002483 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002484 return 0;
2485}
2486
bellard6a00d602005-11-21 23:25:50 +00002487static inline void tlb_set_dirty(CPUState *env,
2488 unsigned long addr, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002489{
2490}
bellard9fa3e852004-01-04 18:06:42 +00002491#endif /* defined(CONFIG_USER_ONLY) */
2492
pbrooke2eef172008-06-08 01:09:01 +00002493#if !defined(CONFIG_USER_ONLY)
pbrook8da3ff12008-12-01 18:59:50 +00002494
Paul Brookc04b2b72010-03-01 03:31:14 +00002495#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2496typedef struct subpage_t {
Avi Kivity70c68e42012-01-02 12:32:48 +02002497 MemoryRegion iomem;
Paul Brookc04b2b72010-03-01 03:31:14 +00002498 target_phys_addr_t base;
Avi Kivity5312bd82012-02-12 18:32:55 +02002499 uint16_t sub_section[TARGET_PAGE_SIZE];
Paul Brookc04b2b72010-03-01 03:31:14 +00002500} subpage_t;
2501
Anthony Liguoric227f092009-10-01 16:12:16 -05002502static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002503 uint16_t section);
2504static subpage_t *subpage_init (target_phys_addr_t base, uint16_t *section,
2505 uint16_t orig_section);
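/* compute the sub-page range [start_addr2, end_addr2] that the
   registration [start_addr, start_addr + orig_size) covers inside the
   target page containing 'addr', and set need_subpage when it does
   not span the whole page */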
blueswir1db7b5422007-05-26 17:36:03 +00002506#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2507 need_subpage) \
2508 do { \
2509 if (addr > start_addr) \
2510 start_addr2 = 0; \
2511 else { \
2512 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2513 if (start_addr2 > 0) \
2514 need_subpage = 1; \
2515 } \
2516 \
blueswir149e9fba2007-05-30 17:25:06 +00002517 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
blueswir1db7b5422007-05-26 17:36:03 +00002518 end_addr2 = TARGET_PAGE_SIZE - 1; \
2519 else { \
2520 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2521 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2522 need_subpage = 1; \
2523 } \
2524 } while (0)
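
/* Worked example (editor's note), assuming 4 KiB target pages: for the
 * page at addr = 0x1000 and a registration with start_addr = 0x1080 and
 * orig_size = 0x100, the macro yields start_addr2 = 0x080 and
 * end_addr2 = 0x17f and sets need_subpage: only that slice of the page
 * is routed to the new section. */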
2525
Avi Kivity5312bd82012-02-12 18:32:55 +02002526static void destroy_page_desc(uint16_t section_index)
Avi Kivity54688b12012-02-09 17:34:32 +02002527{
Avi Kivity5312bd82012-02-12 18:32:55 +02002528 MemoryRegionSection *section = &phys_sections[section_index];
2529 MemoryRegion *mr = section->mr;
Avi Kivity54688b12012-02-09 17:34:32 +02002530
2531 if (mr->subpage) {
2532 subpage_t *subpage = container_of(mr, subpage_t, iomem);
2533 memory_region_destroy(&subpage->iomem);
2534 g_free(subpage);
2535 }
2536}
2537
Avi Kivity4346ae32012-02-10 17:00:01 +02002538static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
Avi Kivity54688b12012-02-09 17:34:32 +02002539{
2540 unsigned i;
Avi Kivity4346ae32012-02-10 17:00:01 +02002541 PhysPageEntry *p = lp->u.node;
Avi Kivity54688b12012-02-09 17:34:32 +02002542
Avi Kivity4346ae32012-02-10 17:00:01 +02002543 if (!p) {
Avi Kivity54688b12012-02-09 17:34:32 +02002544 return;
2545 }
2546
Avi Kivity4346ae32012-02-10 17:00:01 +02002547 for (i = 0; i < L2_SIZE; ++i) {
2548 if (level > 0) {
Avi Kivity54688b12012-02-09 17:34:32 +02002549 destroy_l2_mapping(&p[i], level - 1);
Avi Kivity4346ae32012-02-10 17:00:01 +02002550 } else {
2551 destroy_page_desc(p[i].u.leaf);
Avi Kivity54688b12012-02-09 17:34:32 +02002552 }
Avi Kivity54688b12012-02-09 17:34:32 +02002553 }
Avi Kivity4346ae32012-02-10 17:00:01 +02002554 g_free(p);
2555 lp->u.node = NULL;
Avi Kivity54688b12012-02-09 17:34:32 +02002556}
2557
2558static void destroy_all_mappings(void)
2559{
Avi Kivity3eef53d2012-02-10 14:57:31 +02002560 destroy_l2_mapping(&phys_map, P_L2_LEVELS - 1);
Avi Kivity54688b12012-02-09 17:34:32 +02002561}
2562
Avi Kivity5312bd82012-02-12 18:32:55 +02002563static uint16_t phys_section_add(MemoryRegionSection *section)
2564{
2565 if (phys_sections_nb == phys_sections_nb_alloc) {
2566 phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
2567 phys_sections = g_renew(MemoryRegionSection, phys_sections,
2568 phys_sections_nb_alloc);
2569 }
2570 phys_sections[phys_sections_nb] = *section;
2571 return phys_sections_nb++;
2572}
2573
2574static void phys_sections_clear(void)
2575{
2576 phys_sections_nb = 0;
2577}
2578
Michael S. Tsirkin8f2498f2009-09-29 18:53:16 +02002579/* register physical memory.
 2580 For RAM, the section size must be a multiple of the target page size.
 2581 If the section describes I/O rather than RAM, the address used when
pbrook8da3ff12008-12-01 18:59:50 +00002582 calling the MemoryRegion callbacks is the offset from the start of the
 2583 region: offset_within_region plus the offset of the access within the
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002584 section. Both offset_within_address_space and offset_within_region are
pbrook8da3ff12008-12-01 18:59:50 +00002585 rounded down to a page boundary before calculating this offset. This
 2586 should not be a problem unless their low bits differ. */
Avi Kivitydd811242012-01-02 12:17:03 +02002587void cpu_register_physical_memory_log(MemoryRegionSection *section,
Avi Kivityd7ec83e2012-02-08 17:07:26 +02002588 bool readonly)
bellard33417e72003-08-10 21:47:01 +00002589{
Avi Kivitydd811242012-01-02 12:17:03 +02002590 target_phys_addr_t start_addr = section->offset_within_address_space;
2591 ram_addr_t size = section->size;
Anthony Liguoric227f092009-10-01 16:12:16 -05002592 target_phys_addr_t addr, end_addr;
Avi Kivity5312bd82012-02-12 18:32:55 +02002593 uint16_t *p;
bellard9d420372006-06-25 22:25:22 +00002594 CPUState *env;
Anthony Liguoric227f092009-10-01 16:12:16 -05002595 ram_addr_t orig_size = size;
Richard Hendersonf6405242010-04-22 16:47:31 -07002596 subpage_t *subpage;
Avi Kivity5312bd82012-02-12 18:32:55 +02002597 uint16_t section_index = phys_section_add(section);
Avi Kivitydd811242012-01-02 12:17:03 +02002598
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002599 assert(size);
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002600
bellard5fd386f2004-05-23 21:11:22 +00002601 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
Anthony Liguoric227f092009-10-01 16:12:16 -05002602 end_addr = start_addr + (target_phys_addr_t)size;
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002603
2604 addr = start_addr;
2605 do {
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02002606 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 0);
Avi Kivity5312bd82012-02-12 18:32:55 +02002607 if (p && *p != phys_section_unassigned) {
 2608 uint16_t orig_memory = *p;
Anthony Liguoric227f092009-10-01 16:12:16 -05002609 target_phys_addr_t start_addr2, end_addr2;
blueswir1db7b5422007-05-26 17:36:03 +00002610 int need_subpage = 0;
Avi Kivity5312bd82012-02-12 18:32:55 +02002611 MemoryRegion *mr = phys_sections[orig_memory].mr;
blueswir1db7b5422007-05-26 17:36:03 +00002612
2613 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2614 need_subpage);
Richard Hendersonf6405242010-04-22 16:47:31 -07002615 if (need_subpage) {
Avi Kivityb3b00c72012-01-02 13:20:11 +02002616 if (!(mr->subpage)) {
blueswir1db7b5422007-05-26 17:36:03 +00002617 subpage = subpage_init((addr & TARGET_PAGE_MASK),
Avi Kivity5312bd82012-02-12 18:32:55 +02002618 p, orig_memory);
blueswir1db7b5422007-05-26 17:36:03 +00002619 } else {
Avi Kivitya621f382012-01-02 13:12:08 +02002620 subpage = container_of(mr, subpage_t, iomem);
blueswir1db7b5422007-05-26 17:36:03 +00002621 }
Avi Kivity5312bd82012-02-12 18:32:55 +02002622 subpage_register(subpage, start_addr2, end_addr2,
2623 section_index);
blueswir1db7b5422007-05-26 17:36:03 +00002624 } else {
Avi Kivity5312bd82012-02-12 18:32:55 +02002625 *p = section_index;
blueswir1db7b5422007-05-26 17:36:03 +00002626 }
2627 } else {
Avi Kivity5312bd82012-02-12 18:32:55 +02002628 MemoryRegion *mr = section->mr;
blueswir1db7b5422007-05-26 17:36:03 +00002629 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
Avi Kivity5312bd82012-02-12 18:32:55 +02002630 *p = section_index;
2631 if (!(memory_region_is_ram(mr) || mr->rom_device)) {
Anthony Liguoric227f092009-10-01 16:12:16 -05002632 target_phys_addr_t start_addr2, end_addr2;
blueswir1db7b5422007-05-26 17:36:03 +00002633 int need_subpage = 0;
2634
2635 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2636 end_addr2, need_subpage);
2637
Richard Hendersonf6405242010-04-22 16:47:31 -07002638 if (need_subpage) {
blueswir1db7b5422007-05-26 17:36:03 +00002639 subpage = subpage_init((addr & TARGET_PAGE_MASK),
Avi Kivity5312bd82012-02-12 18:32:55 +02002640 p, phys_section_unassigned);
blueswir1db7b5422007-05-26 17:36:03 +00002641 subpage_register(subpage, start_addr2, end_addr2,
Avi Kivity5312bd82012-02-12 18:32:55 +02002642 section_index);
blueswir1db7b5422007-05-26 17:36:03 +00002643 }
2644 }
2645 }
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002646 addr += TARGET_PAGE_SIZE;
2647 } while (addr != end_addr);
ths3b46e622007-09-17 08:09:54 +00002648
bellard9d420372006-06-25 22:25:22 +00002649 /* since each CPU stores ram addresses in its TLB cache, we must
2650 reset the modified entries */
2651 /* XXX: slow ! */
2652 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2653 tlb_flush(env, 1);
2654 }
bellard33417e72003-08-10 21:47:01 +00002655}
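
#if 0
/* Editor's sketch (not compiled): the shape of a call to
 * cpu_register_physical_memory_log().  In the current code the only
 * callers are the core MemoryListener callbacks below, which receive a
 * ready-made section from the memory core; the values here are purely
 * illustrative. */
static void sketch_register_one_page(MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .mr = mr,
        .offset_within_address_space = 0x10000000, /* example address */
        .offset_within_region = 0,
        .size = TARGET_PAGE_SIZE,
    };

    cpu_register_physical_memory_log(&section, false /* readonly */);
}
#endif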
2656
Anthony Liguoric227f092009-10-01 16:12:16 -05002657void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002658{
2659 if (kvm_enabled())
2660 kvm_coalesce_mmio_region(addr, size);
2661}
2662
Anthony Liguoric227f092009-10-01 16:12:16 -05002663void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002664{
2665 if (kvm_enabled())
2666 kvm_uncoalesce_mmio_region(addr, size);
2667}
2668
Sheng Yang62a27442010-01-26 19:21:16 +08002669void qemu_flush_coalesced_mmio_buffer(void)
2670{
2671 if (kvm_enabled())
2672 kvm_flush_coalesced_mmio_buffer();
2673}
2674
Marcelo Tosattic9027602010-03-01 20:25:08 -03002675#if defined(__linux__) && !defined(TARGET_S390X)
2676
2677#include <sys/vfs.h>
2678
2679#define HUGETLBFS_MAGIC 0x958458f6
2680
2681static long gethugepagesize(const char *path)
2682{
2683 struct statfs fs;
2684 int ret;
2685
2686 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002687 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002688 } while (ret != 0 && errno == EINTR);
2689
2690 if (ret != 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002691 perror(path);
2692 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002693 }
2694
2695 if (fs.f_type != HUGETLBFS_MAGIC)
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002696 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002697
2698 return fs.f_bsize;
2699}
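
#if 0
/* Editor's sketch (not compiled): gethugepagesize() simply reports the
 * filesystem block size, so on a hugetlbfs mount it returns the huge
 * page size (typically 2 MiB on x86-64) and 0 on stat failure. */
static void sketch_report_hugepage_size(const char *path)
{
    long hpagesize = gethugepagesize(path);

    if (hpagesize) {
        fprintf(stderr, "%s: huge page size is %ld bytes\n",
                path, hpagesize);
    }
}
#endif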
2700
Alex Williamson04b16652010-07-02 11:13:17 -06002701static void *file_ram_alloc(RAMBlock *block,
2702 ram_addr_t memory,
2703 const char *path)
Marcelo Tosattic9027602010-03-01 20:25:08 -03002704{
2705 char *filename;
2706 void *area;
2707 int fd;
2708#ifdef MAP_POPULATE
2709 int flags;
2710#endif
2711 unsigned long hpagesize;
2712
2713 hpagesize = gethugepagesize(path);
2714 if (!hpagesize) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002715 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002716 }
2717
2718 if (memory < hpagesize) {
2719 return NULL;
2720 }
2721
2722 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2723 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2724 return NULL;
2725 }
2726
2727 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002728 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002729 }
2730
2731 fd = mkstemp(filename);
2732 if (fd < 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002733 perror("unable to create backing store for hugepages");
2734 free(filename);
2735 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002736 }
2737 unlink(filename);
2738 free(filename);
2739
2740 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2741
2742 /*
2743 * ftruncate is not supported by hugetlbfs in older
2744 * hosts, so don't bother bailing out on errors.
2745 * If anything goes wrong with it under other filesystems,
2746 * mmap will fail.
2747 */
2748 if (ftruncate(fd, memory))
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002749 perror("ftruncate");
Marcelo Tosattic9027602010-03-01 20:25:08 -03002750
2751#ifdef MAP_POPULATE
 2752 /* NB: MAP_POPULATE won't exhaustively allocate all physical pages when
2753 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2754 * to sidestep this quirk.
2755 */
2756 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2757 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2758#else
2759 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2760#endif
2761 if (area == MAP_FAILED) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002762 perror("file_ram_alloc: can't mmap RAM pages");
2763 close(fd);
2764 return (NULL);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002765 }
Alex Williamson04b16652010-07-02 11:13:17 -06002766 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002767 return area;
2768}
2769#endif
2770
Alex Williamsond17b5282010-06-25 11:08:38 -06002771static ram_addr_t find_ram_offset(ram_addr_t size)
2772{
Alex Williamson04b16652010-07-02 11:13:17 -06002773 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06002774 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002775
2776 if (QLIST_EMPTY(&ram_list.blocks))
2777 return 0;
2778
2779 QLIST_FOREACH(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002780 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002781
2782 end = block->offset + block->length;
2783
2784 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2785 if (next_block->offset >= end) {
2786 next = MIN(next, next_block->offset);
2787 }
2788 }
2789 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06002790 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06002791 mingap = next - end;
2792 }
2793 }
Alex Williamson3e837b22011-10-31 08:54:09 -06002794
2795 if (offset == RAM_ADDR_MAX) {
2796 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2797 (uint64_t)size);
2798 abort();
2799 }
2800
Alex Williamson04b16652010-07-02 11:13:17 -06002801 return offset;
2802}
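
/* Worked example (editor's note): with blocks at [0, 0x2000) and
 * [0x8000, 0x9000), a request for 0x3000 bytes fits in the 0x6000-byte
 * gap between them, so the new block goes at offset 0x2000 (best fit:
 * the smallest gap that still fits wins).  A 0x7000-byte request would
 * not fit there and would be placed at 0x9000 instead. */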
2803
2804static ram_addr_t last_ram_offset(void)
2805{
Alex Williamsond17b5282010-06-25 11:08:38 -06002806 RAMBlock *block;
2807 ram_addr_t last = 0;
2808
2809 QLIST_FOREACH(block, &ram_list.blocks, next)
2810 last = MAX(last, block->offset + block->length);
2811
2812 return last;
2813}
2814
Avi Kivityc5705a72011-12-20 15:59:12 +02002815void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
Cam Macdonell84b89d72010-07-26 18:10:57 -06002816{
2817 RAMBlock *new_block, *block;
2818
Avi Kivityc5705a72011-12-20 15:59:12 +02002819 new_block = NULL;
2820 QLIST_FOREACH(block, &ram_list.blocks, next) {
2821 if (block->offset == addr) {
2822 new_block = block;
2823 break;
2824 }
2825 }
2826 assert(new_block);
2827 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002828
2829 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2830 char *id = dev->parent_bus->info->get_dev_path(dev);
2831 if (id) {
2832 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05002833 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002834 }
2835 }
2836 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2837
2838 QLIST_FOREACH(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02002839 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06002840 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2841 new_block->idstr);
2842 abort();
2843 }
2844 }
Avi Kivityc5705a72011-12-20 15:59:12 +02002845}
2846
2847ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2848 MemoryRegion *mr)
2849{
2850 RAMBlock *new_block;
2851
2852 size = TARGET_PAGE_ALIGN(size);
2853 new_block = g_malloc0(sizeof(*new_block));
Cam Macdonell84b89d72010-07-26 18:10:57 -06002854
Avi Kivity7c637362011-12-21 13:09:49 +02002855 new_block->mr = mr;
Jun Nakajima432d2682010-08-31 16:41:25 +01002856 new_block->offset = find_ram_offset(size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002857 if (host) {
2858 new_block->host = host;
Huang Yingcd19cfa2011-03-02 08:56:19 +01002859 new_block->flags |= RAM_PREALLOC_MASK;
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002860 } else {
2861 if (mem_path) {
2862#if defined (__linux__) && !defined(TARGET_S390X)
2863 new_block->host = file_ram_alloc(new_block, size, mem_path);
2864 if (!new_block->host) {
2865 new_block->host = qemu_vmalloc(size);
Andreas Färbere78815a2010-09-25 11:26:05 +00002866 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002867 }
2868#else
2869 fprintf(stderr, "-mem-path option unsupported\n");
2870 exit(1);
2871#endif
2872 } else {
2873#if defined(TARGET_S390X) && defined(CONFIG_KVM)
Christian Borntraegerff836782011-05-10 14:49:10 +02002874 /* S390 KVM requires the topmost vma of the RAM to be smaller than
 2875 a system-defined value, which is at least 256GB. Larger systems
 2876 have larger values. We put the guest between the end of the data
2877 segment (system break) and this value. We use 32GB as a base to
2878 have enough room for the system break to grow. */
2879 new_block->host = mmap((void*)0x800000000, size,
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002880 PROT_EXEC|PROT_READ|PROT_WRITE,
Christian Borntraegerff836782011-05-10 14:49:10 +02002881 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
Alexander Graffb8b2732011-05-20 17:33:28 +02002882 if (new_block->host == MAP_FAILED) {
2883 fprintf(stderr, "Allocating RAM failed\n");
2884 abort();
2885 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002886#else
Jan Kiszka868bb332011-06-21 22:59:09 +02002887 if (xen_enabled()) {
Avi Kivityfce537d2011-12-18 15:48:55 +02002888 xen_ram_alloc(new_block->offset, size, mr);
Jun Nakajima432d2682010-08-31 16:41:25 +01002889 } else {
2890 new_block->host = qemu_vmalloc(size);
2891 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002892#endif
Andreas Färbere78815a2010-09-25 11:26:05 +00002893 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002894 }
2895 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06002896 new_block->length = size;
2897
2898 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2899
Anthony Liguori7267c092011-08-20 22:09:37 -05002900 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
Cam Macdonell84b89d72010-07-26 18:10:57 -06002901 last_ram_offset() >> TARGET_PAGE_BITS);
2902 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2903 0xff, size >> TARGET_PAGE_BITS);
2904
2905 if (kvm_enabled())
2906 kvm_setup_guest_memory(new_block->host, size);
2907
2908 return new_block->offset;
2909}
2910
Avi Kivityc5705a72011-12-20 15:59:12 +02002911ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
pbrook94a6b542009-04-11 17:15:54 +00002912{
Avi Kivityc5705a72011-12-20 15:59:12 +02002913 return qemu_ram_alloc_from_ptr(size, NULL, mr);
pbrook94a6b542009-04-11 17:15:54 +00002914}
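
#if 0
/* Editor's sketch (not compiled): a device model backing a MemoryRegion
 * with guest RAM.  "example.vram" is an illustrative name; in the real
 * tree memory_region_init_ram() performs roughly these steps on the
 * device's behalf. */
static void sketch_alloc_vram(MemoryRegion *mr)
{
    ram_addr_t offset;

    offset = qemu_ram_alloc(8 * 1024 * 1024, mr); /* 8 MiB of VRAM */
    qemu_ram_set_idstr(offset, "example.vram", NULL /* no DeviceState */);

    /* ... and on hot-unplug or error paths: */
    qemu_ram_free(offset);
}
#endif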
bellarde9a1ab12007-02-08 23:08:38 +00002915
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002916void qemu_ram_free_from_ptr(ram_addr_t addr)
2917{
2918 RAMBlock *block;
2919
2920 QLIST_FOREACH(block, &ram_list.blocks, next) {
2921 if (addr == block->offset) {
2922 QLIST_REMOVE(block, next);
Anthony Liguori7267c092011-08-20 22:09:37 -05002923 g_free(block);
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002924 return;
2925 }
2926 }
2927}
2928
Anthony Liguoric227f092009-10-01 16:12:16 -05002929void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00002930{
Alex Williamson04b16652010-07-02 11:13:17 -06002931 RAMBlock *block;
2932
2933 QLIST_FOREACH(block, &ram_list.blocks, next) {
2934 if (addr == block->offset) {
2935 QLIST_REMOVE(block, next);
Huang Yingcd19cfa2011-03-02 08:56:19 +01002936 if (block->flags & RAM_PREALLOC_MASK) {
2937 ;
2938 } else if (mem_path) {
Alex Williamson04b16652010-07-02 11:13:17 -06002939#if defined (__linux__) && !defined(TARGET_S390X)
2940 if (block->fd) {
2941 munmap(block->host, block->length);
2942 close(block->fd);
2943 } else {
2944 qemu_vfree(block->host);
2945 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01002946#else
2947 abort();
Alex Williamson04b16652010-07-02 11:13:17 -06002948#endif
2949 } else {
2950#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2951 munmap(block->host, block->length);
2952#else
Jan Kiszka868bb332011-06-21 22:59:09 +02002953 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002954 xen_invalidate_map_cache_entry(block->host);
Jun Nakajima432d2682010-08-31 16:41:25 +01002955 } else {
2956 qemu_vfree(block->host);
2957 }
Alex Williamson04b16652010-07-02 11:13:17 -06002958#endif
2959 }
Anthony Liguori7267c092011-08-20 22:09:37 -05002960 g_free(block);
Alex Williamson04b16652010-07-02 11:13:17 -06002961 return;
2962 }
2963 }
2964
bellarde9a1ab12007-02-08 23:08:38 +00002965}
2966
Huang Yingcd19cfa2011-03-02 08:56:19 +01002967#ifndef _WIN32
2968void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2969{
2970 RAMBlock *block;
2971 ram_addr_t offset;
2972 int flags;
2973 void *area, *vaddr;
2974
2975 QLIST_FOREACH(block, &ram_list.blocks, next) {
2976 offset = addr - block->offset;
2977 if (offset < block->length) {
2978 vaddr = block->host + offset;
2979 if (block->flags & RAM_PREALLOC_MASK) {
2980 ;
2981 } else {
2982 flags = MAP_FIXED;
2983 munmap(vaddr, length);
2984 if (mem_path) {
2985#if defined(__linux__) && !defined(TARGET_S390X)
2986 if (block->fd) {
2987#ifdef MAP_POPULATE
2988 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
2989 MAP_PRIVATE;
2990#else
2991 flags |= MAP_PRIVATE;
2992#endif
2993 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2994 flags, block->fd, offset);
2995 } else {
2996 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2997 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2998 flags, -1, 0);
2999 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01003000#else
3001 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01003002#endif
3003 } else {
3004#if defined(TARGET_S390X) && defined(CONFIG_KVM)
3005 flags |= MAP_SHARED | MAP_ANONYMOUS;
3006 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
3007 flags, -1, 0);
3008#else
3009 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3010 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3011 flags, -1, 0);
3012#endif
3013 }
3014 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00003015 fprintf(stderr, "Could not remap addr: "
3016 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01003017 length, addr);
3018 exit(1);
3019 }
3020 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
3021 }
3022 return;
3023 }
3024 }
3025}
3026#endif /* !_WIN32 */
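
/* Editor's note: qemu_ram_remap() exists for host memory-error recovery
 * (hwpoison): after the kernel discards a poisoned page, remapping the
 * affected guest range installs a fresh page at the same host virtual
 * address so the guest can be resumed. */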
3027
pbrookdc828ca2009-04-09 22:21:07 +00003028/* Return a host pointer to ram allocated with qemu_ram_alloc.
pbrook5579c7f2009-04-11 14:47:08 +00003029 With the exception of the softmmu code in this file, this should
3030 only be used for local memory (e.g. video ram) that the device owns,
3031 and knows it isn't going to access beyond the end of the block.
3032
3033 It should not be used for general purpose DMA.
3034 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
3035 */
Anthony Liguoric227f092009-10-01 16:12:16 -05003036void *qemu_get_ram_ptr(ram_addr_t addr)
pbrookdc828ca2009-04-09 22:21:07 +00003037{
pbrook94a6b542009-04-11 17:15:54 +00003038 RAMBlock *block;
3039
Alex Williamsonf471a172010-06-11 11:11:42 -06003040 QLIST_FOREACH(block, &ram_list.blocks, next) {
3041 if (addr - block->offset < block->length) {
Vincent Palatin7d82af32011-03-10 15:47:46 -05003042 /* Move this entry to the start of the list. */
3043 if (block != QLIST_FIRST(&ram_list.blocks)) {
3044 QLIST_REMOVE(block, next);
3045 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
3046 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003047 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003048 /* We need to check whether the requested address is in RAM
 3049 * because we don't want to map the entire guest memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003050 * In that case just map up to the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01003051 */
3052 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003053 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01003054 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003055 block->host =
3056 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01003057 }
3058 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003059 return block->host + (addr - block->offset);
3060 }
pbrook94a6b542009-04-11 17:15:54 +00003061 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003062
3063 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3064 abort();
3065
3066 return NULL;
pbrookdc828ca2009-04-09 22:21:07 +00003067}
3068
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02003069/* Return a host pointer to ram allocated with qemu_ram_alloc.
3070 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3071 */
3072void *qemu_safe_ram_ptr(ram_addr_t addr)
3073{
3074 RAMBlock *block;
3075
3076 QLIST_FOREACH(block, &ram_list.blocks, next) {
3077 if (addr - block->offset < block->length) {
Jan Kiszka868bb332011-06-21 22:59:09 +02003078 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003079 /* We need to check whether the requested address is in RAM
 3080 * because we don't want to map the entire guest memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003081 * In that case just map up to the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01003082 */
3083 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003084 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01003085 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003086 block->host =
3087 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01003088 }
3089 }
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02003090 return block->host + (addr - block->offset);
3091 }
3092 }
3093
3094 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3095 abort();
3096
3097 return NULL;
3098}
3099
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003100/* Return a host pointer to guest RAM. Similar to qemu_get_ram_ptr
3101 * but takes a size argument */
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003102void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003103{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003104 if (*size == 0) {
3105 return NULL;
3106 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003107 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003108 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02003109 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003110 RAMBlock *block;
3111
3112 QLIST_FOREACH(block, &ram_list.blocks, next) {
3113 if (addr - block->offset < block->length) {
3114 if (addr - block->offset + *size > block->length)
3115 *size = block->length - addr + block->offset;
3116 return block->host + (addr - block->offset);
3117 }
3118 }
3119
3120 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3121 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003122 }
3123}
3124
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003125void qemu_put_ram_ptr(void *addr)
3126{
3127 trace_qemu_put_ram_ptr(addr);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003128}
3129
Marcelo Tosattie8902612010-10-11 15:31:19 -03003130int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00003131{
pbrook94a6b542009-04-11 17:15:54 +00003132 RAMBlock *block;
3133 uint8_t *host = ptr;
3134
Jan Kiszka868bb332011-06-21 22:59:09 +02003135 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003136 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003137 return 0;
3138 }
3139
Alex Williamsonf471a172010-06-11 11:11:42 -06003140 QLIST_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003141 /* This case happens when the block is not mapped. */
3142 if (block->host == NULL) {
3143 continue;
3144 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003145 if (host - block->host < block->length) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03003146 *ram_addr = block->offset + (host - block->host);
3147 return 0;
Alex Williamsonf471a172010-06-11 11:11:42 -06003148 }
pbrook94a6b542009-04-11 17:15:54 +00003149 }
Jun Nakajima432d2682010-08-31 16:41:25 +01003150
Marcelo Tosattie8902612010-10-11 15:31:19 -03003151 return -1;
3152}
Alex Williamsonf471a172010-06-11 11:11:42 -06003153
Marcelo Tosattie8902612010-10-11 15:31:19 -03003154/* Some of the softmmu routines need to translate from a host pointer
3155 (typically a TLB entry) back to a ram offset. */
3156ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3157{
3158 ram_addr_t ram_addr;
Alex Williamsonf471a172010-06-11 11:11:42 -06003159
Marcelo Tosattie8902612010-10-11 15:31:19 -03003160 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3161 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3162 abort();
3163 }
3164 return ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00003165}
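
#if 0
/* Editor's sketch (not compiled): qemu_get_ram_ptr() and
 * qemu_ram_addr_from_host() are inverses for any address inside a RAM
 * block (leaving aside Xen, where the map cache is consulted). */
static void sketch_round_trip(ram_addr_t addr)
{
    void *host = qemu_get_ram_ptr(addr);
    ram_addr_t back;

    if (qemu_ram_addr_from_host(host, &back) == 0) {
        assert(back == addr);
    }
}
#endif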
3166
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003167static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
3168 unsigned size)
bellard33417e72003-08-10 21:47:01 +00003169{
pbrook67d3b952006-12-18 05:03:52 +00003170#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00003171 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
pbrook67d3b952006-12-18 05:03:52 +00003172#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003173#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003174 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00003175#endif
3176 return 0;
3177}
3178
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003179static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
3180 uint64_t val, unsigned size)
blueswir1e18231a2008-10-06 18:46:28 +00003181{
3182#ifdef DEBUG_UNASSIGNED
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003183 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
blueswir1e18231a2008-10-06 18:46:28 +00003184#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003185#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003186 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00003187#endif
3188}
3189
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003190static const MemoryRegionOps unassigned_mem_ops = {
3191 .read = unassigned_mem_read,
3192 .write = unassigned_mem_write,
3193 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00003194};
3195
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003196static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
3197 unsigned size)
3198{
3199 abort();
3200}
3201
3202static void error_mem_write(void *opaque, target_phys_addr_t addr,
3203 uint64_t value, unsigned size)
3204{
3205 abort();
3206}
3207
3208static const MemoryRegionOps error_mem_ops = {
3209 .read = error_mem_read,
3210 .write = error_mem_write,
3211 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00003212};
3213
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003214static const MemoryRegionOps rom_mem_ops = {
3215 .read = error_mem_read,
3216 .write = unassigned_mem_write,
3217 .endianness = DEVICE_NATIVE_ENDIAN,
3218};
3219
3220static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
3221 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00003222{
bellard3a7d9292005-08-21 09:26:42 +00003223 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003224 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003225 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3226#if !defined(CONFIG_USER_ONLY)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003227 tb_invalidate_phys_page_fast(ram_addr, size);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003228 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003229#endif
3230 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003231 switch (size) {
3232 case 1:
3233 stb_p(qemu_get_ram_ptr(ram_addr), val);
3234 break;
3235 case 2:
3236 stw_p(qemu_get_ram_ptr(ram_addr), val);
3237 break;
3238 case 4:
3239 stl_p(qemu_get_ram_ptr(ram_addr), val);
3240 break;
3241 default:
3242 abort();
3243 }
bellardf23db162005-08-21 19:12:28 +00003244 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003245 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00003246 /* we remove the notdirty callback only if the code has been
3247 flushed */
3248 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00003249 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00003250}
3251
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003252static const MemoryRegionOps notdirty_mem_ops = {
3253 .read = error_mem_read,
3254 .write = notdirty_mem_write,
3255 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00003256};
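
/* Editor's note: notdirty_mem_write is the slow-path store handler the
 * TLB installs for RAM pages whose dirty bits are not all set (typically
 * pages containing translated code).  Each store first invalidates any
 * TBs on the page, performs the write, then sets the dirty flags; once
 * they reach 0xff the TLB entry is switched back to the fast path via
 * tlb_set_dirty(). */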
3257
pbrook0f459d12008-06-09 00:20:13 +00003258/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00003259static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00003260{
3261 CPUState *env = cpu_single_env;
aliguori06d55cc2008-11-18 20:24:06 +00003262 target_ulong pc, cs_base;
3263 TranslationBlock *tb;
pbrook0f459d12008-06-09 00:20:13 +00003264 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00003265 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00003266 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00003267
aliguori06d55cc2008-11-18 20:24:06 +00003268 if (env->watchpoint_hit) {
3269 /* We re-entered the check after replacing the TB. Now raise
 3270 * the debug interrupt so that it will trigger after the
3271 * current instruction. */
3272 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3273 return;
3274 }
pbrook2e70f6e2008-06-29 01:03:05 +00003275 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003276 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00003277 if ((vaddr == (wp->vaddr & len_mask) ||
3278 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00003279 wp->flags |= BP_WATCHPOINT_HIT;
3280 if (!env->watchpoint_hit) {
3281 env->watchpoint_hit = wp;
3282 tb = tb_find_pc(env->mem_io_pc);
3283 if (!tb) {
3284 cpu_abort(env, "check_watchpoint: could not find TB for "
3285 "pc=%p", (void *)env->mem_io_pc);
3286 }
Stefan Weil618ba8e2011-04-18 06:39:53 +00003287 cpu_restore_state(tb, env, env->mem_io_pc);
aliguori6e140f22008-11-18 20:37:55 +00003288 tb_phys_invalidate(tb, -1);
3289 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3290 env->exception_index = EXCP_DEBUG;
3291 } else {
3292 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3293 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3294 }
3295 cpu_resume_from_signal(env, NULL);
aliguori06d55cc2008-11-18 20:24:06 +00003296 }
aliguori6e140f22008-11-18 20:37:55 +00003297 } else {
3298 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00003299 }
3300 }
3301}
3302
pbrook6658ffb2007-03-16 23:58:11 +00003303/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3304 so these check for a hit then pass through to the normal out-of-line
3305 phys routines. */
Avi Kivity1ec9b902012-01-02 12:47:48 +02003306static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
3307 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00003308{
Avi Kivity1ec9b902012-01-02 12:47:48 +02003309 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
3310 switch (size) {
3311 case 1: return ldub_phys(addr);
3312 case 2: return lduw_phys(addr);
3313 case 4: return ldl_phys(addr);
3314 default: abort();
3315 }
pbrook6658ffb2007-03-16 23:58:11 +00003316}
3317
Avi Kivity1ec9b902012-01-02 12:47:48 +02003318static void watch_mem_write(void *opaque, target_phys_addr_t addr,
3319 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00003320{
Avi Kivity1ec9b902012-01-02 12:47:48 +02003321 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
3322 switch (size) {
 3323 case 1: stb_phys(addr, val); break;
 3324 case 2: stw_phys(addr, val); break;
 3325 case 4: stl_phys(addr, val); break;
 3326 default: abort();
3327 }
pbrook6658ffb2007-03-16 23:58:11 +00003328}
3329
Avi Kivity1ec9b902012-01-02 12:47:48 +02003330static const MemoryRegionOps watch_mem_ops = {
3331 .read = watch_mem_read,
3332 .write = watch_mem_write,
3333 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00003334};
pbrook6658ffb2007-03-16 23:58:11 +00003335
Avi Kivity70c68e42012-01-02 12:32:48 +02003336static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
3337 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003338{
Avi Kivity70c68e42012-01-02 12:32:48 +02003339 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003340 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02003341 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00003342#if defined(DEBUG_SUBPAGE)
3343 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3344 mmio, len, addr, idx);
3345#endif
blueswir1db7b5422007-05-26 17:36:03 +00003346
Avi Kivity5312bd82012-02-12 18:32:55 +02003347 section = &phys_sections[mmio->sub_section[idx]];
3348 addr += mmio->base;
3349 addr -= section->offset_within_address_space;
3350 addr += section->offset_within_region;
3351 return io_mem_read(section->mr->ram_addr, addr, len);
blueswir1db7b5422007-05-26 17:36:03 +00003352}
3353
Avi Kivity70c68e42012-01-02 12:32:48 +02003354static void subpage_write(void *opaque, target_phys_addr_t addr,
3355 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003356{
Avi Kivity70c68e42012-01-02 12:32:48 +02003357 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003358 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02003359 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00003360#if defined(DEBUG_SUBPAGE)
Avi Kivity70c68e42012-01-02 12:32:48 +02003361 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
3362 " idx %d value %"PRIx64"\n",
Richard Hendersonf6405242010-04-22 16:47:31 -07003363 __func__, mmio, len, addr, idx, value);
blueswir1db7b5422007-05-26 17:36:03 +00003364#endif
Richard Hendersonf6405242010-04-22 16:47:31 -07003365
Avi Kivity5312bd82012-02-12 18:32:55 +02003366 section = &phys_sections[mmio->sub_section[idx]];
3367 addr += mmio->base;
3368 addr -= section->offset_within_address_space;
3369 addr += section->offset_within_region;
3370 io_mem_write(section->mr->ram_addr, addr, value, len);
blueswir1db7b5422007-05-26 17:36:03 +00003371}
3372
Avi Kivity70c68e42012-01-02 12:32:48 +02003373static const MemoryRegionOps subpage_ops = {
3374 .read = subpage_read,
3375 .write = subpage_write,
3376 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00003377};
3378
Avi Kivityde712f92012-01-02 12:41:07 +02003379static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
3380 unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01003381{
3382 ram_addr_t raddr = addr;
3383 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02003384 switch (size) {
3385 case 1: return ldub_p(ptr);
3386 case 2: return lduw_p(ptr);
3387 case 4: return ldl_p(ptr);
3388 default: abort();
3389 }
Andreas Färber56384e82011-11-30 16:26:21 +01003390}
3391
Avi Kivityde712f92012-01-02 12:41:07 +02003392static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
3393 uint64_t value, unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01003394{
3395 ram_addr_t raddr = addr;
3396 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02003397 switch (size) {
3398 case 1: return stb_p(ptr, value);
3399 case 2: return stw_p(ptr, value);
3400 case 4: return stl_p(ptr, value);
3401 default: abort();
3402 }
Andreas Färber56384e82011-11-30 16:26:21 +01003403}
3404
Avi Kivityde712f92012-01-02 12:41:07 +02003405static const MemoryRegionOps subpage_ram_ops = {
3406 .read = subpage_ram_read,
3407 .write = subpage_ram_write,
3408 .endianness = DEVICE_NATIVE_ENDIAN,
Andreas Färber56384e82011-11-30 16:26:21 +01003409};
3410
Anthony Liguoric227f092009-10-01 16:12:16 -05003411static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02003412 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00003413{
3414 int idx, eidx;
3415
3416 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3417 return -1;
3418 idx = SUBPAGE_IDX(start);
3419 eidx = SUBPAGE_IDX(end);
3420#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00003421 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
blueswir1db7b5422007-05-26 17:36:03 +00003422 __func__, mmio, start, end, idx, eidx, section);
3423#endif
Avi Kivity5312bd82012-02-12 18:32:55 +02003424 if (memory_region_is_ram(phys_sections[section].mr)) {
3425 MemoryRegionSection new_section = phys_sections[section];
3426 new_section.mr = &io_mem_subpage_ram;
3427 section = phys_section_add(&new_section);
Andreas Färber56384e82011-11-30 16:26:21 +01003428 }
blueswir1db7b5422007-05-26 17:36:03 +00003429 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02003430 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00003431 }
3432
3433 return 0;
3434}
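
/* Worked example (editor's note): subpage granularity is one byte, so
 * subpage_register(mmio, 0x100, 0x2ff, s) points sub_section[0x100]
 * through sub_section[0x2ff] at section s while the rest of the page
 * keeps the section installed by subpage_init(). */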
3435
Avi Kivity5312bd82012-02-12 18:32:55 +02003436static subpage_t *subpage_init (target_phys_addr_t base, uint16_t *section_ind,
3437 uint16_t orig_section)
blueswir1db7b5422007-05-26 17:36:03 +00003438{
Anthony Liguoric227f092009-10-01 16:12:16 -05003439 subpage_t *mmio;
Avi Kivity5312bd82012-02-12 18:32:55 +02003440 MemoryRegionSection section = {
3441 .offset_within_address_space = base,
3442 .size = TARGET_PAGE_SIZE,
3443 };
blueswir1db7b5422007-05-26 17:36:03 +00003444
Anthony Liguori7267c092011-08-20 22:09:37 -05003445 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00003446
3447 mmio->base = base;
Avi Kivity70c68e42012-01-02 12:32:48 +02003448 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
3449 "subpage", TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02003450 mmio->iomem.subpage = true;
Avi Kivity5312bd82012-02-12 18:32:55 +02003451 section.mr = &mmio->iomem;
blueswir1db7b5422007-05-26 17:36:03 +00003452#if defined(DEBUG_SUBPAGE)
aliguori1eec6142009-02-05 22:06:18 +00003453 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
 3454 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00003455#endif
Avi Kivity5312bd82012-02-12 18:32:55 +02003456 *section_ind = phys_section_add(&section);
3457 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_section);
blueswir1db7b5422007-05-26 17:36:03 +00003458
3459 return mmio;
3460}
3461
aliguori88715652009-02-11 15:20:58 +00003462static int get_free_io_mem_idx(void)
3463{
3464 int i;
3465
3466 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3467 if (!io_mem_used[i]) {
3468 io_mem_used[i] = 1;
3469 return i;
3470 }
Riku Voipioc6703b42009-12-03 15:56:05 +02003471 fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
aliguori88715652009-02-11 15:20:58 +00003472 return -1;
3473}
3474
bellard33417e72003-08-10 21:47:01 +00003475/* Associate an io zone with a MemoryRegion; reads and writes of byte,
 3476 word and dword size are dispatched through the region's
Paul Brook0b4e6e32009-04-30 18:37:55 +01003477 MemoryRegionOps.
blueswir13ee89922008-01-02 19:45:26 +00003478 If io_index is non zero, the corresponding io zone is
blueswir14254fab2008-01-01 16:57:19 +00003479 modified. If it is zero, a new io zone is allocated. The return
 3480 value can be used with cpu_register_physical_memory(). (-1) is
 3481 returned if error. */
Avi Kivitya621f382012-01-02 13:12:08 +02003482static int cpu_register_io_memory_fixed(int io_index, MemoryRegion *mr)
bellard33417e72003-08-10 21:47:01 +00003483{
bellard33417e72003-08-10 21:47:01 +00003484 if (io_index <= 0) {
aliguori88715652009-02-11 15:20:58 +00003485 io_index = get_free_io_mem_idx();
3486 if (io_index == -1)
3487 return io_index;
bellard33417e72003-08-10 21:47:01 +00003488 } else {
3489 if (io_index >= IO_MEM_NB_ENTRIES)
3490 return -1;
3491 }
bellardb5ff1b32005-11-26 10:38:39 +00003492
Avi Kivitya621f382012-01-02 13:12:08 +02003493 io_mem_region[io_index] = mr;
Richard Hendersonf6405242010-04-22 16:47:31 -07003494
Avi Kivity11c7ef02012-01-02 17:21:07 +02003495 return io_index;
bellard33417e72003-08-10 21:47:01 +00003496}
bellard61382a52003-10-27 21:22:23 +00003497
Avi Kivitya621f382012-01-02 13:12:08 +02003498int cpu_register_io_memory(MemoryRegion *mr)
Avi Kivity1eed09c2009-06-14 11:38:51 +03003499{
Avi Kivitya621f382012-01-02 13:12:08 +02003500 return cpu_register_io_memory_fixed(0, mr);
Avi Kivity1eed09c2009-06-14 11:38:51 +03003501}
3502
Avi Kivity11c7ef02012-01-02 17:21:07 +02003503void cpu_unregister_io_memory(int io_index)
aliguori88715652009-02-11 15:20:58 +00003504{
Avi Kivitya621f382012-01-02 13:12:08 +02003505 io_mem_region[io_index] = NULL;
aliguori88715652009-02-11 15:20:58 +00003506 io_mem_used[io_index] = 0;
3507}
3508
Avi Kivity5312bd82012-02-12 18:32:55 +02003509static uint16_t dummy_section(MemoryRegion *mr)
3510{
3511 MemoryRegionSection section = {
3512 .mr = mr,
3513 .offset_within_address_space = 0,
3514 .offset_within_region = 0,
3515 .size = UINT64_MAX,
3516 };
3517
3518 return phys_section_add(&section);
3519}
3520
Avi Kivitye9179ce2009-06-14 11:38:52 +03003521static void io_mem_init(void)
3522{
3523 int i;
3524
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003525 /* Must be first: */
3526 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
3527 assert(io_mem_ram.ram_addr == 0);
3528 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
3529 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
3530 "unassigned", UINT64_MAX);
3531 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
3532 "notdirty", UINT64_MAX);
Avi Kivityde712f92012-01-02 12:41:07 +02003533 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
3534 "subpage-ram", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003535 for (i=0; i<5; i++)
3536 io_mem_used[i] = 1;
3537
Avi Kivity1ec9b902012-01-02 12:47:48 +02003538 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
3539 "watch", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003540}
3541
Avi Kivity50c1e142012-02-08 21:36:02 +02003542static void core_begin(MemoryListener *listener)
3543{
Avi Kivity54688b12012-02-09 17:34:32 +02003544 destroy_all_mappings();
Avi Kivity5312bd82012-02-12 18:32:55 +02003545 phys_sections_clear();
3546 phys_section_unassigned = dummy_section(&io_mem_unassigned);
Avi Kivity50c1e142012-02-08 21:36:02 +02003547}
3548
3549static void core_commit(MemoryListener *listener)
3550{
3551}
3552
Avi Kivity93632742012-02-08 16:54:16 +02003553static void core_region_add(MemoryListener *listener,
3554 MemoryRegionSection *section)
3555{
Avi Kivity4855d412012-02-08 21:16:05 +02003556 cpu_register_physical_memory_log(section, section->readonly);
Avi Kivity93632742012-02-08 16:54:16 +02003557}
3558
3559static void core_region_del(MemoryListener *listener,
3560 MemoryRegionSection *section)
3561{
Avi Kivity93632742012-02-08 16:54:16 +02003562}
3563
Avi Kivity50c1e142012-02-08 21:36:02 +02003564static void core_region_nop(MemoryListener *listener,
3565 MemoryRegionSection *section)
3566{
Avi Kivity54688b12012-02-09 17:34:32 +02003567 cpu_register_physical_memory_log(section, section->readonly);
Avi Kivity50c1e142012-02-08 21:36:02 +02003568}
3569
Avi Kivity93632742012-02-08 16:54:16 +02003570static void core_log_start(MemoryListener *listener,
3571 MemoryRegionSection *section)
3572{
3573}
3574
3575static void core_log_stop(MemoryListener *listener,
3576 MemoryRegionSection *section)
3577{
3578}
3579
3580static void core_log_sync(MemoryListener *listener,
3581 MemoryRegionSection *section)
3582{
3583}
3584
3585static void core_log_global_start(MemoryListener *listener)
3586{
3587 cpu_physical_memory_set_dirty_tracking(1);
3588}
3589
3590static void core_log_global_stop(MemoryListener *listener)
3591{
3592 cpu_physical_memory_set_dirty_tracking(0);
3593}
3594
3595static void core_eventfd_add(MemoryListener *listener,
3596 MemoryRegionSection *section,
3597 bool match_data, uint64_t data, int fd)
3598{
3599}
3600
3601static void core_eventfd_del(MemoryListener *listener,
3602 MemoryRegionSection *section,
3603 bool match_data, uint64_t data, int fd)
3604{
3605}
3606
Avi Kivity50c1e142012-02-08 21:36:02 +02003607static void io_begin(MemoryListener *listener)
3608{
3609}
3610
3611static void io_commit(MemoryListener *listener)
3612{
3613}
3614
Avi Kivity4855d412012-02-08 21:16:05 +02003615static void io_region_add(MemoryListener *listener,
3616 MemoryRegionSection *section)
3617{
3618 iorange_init(&section->mr->iorange, &memory_region_iorange_ops,
3619 section->offset_within_address_space, section->size);
3620 ioport_register(&section->mr->iorange);
3621}
3622
3623static void io_region_del(MemoryListener *listener,
3624 MemoryRegionSection *section)
3625{
3626 isa_unassign_ioport(section->offset_within_address_space, section->size);
3627}
3628
Avi Kivity50c1e142012-02-08 21:36:02 +02003629static void io_region_nop(MemoryListener *listener,
3630 MemoryRegionSection *section)
3631{
3632}
3633
Avi Kivity4855d412012-02-08 21:16:05 +02003634static void io_log_start(MemoryListener *listener,
3635 MemoryRegionSection *section)
3636{
3637}
3638
3639static void io_log_stop(MemoryListener *listener,
3640 MemoryRegionSection *section)
3641{
3642}
3643
3644static void io_log_sync(MemoryListener *listener,
3645 MemoryRegionSection *section)
3646{
3647}
3648
3649static void io_log_global_start(MemoryListener *listener)
3650{
3651}
3652
3653static void io_log_global_stop(MemoryListener *listener)
3654{
3655}
3656
3657static void io_eventfd_add(MemoryListener *listener,
3658 MemoryRegionSection *section,
3659 bool match_data, uint64_t data, int fd)
3660{
3661}
3662
3663static void io_eventfd_del(MemoryListener *listener,
3664 MemoryRegionSection *section,
3665 bool match_data, uint64_t data, int fd)
3666{
3667}
3668
Avi Kivity93632742012-02-08 16:54:16 +02003669static MemoryListener core_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02003670 .begin = core_begin,
3671 .commit = core_commit,
Avi Kivity93632742012-02-08 16:54:16 +02003672 .region_add = core_region_add,
3673 .region_del = core_region_del,
Avi Kivity50c1e142012-02-08 21:36:02 +02003674 .region_nop = core_region_nop,
Avi Kivity93632742012-02-08 16:54:16 +02003675 .log_start = core_log_start,
3676 .log_stop = core_log_stop,
3677 .log_sync = core_log_sync,
3678 .log_global_start = core_log_global_start,
3679 .log_global_stop = core_log_global_stop,
3680 .eventfd_add = core_eventfd_add,
3681 .eventfd_del = core_eventfd_del,
3682 .priority = 0,
3683};
3684
Avi Kivity4855d412012-02-08 21:16:05 +02003685static MemoryListener io_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02003686 .begin = io_begin,
3687 .commit = io_commit,
Avi Kivity4855d412012-02-08 21:16:05 +02003688 .region_add = io_region_add,
3689 .region_del = io_region_del,
Avi Kivity50c1e142012-02-08 21:36:02 +02003690 .region_nop = io_region_nop,
Avi Kivity4855d412012-02-08 21:16:05 +02003691 .log_start = io_log_start,
3692 .log_stop = io_log_stop,
3693 .log_sync = io_log_sync,
3694 .log_global_start = io_log_global_start,
3695 .log_global_stop = io_log_global_stop,
3696 .eventfd_add = io_eventfd_add,
3697 .eventfd_del = io_eventfd_del,
3698 .priority = 0,
3699};
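
#if 0
/* Editor's sketch (not compiled): a further MemoryListener that only
 * cares about region_add.  Every callback must still be provided, since
 * the dispatch code does not check for NULL; the empty io_* and core_*
 * stubs above are reused here for brevity. */
static void sketch_region_add(MemoryListener *listener,
                              MemoryRegionSection *section)
{
    fprintf(stderr, "mapped " TARGET_FMT_plx " len %" PRIx64 "\n",
            section->offset_within_address_space, section->size);
}

static MemoryListener sketch_memory_listener = {
    .begin = io_begin,
    .commit = io_commit,
    .region_add = sketch_region_add,
    .region_del = core_region_del,
    .region_nop = io_region_nop,
    .log_start = io_log_start,
    .log_stop = io_log_stop,
    .log_sync = io_log_sync,
    .log_global_start = io_log_global_start,
    .log_global_stop = io_log_global_stop,
    .eventfd_add = io_eventfd_add,
    .eventfd_del = io_eventfd_del,
    .priority = 1,
};

/* Registered against an address space the same way as the built-in
 * listeners in memory_map_init():
 *     memory_listener_register(&sketch_memory_listener, system_memory);
 */
#endif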
3700
Avi Kivity62152b82011-07-26 14:26:14 +03003701static void memory_map_init(void)
3702{
Anthony Liguori7267c092011-08-20 22:09:37 -05003703 system_memory = g_malloc(sizeof(*system_memory));
Avi Kivity8417ceb2011-08-03 11:56:14 +03003704 memory_region_init(system_memory, "system", INT64_MAX);
Avi Kivity62152b82011-07-26 14:26:14 +03003705 set_system_memory_map(system_memory);
Avi Kivity309cb472011-08-08 16:09:03 +03003706
Anthony Liguori7267c092011-08-20 22:09:37 -05003707 system_io = g_malloc(sizeof(*system_io));
Avi Kivity309cb472011-08-08 16:09:03 +03003708 memory_region_init(system_io, "io", 65536);
3709 set_system_io_map(system_io);
Avi Kivity93632742012-02-08 16:54:16 +02003710
Avi Kivity4855d412012-02-08 21:16:05 +02003711 memory_listener_register(&core_memory_listener, system_memory);
3712 memory_listener_register(&io_memory_listener, system_io);
Avi Kivity62152b82011-07-26 14:26:14 +03003713}
3714
3715MemoryRegion *get_system_memory(void)
3716{
3717 return system_memory;
3718}
3719
Avi Kivity309cb472011-08-08 16:09:03 +03003720MemoryRegion *get_system_io(void)
3721{
3722 return system_io;
3723}
3724
pbrooke2eef172008-06-08 01:09:01 +00003725#endif /* !defined(CONFIG_USER_ONLY) */
3726
bellard13eb76e2004-01-24 15:23:36 +00003727/* physical memory access (slow version, mainly for debug) */
3728#if defined(CONFIG_USER_ONLY)
Paul Brooka68fe892010-03-01 00:08:59 +00003729int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3730 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003731{
3732 int l, flags;
3733 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00003734 void * p;
bellard13eb76e2004-01-24 15:23:36 +00003735
3736 while (len > 0) {
3737 page = addr & TARGET_PAGE_MASK;
3738 l = (page + TARGET_PAGE_SIZE) - addr;
3739 if (l > len)
3740 l = len;
3741 flags = page_get_flags(page);
3742 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00003743 return -1;
bellard13eb76e2004-01-24 15:23:36 +00003744 if (is_write) {
3745 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00003746 return -1;
bellard579a97f2007-11-11 14:26:47 +00003747 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003748 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00003749 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003750 memcpy(p, buf, l);
3751 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00003752 } else {
3753 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00003754 return -1;
bellard579a97f2007-11-11 14:26:47 +00003755 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003756 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00003757 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003758 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00003759 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00003760 }
3761 len -= l;
3762 buf += l;
3763 addr += l;
3764 }
Paul Brooka68fe892010-03-01 00:08:59 +00003765 return 0;
bellard13eb76e2004-01-24 15:23:36 +00003766}
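
#if 0
/* Editor's sketch (not compiled): this is the accessor the gdb stub
 * uses to peek at guest memory without going through the TLB of the
 * emulated CPU. */
static int sketch_read_guest_u32(CPUState *env, target_ulong addr,
                                 uint32_t *out)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(env, addr, buf, sizeof(buf), 0) < 0) {
        return -1;      /* page unmapped or not readable */
    }
    *out = ldl_p(buf);  /* target-endian load from the bounce buffer */
    return 0;
}
#endif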

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    ram_addr_t pd;
    PhysPageDesc p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        pd = p.phys_offset;

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
                target_phys_addr_t addr1;
                io_index = pd & (IO_MEM_NB_ENTRIES - 1);
                addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write(io_index, addr1, val, 4);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write(io_index, addr1, val, 2);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write(io_index, addr1, val, 1);
                    l = 1;
                }
            } else {
                ram_addr_t addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                qemu_put_ram_ptr(ptr);
            }
        } else {
            if (!is_ram_rom_romd(pd)) {
                target_phys_addr_t addr1;
                /* I/O case */
                io_index = pd & (IO_MEM_NB_ENTRIES - 1);
                addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read(io_index, addr1, 4);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read(io_index, addr1, 2);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read(io_index, addr1, 1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
                memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
                qemu_put_ram_ptr(ptr);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

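/* Illustrative sketch, compiled out: how a device model typically calls
 * cpu_physical_memory_rw() for DMA in both directions.  The guest address
 * and buffers below are hypothetical, not part of this file. */
#if 0
static void example_dma_roundtrip(void)
{
    uint8_t pattern[16] = { 0xde, 0xad, 0xbe, 0xef };
    uint8_t readback[16];
    target_phys_addr_t guest_pa = 0x100000;   /* hypothetical guest address */

    /* is_write != 0: copy from our buffer into guest memory (or MMIO) */
    cpu_physical_memory_rw(guest_pa, pattern, sizeof(pattern), 1);
    /* is_write == 0: copy from guest memory back into our buffer */
    cpu_physical_memory_rw(guest_pa, readback, sizeof(readback), 0);
}
#endif
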
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        pd = p.phys_offset;

        if (!is_ram_rom_romd(pd)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            qemu_put_ram_ptr(ptr);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

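/* Illustrative sketch, compiled out: ROM loaders go through
 * cpu_physical_memory_write_rom() because plain cpu_physical_memory_rw()
 * does not commit writes into ROM regions.  The blob and base address are
 * hypothetical. */
#if 0
static void example_load_firmware(const uint8_t *blob, int blob_len)
{
    target_phys_addr_t rom_base = 0xfffc0000;   /* hypothetical ROM base */

    cpu_physical_memory_write_rom(rom_base, blob, blob_len);
}
#endif
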
typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}

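/* Illustrative sketch, compiled out: a caller whose cpu_physical_memory_map()
 * returned NULL (bounce buffer busy) can register a map client; the callback
 * fires from cpu_notify_map_clients() once the bounce buffer is freed, which
 * is the cue to retry.  The device type and helpers are hypothetical. */
#if 0
typedef struct ExampleDevice ExampleDevice;     /* hypothetical device state */
static void example_continue_dma(ExampleDevice *dev);

static void example_map_retry_cb(void *opaque)
{
    example_continue_dma(opaque);   /* re-kick the stalled DMA state machine */
}

static void example_start_dma(ExampleDevice *dev, target_phys_addr_t dma_addr,
                              target_phys_addr_t dma_len)
{
    void *host = cpu_physical_memory_map(dma_addr, &dma_len, 1);

    if (!host) {
        cpu_register_map_client(dev, example_map_retry_cb);
        return;
    }
    /* ... fill 'host' with up to dma_len bytes ... */
    cpu_physical_memory_unmap(host, dma_len, 1, dma_len);
}
#endif
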
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t todo = 0;
    int l;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc p;
    ram_addr_t raddr = RAM_ADDR_MAX;
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        pd = p.phys_offset;

        if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_read(addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        }

        len -= l;
        addr += l;
        todo += l;
    }
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}

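/* Illustrative sketch, compiled out: the canonical map/use/unmap pattern.
 * Over plain RAM this is zero-copy; over MMIO the map call hands back the
 * single one-page bounce buffer instead, and unmap flushes it out. */
#if 0
static void example_zero_fill(target_phys_addr_t guest_pa,
                              target_phys_addr_t want)
{
    target_phys_addr_t plen = want;
    void *host = cpu_physical_memory_map(guest_pa, &plen, 1 /* is_write */);

    if (!host) {
        return;   /* resources exhausted; see cpu_register_map_client() */
    }
    /* plen may come back smaller than 'want'; touch only what was mapped */
    memset(host, 0, plen);
    cpu_physical_memory_unmap(host, plen, 1, plen);
}
#endif
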
/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if (!is_ram_rom_romd(pd)) {
        /* I/O case */
        io_index = pd & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
        val = io_mem_read(io_index, addr, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

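/* Illustrative sketch, compiled out: the _le/_be variants pin the byte order
 * of the access regardless of the target's native endianness, which is what
 * device models with an architecturally fixed layout want. */
#if 0
static uint32_t example_read_le_register(target_phys_addr_t reg_pa)
{
    /* A little-endian-by-spec register (e.g. PCI config) reads the same on
       big- and little-endian targets this way; ldl_phys() would not. */
    return ldl_le_phys(reg_pa);
}
#endif
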
/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if (!is_ram_rom_romd(pd)) {
        /* I/O case */
        io_index = pd & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;

        /* XXX This is broken when device endian != cpu endian.
           Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = io_mem_read(io_index, addr, 4) << 32;
        val |= io_mem_read(io_index, addr + 4, 4);
#else
        val = io_mem_read(io_index, addr, 4);
        val |= io_mem_read(io_index, addr + 4, 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
                                          enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if (!is_ram_rom_romd(pd)) {
        /* I/O case */
        io_index = pd & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
        val = io_mem_read(io_index, addr, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
        io_index = pd & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
        io_mem_write(io_index, addr, val, 4);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}

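/* Illustrative sketch, compiled out: target MMU code can use the _notdirty
 * store to set accessed/dirty flags in a guest PTE without marking the PTE's
 * own page dirty, so the dirty bitmap keeps reflecting guest-initiated
 * modifications only.  The PTE bit layout below is hypothetical. */
#if 0
static void example_set_pte_accessed(target_phys_addr_t pte_pa)
{
    uint32_t pte = ldl_phys(pte_pa);

    pte |= 0x20;                      /* hypothetical "accessed" bit */
    stl_phys_notdirty(pte_pa, pte);   /* page stays clean in the dirty bitmap */
}
#endif
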
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
        io_index = pd & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write(io_index, addr, val >> 32, 4);
        io_mem_write(io_index, addr + 4, (uint32_t)val, 4);
#else
        io_mem_write(io_index, addr, (uint32_t)val, 4);
        io_mem_write(io_index, addr + 4, val >> 32, 4);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
        io_index = pd & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(io_index, addr, val, 4);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
        io_index = pd & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(io_index, addr, val, 2);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif

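/* Illustrative sketch, compiled out: the gdbstub reads guest memory through
 * cpu_memory_rw_debug(), which walks the guest page tables page by page via
 * cpu_get_phys_page_debug() and can write even through ROM protection. */
#if 0
static int example_peek_guest(CPUState *env, target_ulong va,
                              uint8_t *out, int n)
{
    /* returns -1 if any page in [va, va + n) is unmapped */
    return cpu_memory_rw_debug(env, va, out, n, 0);
}
#endif
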
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}

#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
   is the offset relative to phys_ram_base */
tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    void *p;

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1);
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
        ldub_code(addr);
    }
    pd = env1->tlb_table[mmu_idx][page_index].addr_code & ~TARGET_PAGE_MASK;
    if (pd != io_mem_ram.ram_addr && pd != io_mem_rom.ram_addr
        && !is_romd(pd)) {
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC)
        cpu_unassigned_access(env1, addr, 0, 1, 0, 4);
#else
        cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
#endif
    }
    p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

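/* The includes below stamp out the softmmu access templates once per access
 * size: SHIFT n selects a (1 << n)-byte access, and MMUSUFFIX _cmmu together
 * with SOFTMMU_CODE_ACCESS makes the generated helpers fetch through the
 * code-TLB entry (addr_code) rather than the data entry.  A compiled-out,
 * purely illustrative view of the SHIFT -> size mapping: */
#if 0
static const int softmmu_access_bytes[] = {
    [0] = 1,   /* SHIFT 0: byte */
    [1] = 2,   /* SHIFT 1: 16-bit */
    [2] = 4,   /* SHIFT 2: 32-bit */
    [3] = 8,   /* SHIFT 3: 64-bit */
};
#endif
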
#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif
4650#endif