blob: c7697acace79c1700abf4bbbe939e140d503b326 [file] [log] [blame]
bellard54936002003-05-13 00:25:15 +00001/*
bellardfd6ce8f2003-05-14 19:00:11 +00002 * virtual page mapping and translated block handling
ths5fafdf22007-09-16 21:08:06 +00003 *
bellard54936002003-05-13 00:25:15 +00004 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
Blue Swirl8167ee82009-07-16 20:47:01 +000017 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
bellard54936002003-05-13 00:25:15 +000018 */
bellard67b915a2004-03-31 23:37:16 +000019#include "config.h"
bellardd5a8f072004-09-29 21:15:28 +000020#ifdef _WIN32
21#include <windows.h>
22#else
bellarda98d49b2004-11-14 16:22:05 +000023#include <sys/types.h>
bellardd5a8f072004-09-29 21:15:28 +000024#include <sys/mman.h>
25#endif
bellard54936002003-05-13 00:25:15 +000026#include <stdlib.h>
27#include <stdio.h>
28#include <stdarg.h>
29#include <string.h>
30#include <errno.h>
31#include <unistd.h>
32#include <inttypes.h>
33
bellard6180a182003-09-30 21:04:53 +000034#include "cpu.h"
35#include "exec-all.h"
aurel32ca10f862008-04-11 21:35:42 +000036#include "qemu-common.h"
bellardb67d9a52008-05-23 09:57:34 +000037#include "tcg.h"
pbrookb3c77242008-06-30 16:31:04 +000038#include "hw/hw.h"
aliguori74576192008-10-06 14:02:03 +000039#include "osdep.h"
aliguori7ba1e612008-11-05 16:04:33 +000040#include "kvm.h"
pbrook53a59602006-03-25 19:31:22 +000041#if defined(CONFIG_USER_ONLY)
42#include <qemu.h>
Riku Voipiofd052bf2010-01-25 14:30:49 +020043#include <signal.h>
pbrook53a59602006-03-25 19:31:22 +000044#endif
bellard54936002003-05-13 00:25:15 +000045
bellardfd6ce8f2003-05-14 19:00:11 +000046//#define DEBUG_TB_INVALIDATE
bellard66e85a22003-06-24 13:28:12 +000047//#define DEBUG_FLUSH
bellard9fa3e852004-01-04 18:06:42 +000048//#define DEBUG_TLB
pbrook67d3b952006-12-18 05:03:52 +000049//#define DEBUG_UNASSIGNED
bellardfd6ce8f2003-05-14 19:00:11 +000050
51/* make various TB consistency checks */
ths5fafdf22007-09-16 21:08:06 +000052//#define DEBUG_TB_CHECK
53//#define DEBUG_TLB_CHECK
bellardfd6ce8f2003-05-14 19:00:11 +000054
ths1196be32007-03-17 15:17:58 +000055//#define DEBUG_IOPORT
blueswir1db7b5422007-05-26 17:36:03 +000056//#define DEBUG_SUBPAGE
ths1196be32007-03-17 15:17:58 +000057
pbrook99773bd2006-04-16 15:14:59 +000058#if !defined(CONFIG_USER_ONLY)
59/* TB consistency checks only implemented for usermode emulation. */
60#undef DEBUG_TB_CHECK
61#endif
62
bellard9fa3e852004-01-04 18:06:42 +000063#define SMC_BITMAP_USE_THRESHOLD 10
64
blueswir1bdaf78e2008-10-04 07:24:27 +000065static TranslationBlock *tbs;
bellard26a5f132008-05-28 12:30:31 +000066int code_gen_max_blocks;
bellard9fa3e852004-01-04 18:06:42 +000067TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
blueswir1bdaf78e2008-10-04 07:24:27 +000068static int nb_tbs;
bellardeb51d102003-05-14 21:51:13 +000069/* any access to the tbs or the page table must use this lock */
Anthony Liguoric227f092009-10-01 16:12:16 -050070spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
bellardfd6ce8f2003-05-14 19:00:11 +000071
blueswir1141ac462008-07-26 15:05:57 +000072#if defined(__arm__) || defined(__sparc_v9__)
73/* The prologue must be reachable with a direct jump. ARM and Sparc64
74 have limited branch ranges (possibly also PPC) so place it in a
blueswir1d03d8602008-07-10 17:21:31 +000075 section close to code segment. */
76#define code_gen_section \
77 __attribute__((__section__(".gen_code"))) \
78 __attribute__((aligned (32)))
Stefan Weilf8e2af12009-06-18 23:04:48 +020079#elif defined(_WIN32)
80/* Maximum alignment for Win32 is 16. */
81#define code_gen_section \
82 __attribute__((aligned (16)))
blueswir1d03d8602008-07-10 17:21:31 +000083#else
84#define code_gen_section \
85 __attribute__((aligned (32)))
86#endif
87
88uint8_t code_gen_prologue[1024] code_gen_section;
blueswir1bdaf78e2008-10-04 07:24:27 +000089static uint8_t *code_gen_buffer;
90static unsigned long code_gen_buffer_size;
bellard26a5f132008-05-28 12:30:31 +000091/* threshold to flush the translated code buffer */
blueswir1bdaf78e2008-10-04 07:24:27 +000092static unsigned long code_gen_buffer_max_size;
bellardfd6ce8f2003-05-14 19:00:11 +000093uint8_t *code_gen_ptr;
94
pbrooke2eef172008-06-08 01:09:01 +000095#if !defined(CONFIG_USER_ONLY)
bellard9fa3e852004-01-04 18:06:42 +000096int phys_ram_fd;
bellard1ccde1c2004-02-06 19:46:14 +000097uint8_t *phys_ram_dirty;
aliguori74576192008-10-06 14:02:03 +000098static int in_migration;
pbrook94a6b542009-04-11 17:15:54 +000099
100typedef struct RAMBlock {
101 uint8_t *host;
Anthony Liguoric227f092009-10-01 16:12:16 -0500102 ram_addr_t offset;
103 ram_addr_t length;
pbrook94a6b542009-04-11 17:15:54 +0000104 struct RAMBlock *next;
105} RAMBlock;
106
107static RAMBlock *ram_blocks;
108/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
Stuart Bradyccbb4d42009-05-03 12:15:06 +0100109 then we can no longer assume contiguous ram offsets, and external uses
pbrook94a6b542009-04-11 17:15:54 +0000110 of this variable will break. */
Anthony Liguoric227f092009-10-01 16:12:16 -0500111ram_addr_t last_ram_offset;
pbrooke2eef172008-06-08 01:09:01 +0000112#endif
bellard9fa3e852004-01-04 18:06:42 +0000113
bellard6a00d602005-11-21 23:25:50 +0000114CPUState *first_cpu;
115/* current CPU in the current thread. It is only valid inside
116 cpu_exec() */
ths5fafdf22007-09-16 21:08:06 +0000117CPUState *cpu_single_env;
pbrook2e70f6e2008-06-29 01:03:05 +0000118/* 0 = Do not count executed instructions.
thsbf20dc02008-06-30 17:22:19 +0000119 1 = Precise instruction counting.
pbrook2e70f6e2008-06-29 01:03:05 +0000120 2 = Adaptive rate instruction counting. */
121int use_icount = 0;
122/* Current instruction counter. While executing translated code this may
123 include some instructions that have not yet been executed. */
124int64_t qemu_icount;
bellard6a00d602005-11-21 23:25:50 +0000125
bellard54936002003-05-13 00:25:15 +0000126typedef struct PageDesc {
bellard92e873b2004-05-21 14:52:29 +0000127 /* list of TBs intersecting this ram page */
bellardfd6ce8f2003-05-14 19:00:11 +0000128 TranslationBlock *first_tb;
bellard9fa3e852004-01-04 18:06:42 +0000129 /* in order to optimize self modifying code, we count the number
130 of lookups we do to a given page to use a bitmap */
131 unsigned int code_write_count;
132 uint8_t *code_bitmap;
133#if defined(CONFIG_USER_ONLY)
134 unsigned long flags;
135#endif
bellard54936002003-05-13 00:25:15 +0000136} PageDesc;
137
Paul Brook41c1b1c2010-03-12 16:54:58 +0000138/* In system mode we want L1_MAP to be based on ram offsets,
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800139 while in user mode we want it to be based on virtual addresses. */
140#if !defined(CONFIG_USER_ONLY)
Paul Brook41c1b1c2010-03-12 16:54:58 +0000141#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
142# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
143#else
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800144# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
Paul Brook41c1b1c2010-03-12 16:54:58 +0000145#endif
j_mayerbedb69e2007-04-05 20:08:21 +0000146#else
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800147# define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
j_mayerbedb69e2007-04-05 20:08:21 +0000148#endif
bellard54936002003-05-13 00:25:15 +0000149
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800150/* Size of the L2 (and L3, etc) page tables. */
151#define L2_BITS 10
bellard54936002003-05-13 00:25:15 +0000152#define L2_SIZE (1 << L2_BITS)
153
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800154/* The bits remaining after N lower levels of page tables. */
155#define P_L1_BITS_REM \
156 ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
157#define V_L1_BITS_REM \
158 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
159
160/* Size of the L1 page table. Avoid silly small sizes. */
161#if P_L1_BITS_REM < 4
162#define P_L1_BITS (P_L1_BITS_REM + L2_BITS)
163#else
164#define P_L1_BITS P_L1_BITS_REM
165#endif
166
167#if V_L1_BITS_REM < 4
168#define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
169#else
170#define V_L1_BITS V_L1_BITS_REM
171#endif
172
173#define P_L1_SIZE ((target_phys_addr_t)1 << P_L1_BITS)
174#define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
175
176#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
177#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
178
bellard83fb7ad2004-07-05 21:25:26 +0000179unsigned long qemu_real_host_page_size;
180unsigned long qemu_host_page_bits;
181unsigned long qemu_host_page_size;
182unsigned long qemu_host_page_mask;
bellard54936002003-05-13 00:25:15 +0000183
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800184/* This is a multi-level map on the virtual address space.
185 The bottom level has pointers to PageDesc. */
186static void *l1_map[V_L1_SIZE];
bellard54936002003-05-13 00:25:15 +0000187
pbrooke2eef172008-06-08 01:09:01 +0000188#if !defined(CONFIG_USER_ONLY)
Paul Brook41c1b1c2010-03-12 16:54:58 +0000189typedef struct PhysPageDesc {
190 /* offset in host memory of the page + io_index in the low bits */
191 ram_addr_t phys_offset;
192 ram_addr_t region_offset;
193} PhysPageDesc;
194
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800195/* This is a multi-level map on the physical address space.
196 The bottom level has pointers to PhysPageDesc. */
197static void *l1_phys_map[P_L1_SIZE];
Paul Brook6d9a1302010-02-28 23:55:53 +0000198
pbrooke2eef172008-06-08 01:09:01 +0000199static void io_mem_init(void);
200
bellard33417e72003-08-10 21:47:01 +0000201/* io memory support */
bellard33417e72003-08-10 21:47:01 +0000202CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
203CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
bellarda4193c82004-06-03 14:01:43 +0000204void *io_mem_opaque[IO_MEM_NB_ENTRIES];
blueswir1511d2b12009-03-07 15:32:56 +0000205static char io_mem_used[IO_MEM_NB_ENTRIES];
pbrook6658ffb2007-03-16 23:58:11 +0000206static int io_mem_watch;
207#endif
bellard33417e72003-08-10 21:47:01 +0000208
bellard34865132003-10-05 14:28:56 +0000209/* log support */
Juha Riihimäki1e8b27c2009-12-03 15:56:02 +0200210#ifdef WIN32
211static const char *logfilename = "qemu.log";
212#else
blueswir1d9b630f2008-10-05 09:57:08 +0000213static const char *logfilename = "/tmp/qemu.log";
Juha Riihimäki1e8b27c2009-12-03 15:56:02 +0200214#endif
bellard34865132003-10-05 14:28:56 +0000215FILE *logfile;
216int loglevel;
pbrooke735b912007-06-30 13:53:24 +0000217static int log_append = 0;
bellard34865132003-10-05 14:28:56 +0000218
bellarde3db7222005-01-26 22:00:47 +0000219/* statistics */
220static int tlb_flush_count;
221static int tb_flush_count;
222static int tb_phys_invalidate_count;
223
bellard7cb69ca2008-05-10 10:55:51 +0000224#ifdef _WIN32
225static void map_exec(void *addr, long size)
226{
227 DWORD old_protect;
228 VirtualProtect(addr, size,
229 PAGE_EXECUTE_READWRITE, &old_protect);
230
231}
232#else
233static void map_exec(void *addr, long size)
234{
bellard43694152008-05-29 09:35:57 +0000235 unsigned long start, end, page_size;
bellard7cb69ca2008-05-10 10:55:51 +0000236
bellard43694152008-05-29 09:35:57 +0000237 page_size = getpagesize();
bellard7cb69ca2008-05-10 10:55:51 +0000238 start = (unsigned long)addr;
bellard43694152008-05-29 09:35:57 +0000239 start &= ~(page_size - 1);
bellard7cb69ca2008-05-10 10:55:51 +0000240
241 end = (unsigned long)addr + size;
bellard43694152008-05-29 09:35:57 +0000242 end += page_size - 1;
243 end &= ~(page_size - 1);
bellard7cb69ca2008-05-10 10:55:51 +0000244
245 mprotect((void *)start, end - start,
246 PROT_READ | PROT_WRITE | PROT_EXEC);
247}
248#endif
249
/* Initialise the host/target page-size globals (qemu_real_host_page_size,
   qemu_host_page_size/bits/mask).  In user-mode emulation this additionally
   parses /proc/self/maps and marks every range already mapped in the host
   process as PAGE_RESERVED -- presumably so guest mappings avoid those
   ranges; TODO(review) confirm against the mmap code that consumes the
   flag. */
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    /* qemu_host_page_size may have been set by the caller; never let it
       drop below the real host page size or the target page size. */
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    /* Derive log2 and the alignment mask from the chosen page size. */
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                /* Each maps line starts with "start-end "; the trailing
                   fields are skipped by the %*[^\n] conversion. */
                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        /* Range extends past the guest address space:
                           reserve everything up to the top. */
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
    }
#endif
}
307
/* Walk the multi-level virtual-address page table l1_map and return the
   PageDesc for page number 'index' (a tb_page_addr_t already shifted by
   TARGET_PAGE_BITS -- TODO(review) confirm with callers).  If 'alloc' is
   zero, missing intermediate tables cause a NULL return; otherwise they
   are allocated on the way down. */
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use qemu_malloc because it may recurse into a locked mutex.
       Neither can we record the new pages we reserve while allocating a
       given page because that may recurse into an unallocated page table
       entry.  Stuff the allocations we do make into a queue and process
       them after having completed one entire page table allocation.  */

    /* Two slots (address, length) per possible level of the walk. */
    unsigned long reserve[2 * (V_L1_SHIFT / L2_BITS)];
    int reserve_idx = 0;

    /* NOTE(review): the mmap() result is not checked for MAP_FAILED
       before use; h2g_valid(MAP_FAILED) merely keeps it out of the
       reserve queue. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
        if (h2g_valid(P)) {                             \
            reserve[reserve_idx] = h2g(P);              \
            reserve[reserve_idx + 1] = SIZE;            \
            reserve_idx += 2;                           \
        }                                               \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = qemu_mallocz(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  Each intermediate level is an array of L2_SIZE
       pointers to the next level down. */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    /* Bottom level: an array of L2_SIZE PageDesc structures. */
    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC
#if defined(CONFIG_USER_ONLY)
    /* Now that the walk is complete, it is safe to mark the pages we
       reserved above (page_set_flags may itself re-enter this walker). */
    for (i = 0; i < reserve_idx; i += 2) {
        unsigned long addr = reserve[i];
        unsigned long len = reserve[i + 1];

        page_set_flags(addr & TARGET_PAGE_MASK,
                       TARGET_PAGE_ALIGN(addr + len),
                       PAGE_RESERVED);
    }
#endif

    return pd + (index & (L2_SIZE - 1));
}
380
Paul Brook41c1b1c2010-03-12 16:54:58 +0000381static inline PageDesc *page_find(tb_page_addr_t index)
bellard54936002003-05-13 00:25:15 +0000382{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800383 return page_find_alloc(index, 0);
bellard54936002003-05-13 00:25:15 +0000384}
385
Paul Brook6d9a1302010-02-28 23:55:53 +0000386#if !defined(CONFIG_USER_ONLY)
/* Walk the multi-level physical-address map l1_phys_map and return the
   PhysPageDesc for physical page number 'index'.  If 'alloc' is zero,
   missing intermediate tables cause a NULL return; otherwise they are
   allocated (here plain qemu_malloc/qemu_mallocz is safe, unlike in
   page_find_alloc). */
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    /* Level 2..N-1.  Intermediate levels are zero-initialised pointer
       arrays, so an absent child reads as NULL. */
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        int i;   /* NOTE(review): shadows the outer 'i'; harmless here. */

        if (!alloc) {
            return NULL;
        }

        *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);

        /* Fresh bottom-level entries start out unassigned; the region
           offset defaults to the page's own physical address. */
        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}
426
Anthony Liguoric227f092009-10-01 16:12:16 -0500427static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
bellard92e873b2004-05-21 14:52:29 +0000428{
bellard108c49b2005-07-24 12:55:09 +0000429 return phys_page_find_alloc(index, 0);
bellard92e873b2004-05-21 14:52:29 +0000430}
431
Anthony Liguoric227f092009-10-01 16:12:16 -0500432static void tlb_protect_code(ram_addr_t ram_addr);
433static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
bellard3a7d9292005-08-21 09:26:42 +0000434 target_ulong vaddr);
pbrookc8a706f2008-06-02 16:16:42 +0000435#define mmap_lock() do { } while(0)
436#define mmap_unlock() do { } while(0)
bellard9fa3e852004-01-04 18:06:42 +0000437#endif
bellardfd6ce8f2003-05-14 19:00:11 +0000438
bellard43694152008-05-29 09:35:57 +0000439#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
440
441#if defined(CONFIG_USER_ONLY)
Stuart Bradyccbb4d42009-05-03 12:15:06 +0100442/* Currently it is not recommended to allocate big chunks of data in
bellard43694152008-05-29 09:35:57 +0000443 user mode. It will change when a dedicated libc will be used */
444#define USE_STATIC_CODE_GEN_BUFFER
445#endif
446
447#ifdef USE_STATIC_CODE_GEN_BUFFER
448static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
449#endif
450
/* Allocate (or adopt) the buffer that will hold generated host code, make
   it executable, and size the TranslationBlock array accordingly.
   'tb_size' is the requested buffer size in bytes; zero selects a
   default.  Placement constraints are host-CPU/OS specific: several
   targets need the buffer within direct-branch range of the prologue. */
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    /* User-mode build: use the statically allocated buffer. */
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* Keep the buffer in the low 4GB so 32-bit displacements work. */
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    /* Fallback: plain allocation, then mprotect it executable. */
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    /* Reserve headroom for one worst-case block so translation never
       writes past the end of the buffer. */
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
537
538/* Must be called before using the QEMU cpus. 'tb_size' is the size
539 (in bytes) allocated to the translation buffer. Zero means default
540 size. */
541void cpu_exec_init_all(unsigned long tb_size)
542{
bellard26a5f132008-05-28 12:30:31 +0000543 cpu_gen_init();
544 code_gen_alloc(tb_size);
545 code_gen_ptr = code_gen_buffer;
bellard43694152008-05-29 09:35:57 +0000546 page_init();
pbrooke2eef172008-06-08 01:09:01 +0000547#if !defined(CONFIG_USER_ONLY)
bellard26a5f132008-05-28 12:30:31 +0000548 io_mem_init();
pbrooke2eef172008-06-08 01:09:01 +0000549#endif
bellard26a5f132008-05-28 12:30:31 +0000550}
551
pbrook9656f322008-07-01 20:01:19 +0000552#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
553
Juan Quintelae59fb372009-09-29 22:48:21 +0200554static int cpu_common_post_load(void *opaque, int version_id)
Juan Quintelae7f4eff2009-09-10 03:04:33 +0200555{
556 CPUState *env = opaque;
557
aurel323098dba2009-03-07 21:28:24 +0000558 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
559 version_id is increased. */
560 env->interrupt_request &= ~0x01;
pbrook9656f322008-07-01 20:01:19 +0000561 tlb_flush(env, 1);
562
563 return 0;
564}
Juan Quintelae7f4eff2009-09-10 03:04:33 +0200565
/* Migration description for the state shared by every CPU model.
   Model-specific state is registered separately via register_savevm()
   in cpu_exec_init(). */
static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    /* Fixes up interrupt_request and flushes the TLB after restore. */
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
pbrook9656f322008-07-01 20:01:19 +0000578#endif
579
Glauber Costa950f1472009-06-09 12:15:18 -0400580CPUState *qemu_get_cpu(int cpu)
581{
582 CPUState *env = first_cpu;
583
584 while (env) {
585 if (env->cpu_index == cpu)
586 break;
587 env = env->next_cpu;
588 }
589
590 return env;
591}
592
/* Register a freshly created CPU: append it to the global first_cpu list,
   assign it the next free cpu_index, initialise its debug lists, and (in
   system mode with savevm support) register its migration state. */
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    /* In user mode other threads may be creating CPUs concurrently. */
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    /* Walk to the tail; cpu_index ends up as the list length, i.e. the
       next free index. */
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(cpu_index, &vmstate_cpu_common, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}
622
bellard9fa3e852004-01-04 18:06:42 +0000623static inline void invalidate_page_bitmap(PageDesc *p)
624{
625 if (p->code_bitmap) {
bellard59817cc2004-02-16 22:01:13 +0000626 qemu_free(p->code_bitmap);
bellard9fa3e852004-01-04 18:06:42 +0000627 p->code_bitmap = NULL;
628 }
629 p->code_write_count = 0;
630}
631
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800632/* Set to NULL all the 'first_tb' fields in all PageDescs. */
633
634static void page_flush_tb_1 (int level, void **lp)
635{
636 int i;
637
638 if (*lp == NULL) {
639 return;
640 }
641 if (level == 0) {
642 PageDesc *pd = *lp;
643 for (i = 0; i < L2_BITS; ++i) {
644 pd[i].first_tb = NULL;
645 invalidate_page_bitmap(pd + i);
646 }
647 } else {
648 void **pp = *lp;
649 for (i = 0; i < L2_BITS; ++i) {
650 page_flush_tb_1 (level - 1, pp + i);
651 }
652 }
653}
654
bellardfd6ce8f2003-05-14 19:00:11 +0000655static void page_flush_tb(void)
656{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800657 int i;
658 for (i = 0; i < V_L1_SIZE; i++) {
659 page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
bellardfd6ce8f2003-05-14 19:00:11 +0000660 }
661}
662
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
/* Discard every translated block: reset the code buffer, empty the
   physical hash table, clear each CPU's TB jump cache and every page's
   first_tb list.  'env1' is only used for error reporting. */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    /* Generated code should never have run past the buffer end. */
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    /* Invalidate every CPU's direct TB lookup cache. */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    /* New translations start from the beginning of the buffer again. */
    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
691
#ifdef DEBUG_TB_CHECK

/* Debug check: after invalidating a page, report any TB that still
   overlaps the target page containing 'address'. */
static void tb_invalidate_check(target_ulong address)
{
    int bucket;

    address &= TARGET_PAGE_MASK;
    for (bucket = 0; bucket < CODE_GEN_PHYS_HASH_SIZE; bucket++) {
        TranslationBlock *tb;

        for (tb = tb_phys_hash[bucket]; tb; tb = tb->phys_hash_next) {
            /* overlap test: TB range [pc, pc+size) intersects the
               page [address, address+TARGET_PAGE_SIZE) */
            if (address + TARGET_PAGE_SIZE > tb->pc &&
                address < tb->pc + tb->size) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    int bucket;

    for (bucket = 0; bucket < CODE_GEN_PHYS_HASH_SIZE; bucket++) {
        TranslationBlock *tb;

        for (tb = tb_phys_hash[bucket]; tb; tb = tb->phys_hash_next) {
            int first = page_get_flags(tb->pc);
            int last = page_get_flags(tb->pc + tb->size - 1);

            /* pages holding translated code must not be writable */
            if ((first | last) & PAGE_WRITE) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, first, last);
            }
        }
    }
}

#endif
730
731/* invalidate one TB */
732static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
733 int next_offset)
734{
735 TranslationBlock *tb1;
736 for(;;) {
737 tb1 = *ptb;
738 if (tb1 == tb) {
739 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
740 break;
741 }
742 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
743 }
744}
745
bellard9fa3e852004-01-04 18:06:42 +0000746static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
747{
748 TranslationBlock *tb1;
749 unsigned int n1;
750
751 for(;;) {
752 tb1 = *ptb;
753 n1 = (long)tb1 & 3;
754 tb1 = (TranslationBlock *)((long)tb1 & ~3);
755 if (tb1 == tb) {
756 *ptb = tb1->page_next[n1];
757 break;
758 }
759 ptb = &tb1->page_next[n1];
760 }
761}
762
/* Remove jump slot 'n' of 'tb' from the circular list of TBs that
   jump to the same target.  List links are tagged pointers: the low
   two bits select which jmp_next slot (0 or 1) the link lives in, or
   2 when the link is the target TB's jmp_first head. */
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;  /* tag: slot index, or 2 for list head */
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                /* reached the head stored in the target TB */
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
790
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    /* re-point the generated jump at the TB's own code just past the
       jump instruction (tc_ptr + tb_next_offset[n]), i.e. fall through */
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
797
/* Remove 'tb' from every lookup structure: the physical-PC hash
   table, the per-page TB lists (skipping the page equal to
   'page_addr', whose list the caller is already walking), each CPU's
   tb_jmp_cache, its own two outgoing jump chains, and all incoming
   jumps, which are redirected back to their owner's fall-through
   path.  'page_addr' may be -1 to unlink from both pages. */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    /* tell cpu_exec() that the cached TB it may hold is stale */
    tb_invalidated_flag = 1;

    /* remove the TB from each CPU's virtual-PC jump cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB: walk the circular
       incoming-jump list (tag 2 marks the end) and reset each jump */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
853
/* Set bits [start, start+len) in the bitmap 'tab'.  Handles three
   cases: the range within a single byte, then a partial head byte,
   whole middle bytes, and a partial tail byte. */
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end = start + len;
    uint8_t *byte = tab + (start >> 3);

    if ((start >> 3) == (end >> 3)) {
        /* range confined to one byte (no-op when len == 0) */
        if (start < end) {
            *byte |= (0xff << (start & 7)) & ~(0xff << (end & 7));
        }
        return;
    }
    /* head: fill from 'start' to the end of its byte */
    *byte++ |= 0xff << (start & 7);
    start = (start | 7) + 1;
    /* body: whole bytes */
    while (start + 8 <= end) {
        *byte++ = 0xff;
        start += 8;
    }
    /* tail: remaining bits of the last byte, if any */
    if (start < end) {
        *byte |= ~(0xff << (end & 7));
    }
}
880
/* Build p->code_bitmap: one bit per byte of the page, set where some
   TB's translated guest code lives.  Used by the fast SMC write path
   to skip invalidation for writes that miss all code. */
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    /* one bit per page byte, zero-initialized */
    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        /* low two bits of the link tag which page slot this TB uses */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            /* second page of a spanning TB: starts at page offset 0 */
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
908
/* Translate the guest code at (pc, cs_base, flags) and register the
   resulting TB.  If TB or code-buffer space is exhausted, flush all
   translations and retry — the second tb_alloc() cannot fail.
   Returns the newly generated TB. */
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    /* emit host code at the current end of the code buffer */
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    /* advance the buffer cursor, rounded up to CODE_GEN_ALIGN */
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed: a TB may span two guest pages */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
ths3b46e622007-09-17 08:09:54 +0000946
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap: this page is written often enough that
           the fast path (tb_invalidate_phys_page_fast) is worth it */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;  /* tagged link: page slot index */
        tb = (TranslationBlock *)((long)tb & ~3);
        /* fetch the next link now: tb_phys_invalidate unlinks tb */
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                /* lazily look up the executing TB, once */
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
1057
1058/* len must be <= 8 and start must be a multiple of len */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001059static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
bellard9fa3e852004-01-04 18:06:42 +00001060{
1061 PageDesc *p;
1062 int offset, b;
bellard59817cc2004-02-16 22:01:13 +00001063#if 0
bellarda4193c82004-06-03 14:01:43 +00001064 if (1) {
aliguori93fcfe32009-01-15 22:34:14 +00001065 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1066 cpu_single_env->mem_io_vaddr, len,
1067 cpu_single_env->eip,
1068 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
bellard59817cc2004-02-16 22:01:13 +00001069 }
1070#endif
bellard9fa3e852004-01-04 18:06:42 +00001071 p = page_find(start >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001072 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00001073 return;
1074 if (p->code_bitmap) {
1075 offset = start & ~TARGET_PAGE_MASK;
1076 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1077 if (b & ((1 << len) - 1))
1078 goto do_invalidate;
1079 } else {
1080 do_invalidate:
bellardd720b932004-04-25 17:57:43 +00001081 tb_invalidate_phys_page_range(start, start + len, 1);
bellard9fa3e852004-01-04 18:06:42 +00001082 }
1083}
1084
#if !defined(CONFIG_SOFTMMU)
/* Invalidate every TB on the page containing 'addr' (user-mode only).
   'pc' is the host PC of the faulting write (0 if unknown) and 'puc'
   the signal context, both used to restore CPU state when the write
   came from the TB currently executing (self-modifying code). */
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        /* identify the TB we were executing when the write faulted */
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;  /* tagged link: page slot index */
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001145
/* add the tb in the target page and protect it if necessary */
/* 'n' is the page slot (0 or 1) of a possibly page-spanning TB; it is
   stored in the low bits of the tagged first_tb link.  The page is
   then write-protected so self-modifying code traps: via mprotect()
   in user mode, via the TLB in system mode. */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
    /* remember whether the page already held code (see softmmu path) */
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        /* a host page can cover several target pages; collect the
           union of their flags and clear PAGE_WRITE on each */
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
1200
1201/* Allocate a new translation block. Flush the translation buffer if
1202 too many translation blocks or too much generated code. */
bellardc27004e2005-01-03 23:35:10 +00001203TranslationBlock *tb_alloc(target_ulong pc)
bellardfd6ce8f2003-05-14 19:00:11 +00001204{
1205 TranslationBlock *tb;
bellardfd6ce8f2003-05-14 19:00:11 +00001206
bellard26a5f132008-05-28 12:30:31 +00001207 if (nb_tbs >= code_gen_max_blocks ||
1208 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
bellardd4e81642003-05-25 16:46:15 +00001209 return NULL;
bellardfd6ce8f2003-05-14 19:00:11 +00001210 tb = &tbs[nb_tbs++];
1211 tb->pc = pc;
bellardb448f2f2004-02-25 23:24:04 +00001212 tb->cflags = 0;
bellardd4e81642003-05-25 16:46:15 +00001213 return tb;
1214}
1215
pbrook2e70f6e2008-06-29 01:03:05 +00001216void tb_free(TranslationBlock *tb)
1217{
thsbf20dc02008-06-30 17:22:19 +00001218 /* In practice this is mostly used for single use temporary TB
pbrook2e70f6e2008-06-29 01:03:05 +00001219 Ignore the hard cases and just back up if this TB happens to
1220 be the last one generated. */
1221 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1222 code_gen_ptr = tb->tc_ptr;
1223 nb_tbs--;
1224 }
1225}
1226
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done. */
    mmap_lock();
    /* add in the physical hash table (push onto bucket head) */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    /* empty circular incoming-jump list: tag 2 marks the head */
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses (0xffff means no jump slot) */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
1266
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    /* host address must lie inside the generated-code buffer */
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) — tbs[] is ordered by tc_ptr because
       TBs and code space are both allocated sequentially */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    /* no exact hit: m_max indexes the last TB starting below tc_ptr,
       i.e. the TB whose code contains tc_ptr */
    return &tbs[m_max];
}
bellard75012672003-06-21 13:11:07 +00001297
static void tb_reset_jump_recursive(TranslationBlock *tb);

/* Unchain jump slot 'n' of 'tb': find the TB it currently jumps to,
   remove 'tb' from that TB's circular incoming-jump list, patch the
   jump back to fall-through, then recurse into the target so its own
   outgoing chains are broken too. */
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list (links are tagged; tag 2 marks the head,
           which lives in the jump-target TB's jmp_first) */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure now that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}
1338
1339static void tb_reset_jump_recursive(TranslationBlock *tb)
1340{
1341 tb_reset_jump_recursive2(tb, 0);
1342 tb_reset_jump_recursive2(tb, 1);
1343}
1344
#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
/* Invalidate any translated code containing 'pc' so the breakpoint
   takes effect.  In user mode the guest address is the page address. */
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
/* System-mode variant: translate the virtual pc to a RAM address via
   the physical page tables before invalidating. */
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    /* combine page frame with the in-page offset of pc */
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
#endif /* TARGET_HAS_ICE */
bellardd720b932004-04-25 17:57:43 +00001371
#if defined(CONFIG_USER_ONLY)
/* User-mode emulation does not support memory watchpoints: provide
   stub entry points so callers still link. */
void cpu_watchpoint_remove_all(CPUState *env, int mask)

{
}

int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint. */
/* 'len' must be a power of two (1/2/4/8) and 'addr' aligned to it.
   On success, stores the new watchpoint through '*watchpoint' (when
   non-NULL) and returns 0; returns -EINVAL on bad addr/len. */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = qemu_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    /* drop any cached translation for the page so accesses re-check */
    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
1415
aliguoria1d1bb32008-11-18 20:07:32 +00001416/* Remove a specific watchpoint. */
1417int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1418 int flags)
pbrook6658ffb2007-03-16 23:58:11 +00001419{
aliguorib4051332008-11-18 20:14:20 +00001420 target_ulong len_mask = ~(len - 1);
aliguoria1d1bb32008-11-18 20:07:32 +00001421 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001422
Blue Swirl72cf2d42009-09-12 07:36:22 +00001423 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001424 if (addr == wp->vaddr && len_mask == wp->len_mask
aliguori6e140f22008-11-18 20:37:55 +00001425 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
aliguoria1d1bb32008-11-18 20:07:32 +00001426 cpu_watchpoint_remove_by_ref(env, wp);
pbrook6658ffb2007-03-16 23:58:11 +00001427 return 0;
1428 }
1429 }
aliguoria1d1bb32008-11-18 20:07:32 +00001430 return -ENOENT;
pbrook6658ffb2007-03-16 23:58:11 +00001431}
1432
/* Remove a specific watchpoint by reference. */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    /* flush reads watchpoint->vaddr, so it must precede the free */
    tlb_flush_page(env, watchpoint->vaddr);

    qemu_free(watchpoint);
}
1442
/* Remove all matching watchpoints. */
/* 'mask' is tested against each watchpoint's flags (e.g. BP_GDB). */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    /* _SAFE variant: removal frees the current node while iterating */
    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif
aliguoria1d1bb32008-11-18 20:07:32 +00001454
1455/* Add a breakpoint. */
1456int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1457 CPUBreakpoint **breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001458{
bellard1fddef42005-04-17 19:16:13 +00001459#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001460 CPUBreakpoint *bp;
ths3b46e622007-09-17 08:09:54 +00001461
aliguoria1d1bb32008-11-18 20:07:32 +00001462 bp = qemu_malloc(sizeof(*bp));
aliguoria1d1bb32008-11-18 20:07:32 +00001463
1464 bp->pc = pc;
1465 bp->flags = flags;
1466
aliguori2dc9f412008-11-18 20:56:59 +00001467 /* keep all GDB-injected breakpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001468 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001469 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001470 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001471 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001472
1473 breakpoint_invalidate(env, pc);
1474
1475 if (breakpoint)
1476 *breakpoint = bp;
1477 return 0;
1478#else
1479 return -ENOSYS;
1480#endif
1481}
1482
1483/* Remove a specific breakpoint. */
1484int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1485{
1486#if defined(TARGET_HAS_ICE)
1487 CPUBreakpoint *bp;
1488
Blue Swirl72cf2d42009-09-12 07:36:22 +00001489 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00001490 if (bp->pc == pc && bp->flags == flags) {
1491 cpu_breakpoint_remove_by_ref(env, bp);
bellard4c3a88a2003-07-26 12:06:08 +00001492 return 0;
aliguoria1d1bb32008-11-18 20:07:32 +00001493 }
bellard4c3a88a2003-07-26 12:06:08 +00001494 }
aliguoria1d1bb32008-11-18 20:07:32 +00001495 return -ENOENT;
bellard4c3a88a2003-07-26 12:06:08 +00001496#else
aliguoria1d1bb32008-11-18 20:07:32 +00001497 return -ENOSYS;
bellard4c3a88a2003-07-26 12:06:08 +00001498#endif
1499}
1500
aliguoria1d1bb32008-11-18 20:07:32 +00001501/* Remove a specific breakpoint by reference. */
1502void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001503{
bellard1fddef42005-04-17 19:16:13 +00001504#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001505 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
bellardd720b932004-04-25 17:57:43 +00001506
aliguoria1d1bb32008-11-18 20:07:32 +00001507 breakpoint_invalidate(env, breakpoint->pc);
1508
1509 qemu_free(breakpoint);
1510#endif
1511}
1512
1513/* Remove all matching breakpoints. */
1514void cpu_breakpoint_remove_all(CPUState *env, int mask)
1515{
1516#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001517 CPUBreakpoint *bp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001518
Blue Swirl72cf2d42009-09-12 07:36:22 +00001519 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001520 if (bp->flags & mask)
1521 cpu_breakpoint_remove_by_ref(env, bp);
aliguoric0ce9982008-11-25 22:13:57 +00001522 }
bellard4c3a88a2003-07-26 12:06:08 +00001523#endif
1524}
1525
bellardc33a3462003-07-29 20:50:33 +00001526/* enable or disable single step mode. EXCP_DEBUG is returned by the
1527 CPU loop after each instruction */
1528void cpu_single_step(CPUState *env, int enabled)
1529{
bellard1fddef42005-04-17 19:16:13 +00001530#if defined(TARGET_HAS_ICE)
bellardc33a3462003-07-29 20:50:33 +00001531 if (env->singlestep_enabled != enabled) {
1532 env->singlestep_enabled = enabled;
aliguorie22a25c2009-03-12 20:12:48 +00001533 if (kvm_enabled())
1534 kvm_update_guest_debug(env, 0);
1535 else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01001536 /* must flush all the translated code to avoid inconsistencies */
aliguorie22a25c2009-03-12 20:12:48 +00001537 /* XXX: only flush what is necessary */
1538 tb_flush(env);
1539 }
bellardc33a3462003-07-29 20:50:33 +00001540 }
1541#endif
1542}
1543
bellard34865132003-10-05 14:28:56 +00001544/* enable or disable low levels log */
1545void cpu_set_log(int log_flags)
1546{
1547 loglevel = log_flags;
1548 if (loglevel && !logfile) {
pbrook11fcfab2007-07-01 18:21:11 +00001549 logfile = fopen(logfilename, log_append ? "a" : "w");
bellard34865132003-10-05 14:28:56 +00001550 if (!logfile) {
1551 perror(logfilename);
1552 _exit(1);
1553 }
bellard9fa3e852004-01-04 18:06:42 +00001554#if !defined(CONFIG_SOFTMMU)
1555 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1556 {
blueswir1b55266b2008-09-20 08:07:15 +00001557 static char logfile_buf[4096];
bellard9fa3e852004-01-04 18:06:42 +00001558 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1559 }
Filip Navarabf65f532009-07-27 10:02:04 -05001560#elif !defined(_WIN32)
1561 /* Win32 doesn't support line-buffering and requires size >= 2 */
bellard34865132003-10-05 14:28:56 +00001562 setvbuf(logfile, NULL, _IOLBF, 0);
bellard9fa3e852004-01-04 18:06:42 +00001563#endif
pbrooke735b912007-06-30 13:53:24 +00001564 log_append = 1;
1565 }
1566 if (!loglevel && logfile) {
1567 fclose(logfile);
1568 logfile = NULL;
bellard34865132003-10-05 14:28:56 +00001569 }
1570}
1571
1572void cpu_set_log_filename(const char *filename)
1573{
1574 logfilename = strdup(filename);
pbrooke735b912007-06-30 13:53:24 +00001575 if (logfile) {
1576 fclose(logfile);
1577 logfile = NULL;
1578 }
1579 cpu_set_log(loglevel);
bellard34865132003-10-05 14:28:56 +00001580}
bellardc33a3462003-07-29 20:50:33 +00001581
aurel323098dba2009-03-07 21:28:24 +00001582static void cpu_unlink_tb(CPUState *env)
bellardea041c02003-06-25 16:16:50 +00001583{
pbrookd5975362008-06-07 20:50:51 +00001584 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1585 problem and hope the cpu will stop of its own accord. For userspace
1586 emulation this often isn't actually as bad as it sounds. Often
1587 signals are used primarily to interrupt blocking syscalls. */
aurel323098dba2009-03-07 21:28:24 +00001588 TranslationBlock *tb;
Anthony Liguoric227f092009-10-01 16:12:16 -05001589 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
aurel323098dba2009-03-07 21:28:24 +00001590
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001591 spin_lock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001592 tb = env->current_tb;
1593 /* if the cpu is currently executing code, we must unlink it and
1594 all the potentially executing TB */
Riku Voipiof76cfe52009-12-04 15:16:30 +02001595 if (tb) {
aurel323098dba2009-03-07 21:28:24 +00001596 env->current_tb = NULL;
1597 tb_reset_jump_recursive(tb);
aurel323098dba2009-03-07 21:28:24 +00001598 }
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001599 spin_unlock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001600}
1601
/* mask must never be zero, except for A20 change call */
/* Raise the interrupt bits in 'mask' on the CPU and make sure the CPU
   notices them: either by kicking it from the iothread, by forcing the
   icount budget to expire, or by unlinking the current TB. */
void cpu_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

#ifndef CONFIG_USER_ONLY
    /*
     * If called from iothread context, wake the target cpu in
     * case its halted.
     */
    if (!qemu_cpu_self(env)) {
        qemu_cpu_kick(env);
        return;
    }
#endif

    if (use_icount) {
        /* force the icount counter to expire so the CPU loop stops */
        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
        /* only a newly raised bit (mask & ~old_mask) is an error here */
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
#endif
    } else {
        cpu_unlink_tb(env);
    }
}
1633
bellardb54ad042004-05-20 13:42:52 +00001634void cpu_reset_interrupt(CPUState *env, int mask)
1635{
1636 env->interrupt_request &= ~mask;
1637}
1638
aurel323098dba2009-03-07 21:28:24 +00001639void cpu_exit(CPUState *env)
1640{
1641 env->exit_request = 1;
1642 cpu_unlink_tb(env);
1643}
1644
blueswir1c7cd6a32008-10-02 18:27:46 +00001645const CPULogItem cpu_log_items[] = {
ths5fafdf22007-09-16 21:08:06 +00001646 { CPU_LOG_TB_OUT_ASM, "out_asm",
bellardf193c792004-03-21 17:06:25 +00001647 "show generated host assembly code for each compiled TB" },
1648 { CPU_LOG_TB_IN_ASM, "in_asm",
1649 "show target assembly code for each compiled TB" },
ths5fafdf22007-09-16 21:08:06 +00001650 { CPU_LOG_TB_OP, "op",
bellard57fec1f2008-02-01 10:50:11 +00001651 "show micro ops for each compiled TB" },
bellardf193c792004-03-21 17:06:25 +00001652 { CPU_LOG_TB_OP_OPT, "op_opt",
blueswir1e01a1152008-03-14 17:37:11 +00001653 "show micro ops "
1654#ifdef TARGET_I386
1655 "before eflags optimization and "
bellardf193c792004-03-21 17:06:25 +00001656#endif
blueswir1e01a1152008-03-14 17:37:11 +00001657 "after liveness analysis" },
bellardf193c792004-03-21 17:06:25 +00001658 { CPU_LOG_INT, "int",
1659 "show interrupts/exceptions in short format" },
1660 { CPU_LOG_EXEC, "exec",
1661 "show trace before each executed TB (lots of logs)" },
bellard9fddaa02004-05-21 12:59:32 +00001662 { CPU_LOG_TB_CPU, "cpu",
thse91c8a72007-06-03 13:35:16 +00001663 "show CPU state before block translation" },
bellardf193c792004-03-21 17:06:25 +00001664#ifdef TARGET_I386
1665 { CPU_LOG_PCALL, "pcall",
1666 "show protected mode far calls/returns/exceptions" },
aliguorieca1bdf2009-01-26 19:54:31 +00001667 { CPU_LOG_RESET, "cpu_reset",
1668 "show CPU state before CPU resets" },
bellardf193c792004-03-21 17:06:25 +00001669#endif
bellard8e3a9fd2004-10-09 17:32:58 +00001670#ifdef DEBUG_IOPORT
bellardfd872592004-05-12 19:11:15 +00001671 { CPU_LOG_IOPORT, "ioport",
1672 "show all i/o ports accesses" },
bellard8e3a9fd2004-10-09 17:32:58 +00001673#endif
bellardf193c792004-03-21 17:06:25 +00001674 { 0, NULL, NULL },
1675};
1676
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001677#ifndef CONFIG_USER_ONLY
/* List of registered physical-memory clients, notified by the
   cpu_notify_* helpers below. */
static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
    = QLIST_HEAD_INITIALIZER(memory_client_list);

/* Notify every registered client of a physical memory mapping change. */
static void cpu_notify_set_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        client->set_memory(client, start_addr, size, phys_offset);
    }
}
1690
/* Ask every registered client to sync its dirty bitmap for the range
   start..end.  Stops at and returns the first negative error code;
   returns 0 when all clients succeed. */
static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
                                        target_phys_addr_t end)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        int r = client->sync_dirty_bitmap(client, start, end);
        if (r < 0)
            return r;
    }
    return 0;
}
1702
/* Tell every registered client to enable/disable migration logging.
   Stops at and returns the first negative error code; 0 on success. */
static int cpu_notify_migration_log(int enable)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        int r = client->migration_log(client, enable);
        if (r < 0)
            return r;
    }
    return 0;
}
1713
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001714static void phys_page_for_each_1(CPUPhysMemoryClient *client,
1715 int level, void **lp)
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001716{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001717 int i;
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001718
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001719 if (*lp == NULL) {
1720 return;
1721 }
1722 if (level == 0) {
1723 PhysPageDesc *pd = *lp;
1724 for (i = 0; i < L2_BITS; ++i) {
1725 if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
1726 client->set_memory(client, pd[i].region_offset,
1727 TARGET_PAGE_SIZE, pd[i].phys_offset);
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001728 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001729 }
1730 } else {
1731 void **pp = *lp;
1732 for (i = 0; i < L2_BITS; ++i) {
1733 phys_page_for_each_1(client, level - 1, pp + i);
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001734 }
1735 }
1736}
1737
1738static void phys_page_for_each(CPUPhysMemoryClient *client)
1739{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001740 int i;
1741 for (i = 0; i < P_L1_SIZE; ++i) {
1742 phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
1743 l1_phys_map + 1);
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001744 }
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001745}
1746
/* Register a new physical-memory client and immediately replay all
   currently assigned physical pages to it via its set_memory callback. */
void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_INSERT_HEAD(&memory_client_list, client, list);
    phys_page_for_each(client);
}
1752
/* Unlink a previously registered physical-memory client.  The caller
   retains ownership of the client structure. */
void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_REMOVE(client, list);
}
1757#endif
1758
bellardf193c792004-03-21 17:06:25 +00001759static int cmp1(const char *s1, int n, const char *s2)
1760{
1761 if (strlen(s2) != n)
1762 return 0;
1763 return memcmp(s1, s2, n) == 0;
1764}
ths3b46e622007-09-17 08:09:54 +00001765
bellardf193c792004-03-21 17:06:25 +00001766/* takes a comma separated list of log masks. Return 0 if error. */
1767int cpu_str_to_log_mask(const char *str)
1768{
blueswir1c7cd6a32008-10-02 18:27:46 +00001769 const CPULogItem *item;
bellardf193c792004-03-21 17:06:25 +00001770 int mask;
1771 const char *p, *p1;
1772
1773 p = str;
1774 mask = 0;
1775 for(;;) {
1776 p1 = strchr(p, ',');
1777 if (!p1)
1778 p1 = p + strlen(p);
bellard8e3a9fd2004-10-09 17:32:58 +00001779 if(cmp1(p,p1-p,"all")) {
1780 for(item = cpu_log_items; item->mask != 0; item++) {
1781 mask |= item->mask;
1782 }
1783 } else {
bellardf193c792004-03-21 17:06:25 +00001784 for(item = cpu_log_items; item->mask != 0; item++) {
1785 if (cmp1(p, p1 - p, item->name))
1786 goto found;
1787 }
1788 return 0;
bellard8e3a9fd2004-10-09 17:32:58 +00001789 }
bellardf193c792004-03-21 17:06:25 +00001790 found:
1791 mask |= item->mask;
1792 if (*p1 != ',')
1793 break;
1794 p = p1 + 1;
1795 }
1796 return mask;
1797}
bellardea041c02003-06-25 16:16:50 +00001798
bellard75012672003-06-21 13:11:07 +00001799void cpu_abort(CPUState *env, const char *fmt, ...)
1800{
1801 va_list ap;
pbrook493ae1f2007-11-23 16:53:59 +00001802 va_list ap2;
bellard75012672003-06-21 13:11:07 +00001803
1804 va_start(ap, fmt);
pbrook493ae1f2007-11-23 16:53:59 +00001805 va_copy(ap2, ap);
bellard75012672003-06-21 13:11:07 +00001806 fprintf(stderr, "qemu: fatal: ");
1807 vfprintf(stderr, fmt, ap);
1808 fprintf(stderr, "\n");
1809#ifdef TARGET_I386
bellard7fe48482004-10-09 18:08:01 +00001810 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1811#else
1812 cpu_dump_state(env, stderr, fprintf, 0);
bellard75012672003-06-21 13:11:07 +00001813#endif
aliguori93fcfe32009-01-15 22:34:14 +00001814 if (qemu_log_enabled()) {
1815 qemu_log("qemu: fatal: ");
1816 qemu_log_vprintf(fmt, ap2);
1817 qemu_log("\n");
j_mayerf9373292007-09-29 12:18:20 +00001818#ifdef TARGET_I386
aliguori93fcfe32009-01-15 22:34:14 +00001819 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
j_mayerf9373292007-09-29 12:18:20 +00001820#else
aliguori93fcfe32009-01-15 22:34:14 +00001821 log_cpu_state(env, 0);
j_mayerf9373292007-09-29 12:18:20 +00001822#endif
aliguori31b1a7b2009-01-15 22:35:09 +00001823 qemu_log_flush();
aliguori93fcfe32009-01-15 22:34:14 +00001824 qemu_log_close();
balrog924edca2007-06-10 14:07:13 +00001825 }
pbrook493ae1f2007-11-23 16:53:59 +00001826 va_end(ap2);
j_mayerf9373292007-09-29 12:18:20 +00001827 va_end(ap);
Riku Voipiofd052bf2010-01-25 14:30:49 +02001828#if defined(CONFIG_USER_ONLY)
1829 {
1830 struct sigaction act;
1831 sigfillset(&act.sa_mask);
1832 act.sa_handler = SIG_DFL;
1833 sigaction(SIGABRT, &act, NULL);
1834 }
1835#endif
bellard75012672003-06-21 13:11:07 +00001836 abort();
1837}
1838
thsc5be9f02007-02-28 20:20:53 +00001839CPUState *cpu_copy(CPUState *env)
1840{
ths01ba9812007-12-09 02:22:57 +00001841 CPUState *new_env = cpu_init(env->cpu_model_str);
thsc5be9f02007-02-28 20:20:53 +00001842 CPUState *next_cpu = new_env->next_cpu;
1843 int cpu_index = new_env->cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001844#if defined(TARGET_HAS_ICE)
1845 CPUBreakpoint *bp;
1846 CPUWatchpoint *wp;
1847#endif
1848
thsc5be9f02007-02-28 20:20:53 +00001849 memcpy(new_env, env, sizeof(CPUState));
aliguori5a38f082009-01-15 20:16:51 +00001850
1851 /* Preserve chaining and index. */
thsc5be9f02007-02-28 20:20:53 +00001852 new_env->next_cpu = next_cpu;
1853 new_env->cpu_index = cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001854
1855 /* Clone all break/watchpoints.
1856 Note: Once we support ptrace with hw-debug register access, make sure
1857 BP_CPU break/watchpoints are handled correctly on clone. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00001858 QTAILQ_INIT(&env->breakpoints);
1859 QTAILQ_INIT(&env->watchpoints);
aliguori5a38f082009-01-15 20:16:51 +00001860#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001861 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001862 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1863 }
Blue Swirl72cf2d42009-09-12 07:36:22 +00001864 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001865 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1866 wp->flags, NULL);
1867 }
1868#endif
1869
thsc5be9f02007-02-28 20:20:53 +00001870 return new_env;
1871}
1872
bellard01243112004-01-04 15:48:17 +00001873#if !defined(CONFIG_USER_ONLY)
1874
edgar_igl5c751e92008-05-06 08:44:21 +00001875static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1876{
1877 unsigned int i;
1878
1879 /* Discard jump cache entries for any tb which might potentially
1880 overlap the flushed page. */
1881 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1882 memset (&env->tb_jmp_cache[i], 0,
1883 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1884
1885 i = tb_jmp_cache_hash_page(addr);
1886 memset (&env->tb_jmp_cache[i], 0,
1887 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1888}
1889
Igor Kovalenko08738982009-07-12 02:15:40 +04001890static CPUTLBEntry s_cputlb_empty_entry = {
1891 .addr_read = -1,
1892 .addr_write = -1,
1893 .addr_code = -1,
1894 .addend = -1,
1895};
1896
bellardee8b7022004-02-03 23:35:10 +00001897/* NOTE: if flush_global is true, also flush global entries (not
1898 implemented yet) */
1899void tlb_flush(CPUState *env, int flush_global)
bellard33417e72003-08-10 21:47:01 +00001900{
bellard33417e72003-08-10 21:47:01 +00001901 int i;
bellard01243112004-01-04 15:48:17 +00001902
bellard9fa3e852004-01-04 18:06:42 +00001903#if defined(DEBUG_TLB)
1904 printf("tlb_flush:\n");
1905#endif
bellard01243112004-01-04 15:48:17 +00001906 /* must reset current TB so that interrupts cannot modify the
1907 links while we are modifying them */
1908 env->current_tb = NULL;
1909
bellard33417e72003-08-10 21:47:01 +00001910 for(i = 0; i < CPU_TLB_SIZE; i++) {
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001911 int mmu_idx;
1912 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
Igor Kovalenko08738982009-07-12 02:15:40 +04001913 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001914 }
bellard33417e72003-08-10 21:47:01 +00001915 }
bellard9fa3e852004-01-04 18:06:42 +00001916
bellard8a40a182005-11-20 10:35:40 +00001917 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
bellard9fa3e852004-01-04 18:06:42 +00001918
bellarde3db7222005-01-26 22:00:47 +00001919 tlb_flush_count++;
bellard33417e72003-08-10 21:47:01 +00001920}
1921
bellard274da6b2004-05-20 21:56:27 +00001922static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
bellard61382a52003-10-27 21:22:23 +00001923{
ths5fafdf22007-09-16 21:08:06 +00001924 if (addr == (tlb_entry->addr_read &
bellard84b7b8e2005-11-28 21:19:04 +00001925 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00001926 addr == (tlb_entry->addr_write &
bellard84b7b8e2005-11-28 21:19:04 +00001927 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00001928 addr == (tlb_entry->addr_code &
bellard84b7b8e2005-11-28 21:19:04 +00001929 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
Igor Kovalenko08738982009-07-12 02:15:40 +04001930 *tlb_entry = s_cputlb_empty_entry;
bellard84b7b8e2005-11-28 21:19:04 +00001931 }
bellard61382a52003-10-27 21:22:23 +00001932}
1933
bellard2e126692004-04-25 21:28:44 +00001934void tlb_flush_page(CPUState *env, target_ulong addr)
bellard33417e72003-08-10 21:47:01 +00001935{
bellard8a40a182005-11-20 10:35:40 +00001936 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001937 int mmu_idx;
bellard01243112004-01-04 15:48:17 +00001938
bellard9fa3e852004-01-04 18:06:42 +00001939#if defined(DEBUG_TLB)
bellard108c49b2005-07-24 12:55:09 +00001940 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
bellard9fa3e852004-01-04 18:06:42 +00001941#endif
bellard01243112004-01-04 15:48:17 +00001942 /* must reset current TB so that interrupts cannot modify the
1943 links while we are modifying them */
1944 env->current_tb = NULL;
bellard33417e72003-08-10 21:47:01 +00001945
bellard61382a52003-10-27 21:22:23 +00001946 addr &= TARGET_PAGE_MASK;
bellard33417e72003-08-10 21:47:01 +00001947 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001948 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1949 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
bellard01243112004-01-04 15:48:17 +00001950
edgar_igl5c751e92008-05-06 08:44:21 +00001951 tlb_flush_jmp_cache(env, addr);
bellard9fa3e852004-01-04 18:06:42 +00001952}
1953
bellard9fa3e852004-01-04 18:06:42 +00001954/* update the TLBs so that writes to code in the virtual page 'addr'
1955 can be detected */
Anthony Liguoric227f092009-10-01 16:12:16 -05001956static void tlb_protect_code(ram_addr_t ram_addr)
bellard61382a52003-10-27 21:22:23 +00001957{
ths5fafdf22007-09-16 21:08:06 +00001958 cpu_physical_memory_reset_dirty(ram_addr,
bellard6a00d602005-11-21 23:25:50 +00001959 ram_addr + TARGET_PAGE_SIZE,
1960 CODE_DIRTY_FLAG);
bellard9fa3e852004-01-04 18:06:42 +00001961}
1962
bellard9fa3e852004-01-04 18:06:42 +00001963/* update the TLB so that writes in physical page 'phys_addr' are no longer
bellard3a7d9292005-08-21 09:26:42 +00001964 tested for self modifying code */
Anthony Liguoric227f092009-10-01 16:12:16 -05001965static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
bellard3a7d9292005-08-21 09:26:42 +00001966 target_ulong vaddr)
bellard9fa3e852004-01-04 18:06:42 +00001967{
bellard3a7d9292005-08-21 09:26:42 +00001968 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
bellard1ccde1c2004-02-06 19:46:14 +00001969}
1970
ths5fafdf22007-09-16 21:08:06 +00001971static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
bellard1ccde1c2004-02-06 19:46:14 +00001972 unsigned long start, unsigned long length)
1973{
1974 unsigned long addr;
bellard84b7b8e2005-11-28 21:19:04 +00001975 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1976 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
bellard1ccde1c2004-02-06 19:46:14 +00001977 if ((addr - start) < length) {
pbrook0f459d12008-06-09 00:20:13 +00001978 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
bellard1ccde1c2004-02-06 19:46:14 +00001979 }
1980 }
1981}
1982
pbrook5579c7f2009-04-11 14:47:08 +00001983/* Note: start and end must be within the same ram block. */
Anthony Liguoric227f092009-10-01 16:12:16 -05001984void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
bellard0a962c02005-02-10 22:00:27 +00001985 int dirty_flags)
bellard1ccde1c2004-02-06 19:46:14 +00001986{
1987 CPUState *env;
bellard4f2ac232004-04-26 19:44:02 +00001988 unsigned long length, start1;
bellard0a962c02005-02-10 22:00:27 +00001989 int i, mask, len;
1990 uint8_t *p;
bellard1ccde1c2004-02-06 19:46:14 +00001991
1992 start &= TARGET_PAGE_MASK;
1993 end = TARGET_PAGE_ALIGN(end);
1994
1995 length = end - start;
1996 if (length == 0)
1997 return;
bellard0a962c02005-02-10 22:00:27 +00001998 len = length >> TARGET_PAGE_BITS;
bellardf23db162005-08-21 19:12:28 +00001999 mask = ~dirty_flags;
2000 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
2001 for(i = 0; i < len; i++)
2002 p[i] &= mask;
2003
bellard1ccde1c2004-02-06 19:46:14 +00002004 /* we modify the TLB cache so that the dirty bit will be set again
2005 when accessing the range */
pbrook5579c7f2009-04-11 14:47:08 +00002006 start1 = (unsigned long)qemu_get_ram_ptr(start);
2007 /* Chek that we don't span multiple blocks - this breaks the
2008 address comparisons below. */
2009 if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
2010 != (end - 1) - start) {
2011 abort();
2012 }
2013
bellard6a00d602005-11-21 23:25:50 +00002014 for(env = first_cpu; env != NULL; env = env->next_cpu) {
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002015 int mmu_idx;
2016 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2017 for(i = 0; i < CPU_TLB_SIZE; i++)
2018 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2019 start1, length);
2020 }
bellard6a00d602005-11-21 23:25:50 +00002021 }
bellard1ccde1c2004-02-06 19:46:14 +00002022}
2023
aliguori74576192008-10-06 14:02:03 +00002024int cpu_physical_memory_set_dirty_tracking(int enable)
2025{
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002026 int ret = 0;
aliguori74576192008-10-06 14:02:03 +00002027 in_migration = enable;
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002028 ret = cpu_notify_migration_log(!!enable);
2029 return ret;
aliguori74576192008-10-06 14:02:03 +00002030}
2031
/* Return nonzero when dirty-memory tracking was enabled via
   cpu_physical_memory_set_dirty_tracking(). */
int cpu_physical_memory_get_dirty_tracking(void)
{
    return in_migration;
}
2036
Anthony Liguoric227f092009-10-01 16:12:16 -05002037int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
2038 target_phys_addr_t end_addr)
aliguori2bec46d2008-11-24 20:21:41 +00002039{
Michael S. Tsirkin7b8f3b72010-01-27 22:07:21 +02002040 int ret;
Jan Kiszka151f7742009-05-01 20:52:47 +02002041
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002042 ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
Jan Kiszka151f7742009-05-01 20:52:47 +02002043 return ret;
aliguori2bec46d2008-11-24 20:21:41 +00002044}
2045
bellard3a7d9292005-08-21 09:26:42 +00002046static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2047{
Anthony Liguoric227f092009-10-01 16:12:16 -05002048 ram_addr_t ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00002049 void *p;
bellard3a7d9292005-08-21 09:26:42 +00002050
bellard84b7b8e2005-11-28 21:19:04 +00002051 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
pbrook5579c7f2009-04-11 14:47:08 +00002052 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2053 + tlb_entry->addend);
2054 ram_addr = qemu_ram_addr_from_host(p);
bellard3a7d9292005-08-21 09:26:42 +00002055 if (!cpu_physical_memory_is_dirty(ram_addr)) {
pbrook0f459d12008-06-09 00:20:13 +00002056 tlb_entry->addr_write |= TLB_NOTDIRTY;
bellard3a7d9292005-08-21 09:26:42 +00002057 }
2058 }
2059}
2060
2061/* update the TLB according to the current state of the dirty bits */
2062void cpu_tlb_update_dirty(CPUState *env)
2063{
2064 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002065 int mmu_idx;
2066 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2067 for(i = 0; i < CPU_TLB_SIZE; i++)
2068 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2069 }
bellard3a7d9292005-08-21 09:26:42 +00002070}
2071
pbrook0f459d12008-06-09 00:20:13 +00002072static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002073{
pbrook0f459d12008-06-09 00:20:13 +00002074 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2075 tlb_entry->addr_write = vaddr;
bellard1ccde1c2004-02-06 19:46:14 +00002076}
2077
pbrook0f459d12008-06-09 00:20:13 +00002078/* update the TLB corresponding to virtual page vaddr
2079 so that it is no longer dirty */
2080static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002081{
bellard1ccde1c2004-02-06 19:46:14 +00002082 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002083 int mmu_idx;
bellard1ccde1c2004-02-06 19:46:14 +00002084
pbrook0f459d12008-06-09 00:20:13 +00002085 vaddr &= TARGET_PAGE_MASK;
bellard1ccde1c2004-02-06 19:46:14 +00002086 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002087 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2088 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
bellard9fa3e852004-01-04 18:06:42 +00002089}
2090
bellard59817cc2004-02-16 22:01:13 +00002091/* add a new TLB entry. At most one entry for a given virtual address
2092 is permitted. Return 0 if OK or 2 if the page could not be mapped
2093 (can only happen in non SOFTMMU mode for I/O pages or pages
2094 conflicting with the host address space). */
ths5fafdf22007-09-16 21:08:06 +00002095int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
Anthony Liguoric227f092009-10-01 16:12:16 -05002096 target_phys_addr_t paddr, int prot,
j_mayer6ebbf392007-10-14 07:07:08 +00002097 int mmu_idx, int is_softmmu)
bellard9fa3e852004-01-04 18:06:42 +00002098{
bellard92e873b2004-05-21 14:52:29 +00002099 PhysPageDesc *p;
bellard4f2ac232004-04-26 19:44:02 +00002100 unsigned long pd;
bellard9fa3e852004-01-04 18:06:42 +00002101 unsigned int index;
bellard4f2ac232004-04-26 19:44:02 +00002102 target_ulong address;
pbrook0f459d12008-06-09 00:20:13 +00002103 target_ulong code_address;
Anthony Liguoric227f092009-10-01 16:12:16 -05002104 target_phys_addr_t addend;
bellard9fa3e852004-01-04 18:06:42 +00002105 int ret;
bellard84b7b8e2005-11-28 21:19:04 +00002106 CPUTLBEntry *te;
aliguoria1d1bb32008-11-18 20:07:32 +00002107 CPUWatchpoint *wp;
Anthony Liguoric227f092009-10-01 16:12:16 -05002108 target_phys_addr_t iotlb;
bellard9fa3e852004-01-04 18:06:42 +00002109
bellard92e873b2004-05-21 14:52:29 +00002110 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
bellard9fa3e852004-01-04 18:06:42 +00002111 if (!p) {
2112 pd = IO_MEM_UNASSIGNED;
bellard9fa3e852004-01-04 18:06:42 +00002113 } else {
2114 pd = p->phys_offset;
bellard9fa3e852004-01-04 18:06:42 +00002115 }
2116#if defined(DEBUG_TLB)
j_mayer6ebbf392007-10-14 07:07:08 +00002117 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2118 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
bellard9fa3e852004-01-04 18:06:42 +00002119#endif
2120
2121 ret = 0;
pbrook0f459d12008-06-09 00:20:13 +00002122 address = vaddr;
2123 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2124 /* IO memory case (romd handled later) */
2125 address |= TLB_MMIO;
2126 }
pbrook5579c7f2009-04-11 14:47:08 +00002127 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
pbrook0f459d12008-06-09 00:20:13 +00002128 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2129 /* Normal RAM. */
2130 iotlb = pd & TARGET_PAGE_MASK;
2131 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2132 iotlb |= IO_MEM_NOTDIRTY;
2133 else
2134 iotlb |= IO_MEM_ROM;
2135 } else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002136 /* IO handlers are currently passed a physical address.
pbrook0f459d12008-06-09 00:20:13 +00002137 It would be nice to pass an offset from the base address
2138 of that region. This would avoid having to special case RAM,
2139 and avoid full address decoding in every device.
2140 We can't use the high bits of pd for this because
2141 IO_MEM_ROMD uses these as a ram address. */
pbrook8da3ff12008-12-01 18:59:50 +00002142 iotlb = (pd & ~TARGET_PAGE_MASK);
2143 if (p) {
pbrook8da3ff12008-12-01 18:59:50 +00002144 iotlb += p->region_offset;
2145 } else {
2146 iotlb += paddr;
2147 }
pbrook0f459d12008-06-09 00:20:13 +00002148 }
pbrook6658ffb2007-03-16 23:58:11 +00002149
pbrook0f459d12008-06-09 00:20:13 +00002150 code_address = address;
2151 /* Make accesses to pages with watchpoints go via the
2152 watchpoint trap routines. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00002153 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00002154 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
pbrook0f459d12008-06-09 00:20:13 +00002155 iotlb = io_mem_watch + paddr;
2156 /* TODO: The memory case can be optimized by not trapping
2157 reads of pages with a write breakpoint. */
2158 address |= TLB_MMIO;
pbrook6658ffb2007-03-16 23:58:11 +00002159 }
pbrook0f459d12008-06-09 00:20:13 +00002160 }
balrogd79acba2007-06-26 20:01:13 +00002161
pbrook0f459d12008-06-09 00:20:13 +00002162 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2163 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2164 te = &env->tlb_table[mmu_idx][index];
2165 te->addend = addend - vaddr;
2166 if (prot & PAGE_READ) {
2167 te->addr_read = address;
2168 } else {
2169 te->addr_read = -1;
2170 }
edgar_igl5c751e92008-05-06 08:44:21 +00002171
pbrook0f459d12008-06-09 00:20:13 +00002172 if (prot & PAGE_EXEC) {
2173 te->addr_code = code_address;
2174 } else {
2175 te->addr_code = -1;
2176 }
2177 if (prot & PAGE_WRITE) {
2178 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2179 (pd & IO_MEM_ROMD)) {
2180 /* Write access calls the I/O callback. */
2181 te->addr_write = address | TLB_MMIO;
2182 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2183 !cpu_physical_memory_is_dirty(pd)) {
2184 te->addr_write = address | TLB_NOTDIRTY;
bellard84b7b8e2005-11-28 21:19:04 +00002185 } else {
pbrook0f459d12008-06-09 00:20:13 +00002186 te->addr_write = address;
bellard9fa3e852004-01-04 18:06:42 +00002187 }
pbrook0f459d12008-06-09 00:20:13 +00002188 } else {
2189 te->addr_write = -1;
bellard9fa3e852004-01-04 18:06:42 +00002190 }
bellard9fa3e852004-01-04 18:06:42 +00002191 return ret;
2192}
2193
bellard01243112004-01-04 15:48:17 +00002194#else
2195
bellardee8b7022004-02-03 23:35:10 +00002196void tlb_flush(CPUState *env, int flush_global)
bellard01243112004-01-04 15:48:17 +00002197{
2198}
2199
bellard2e126692004-04-25 21:28:44 +00002200void tlb_flush_page(CPUState *env, target_ulong addr)
bellard01243112004-01-04 15:48:17 +00002201{
2202}
2203
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002204/*
2205 * Walks guest process memory "regions" one by one
2206 * and calls callback function 'fn' for each region.
2207 */
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002208
2209struct walk_memory_regions_data
bellard9fa3e852004-01-04 18:06:42 +00002210{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002211 walk_memory_regions_fn fn;
2212 void *priv;
2213 unsigned long start;
2214 int prot;
2215};
bellard9fa3e852004-01-04 18:06:42 +00002216
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002217static int walk_memory_regions_end(struct walk_memory_regions_data *data,
2218 unsigned long end, int new_prot)
2219{
2220 if (data->start != -1ul) {
2221 int rc = data->fn(data->priv, data->start, end, data->prot);
2222 if (rc != 0) {
2223 return rc;
bellard9fa3e852004-01-04 18:06:42 +00002224 }
bellard33417e72003-08-10 21:47:01 +00002225 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002226
2227 data->start = (new_prot ? end : -1ul);
2228 data->prot = new_prot;
2229
2230 return 0;
2231}
2232
2233static int walk_memory_regions_1(struct walk_memory_regions_data *data,
2234 unsigned long base, int level, void **lp)
2235{
2236 unsigned long pa;
2237 int i, rc;
2238
2239 if (*lp == NULL) {
2240 return walk_memory_regions_end(data, base, 0);
2241 }
2242
2243 if (level == 0) {
2244 PageDesc *pd = *lp;
2245 for (i = 0; i < L2_BITS; ++i) {
2246 int prot = pd[i].flags;
2247
2248 pa = base | (i << TARGET_PAGE_BITS);
2249 if (prot != data->prot) {
2250 rc = walk_memory_regions_end(data, pa, prot);
2251 if (rc != 0) {
2252 return rc;
2253 }
2254 }
2255 }
2256 } else {
2257 void **pp = *lp;
2258 for (i = 0; i < L2_BITS; ++i) {
2259 pa = base | (i << (TARGET_PAGE_BITS + L2_BITS * level));
2260 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2261 if (rc != 0) {
2262 return rc;
2263 }
2264 }
2265 }
2266
2267 return 0;
2268}
2269
/* Walk every mapped guest page, coalescing runs of pages with identical
   protection into regions, and call 'fn(priv, start, end, prot)' once per
   region.  Stops early and returns the callback's value if it is non-zero;
   otherwise returns 0 after flushing the final region. */
int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    unsigned long i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;   /* no region open yet */
    data.prot = 0;

    /* Descend from each top-level (l1_map) entry; the remaining levels
       each resolve L2_BITS address bits. */
    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    /* Flush the last open region, if any. */
    return walk_memory_regions_end(&data, 0, 0);
}
2290
2291static int dump_region(void *priv, unsigned long start,
2292 unsigned long end, unsigned long prot)
2293{
2294 FILE *f = (FILE *)priv;
2295
2296 (void) fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2297 start, end, end - start,
2298 ((prot & PAGE_READ) ? 'r' : '-'),
2299 ((prot & PAGE_WRITE) ? 'w' : '-'),
2300 ((prot & PAGE_EXEC) ? 'x' : '-'));
2301
2302 return (0);
2303}
2304
/* Dump all guest memory mappings to 'f' in a /proc/self/maps-like
   format: a header line followed by one line per region, produced by
   the dump_region() callback. */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}
2312
pbrook53a59602006-03-25 19:31:22 +00002313int page_get_flags(target_ulong address)
bellard33417e72003-08-10 21:47:01 +00002314{
bellard9fa3e852004-01-04 18:06:42 +00002315 PageDesc *p;
2316
2317 p = page_find(address >> TARGET_PAGE_BITS);
bellard33417e72003-08-10 21:47:01 +00002318 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00002319 return 0;
2320 return p->flags;
bellard33417e72003-08-10 21:47:01 +00002321}
2322
Richard Henderson376a7902010-03-10 15:57:04 -08002323/* Modify the flags of a page and invalidate the code if necessary.
2324 The flag PAGE_WRITE_ORG is positioned automatically depending
2325 on PAGE_WRITE. The mmap_lock should already be held. */
pbrook53a59602006-03-25 19:31:22 +00002326void page_set_flags(target_ulong start, target_ulong end, int flags)
bellard9fa3e852004-01-04 18:06:42 +00002327{
Richard Henderson376a7902010-03-10 15:57:04 -08002328 target_ulong addr, len;
bellard9fa3e852004-01-04 18:06:42 +00002329
Richard Henderson376a7902010-03-10 15:57:04 -08002330 /* This function should never be called with addresses outside the
2331 guest address space. If this assert fires, it probably indicates
2332 a missing call to h2g_valid. */
2333#if HOST_LONG_BITS > L1_MAP_ADDR_SPACE_BITS
2334 assert(end < (1ul << L1_MAP_ADDR_SPACE_BITS));
2335#endif
2336 assert(start < end);
2337
bellard9fa3e852004-01-04 18:06:42 +00002338 start = start & TARGET_PAGE_MASK;
2339 end = TARGET_PAGE_ALIGN(end);
Richard Henderson376a7902010-03-10 15:57:04 -08002340
2341 if (flags & PAGE_WRITE) {
bellard9fa3e852004-01-04 18:06:42 +00002342 flags |= PAGE_WRITE_ORG;
Richard Henderson376a7902010-03-10 15:57:04 -08002343 }
2344
2345 for (addr = start, len = end - start;
2346 len != 0;
2347 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2348 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2349
2350 /* If the write protection bit is set, then we invalidate
2351 the code inside. */
ths5fafdf22007-09-16 21:08:06 +00002352 if (!(p->flags & PAGE_WRITE) &&
bellard9fa3e852004-01-04 18:06:42 +00002353 (flags & PAGE_WRITE) &&
2354 p->first_tb) {
bellardd720b932004-04-25 17:57:43 +00002355 tb_invalidate_phys_page(addr, 0, NULL);
bellard9fa3e852004-01-04 18:06:42 +00002356 }
2357 p->flags = flags;
2358 }
bellard9fa3e852004-01-04 18:06:42 +00002359}
2360
ths3d97b402007-11-02 19:02:07 +00002361int page_check_range(target_ulong start, target_ulong len, int flags)
2362{
2363 PageDesc *p;
2364 target_ulong end;
2365 target_ulong addr;
2366
Richard Henderson376a7902010-03-10 15:57:04 -08002367 /* This function should never be called with addresses outside the
2368 guest address space. If this assert fires, it probably indicates
2369 a missing call to h2g_valid. */
2370#if HOST_LONG_BITS > L1_MAP_ADDR_SPACE_BITS
2371 assert(start < (1ul << L1_MAP_ADDR_SPACE_BITS));
2372#endif
2373
2374 if (start + len - 1 < start) {
2375 /* We've wrapped around. */
balrog55f280c2008-10-28 10:24:11 +00002376 return -1;
Richard Henderson376a7902010-03-10 15:57:04 -08002377 }
balrog55f280c2008-10-28 10:24:11 +00002378
ths3d97b402007-11-02 19:02:07 +00002379 end = TARGET_PAGE_ALIGN(start+len); /* must do before we loose bits in the next step */
2380 start = start & TARGET_PAGE_MASK;
2381
Richard Henderson376a7902010-03-10 15:57:04 -08002382 for (addr = start, len = end - start;
2383 len != 0;
2384 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
ths3d97b402007-11-02 19:02:07 +00002385 p = page_find(addr >> TARGET_PAGE_BITS);
2386 if( !p )
2387 return -1;
2388 if( !(p->flags & PAGE_VALID) )
2389 return -1;
2390
bellarddae32702007-11-14 10:51:00 +00002391 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
ths3d97b402007-11-02 19:02:07 +00002392 return -1;
bellarddae32702007-11-14 10:51:00 +00002393 if (flags & PAGE_WRITE) {
2394 if (!(p->flags & PAGE_WRITE_ORG))
2395 return -1;
2396 /* unprotect the page if it was put read-only because it
2397 contains translated code */
2398 if (!(p->flags & PAGE_WRITE)) {
2399 if (!page_unprotect(addr, 0, NULL))
2400 return -1;
2401 }
2402 return 0;
2403 }
ths3d97b402007-11-02 19:02:07 +00002404 }
2405 return 0;
2406}
2407
bellard9fa3e852004-01-04 18:06:42 +00002408/* called from signal handler: invalidate the code and unprotect the
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002409 page. Return TRUE if the fault was successfully handled. */
pbrook53a59602006-03-25 19:31:22 +00002410int page_unprotect(target_ulong address, unsigned long pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00002411{
2412 unsigned int page_index, prot, pindex;
2413 PageDesc *p, *p1;
pbrook53a59602006-03-25 19:31:22 +00002414 target_ulong host_start, host_end, addr;
bellard9fa3e852004-01-04 18:06:42 +00002415
pbrookc8a706f2008-06-02 16:16:42 +00002416 /* Technically this isn't safe inside a signal handler. However we
2417 know this only ever happens in a synchronous SEGV handler, so in
2418 practice it seems to be ok. */
2419 mmap_lock();
2420
bellard83fb7ad2004-07-05 21:25:26 +00002421 host_start = address & qemu_host_page_mask;
bellard9fa3e852004-01-04 18:06:42 +00002422 page_index = host_start >> TARGET_PAGE_BITS;
2423 p1 = page_find(page_index);
pbrookc8a706f2008-06-02 16:16:42 +00002424 if (!p1) {
2425 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002426 return 0;
pbrookc8a706f2008-06-02 16:16:42 +00002427 }
bellard83fb7ad2004-07-05 21:25:26 +00002428 host_end = host_start + qemu_host_page_size;
bellard9fa3e852004-01-04 18:06:42 +00002429 p = p1;
2430 prot = 0;
2431 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2432 prot |= p->flags;
2433 p++;
2434 }
2435 /* if the page was really writable, then we change its
2436 protection back to writable */
2437 if (prot & PAGE_WRITE_ORG) {
2438 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2439 if (!(p1[pindex].flags & PAGE_WRITE)) {
ths5fafdf22007-09-16 21:08:06 +00002440 mprotect((void *)g2h(host_start), qemu_host_page_size,
bellard9fa3e852004-01-04 18:06:42 +00002441 (prot & PAGE_BITS) | PAGE_WRITE);
2442 p1[pindex].flags |= PAGE_WRITE;
2443 /* and since the content will be modified, we must invalidate
2444 the corresponding translated code. */
bellardd720b932004-04-25 17:57:43 +00002445 tb_invalidate_phys_page(address, pc, puc);
bellard9fa3e852004-01-04 18:06:42 +00002446#ifdef DEBUG_TB_CHECK
2447 tb_invalidate_check(address);
2448#endif
pbrookc8a706f2008-06-02 16:16:42 +00002449 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002450 return 1;
2451 }
2452 }
pbrookc8a706f2008-06-02 16:16:42 +00002453 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002454 return 0;
2455}
2456
bellard6a00d602005-11-21 23:25:50 +00002457static inline void tlb_set_dirty(CPUState *env,
2458 unsigned long addr, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002459{
2460}
bellard9fa3e852004-01-04 18:06:42 +00002461#endif /* defined(CONFIG_USER_ONLY) */
2462
pbrooke2eef172008-06-08 01:09:01 +00002463#if !defined(CONFIG_USER_ONLY)
pbrook8da3ff12008-12-01 18:59:50 +00002464
Paul Brookc04b2b72010-03-01 03:31:14 +00002465#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2466typedef struct subpage_t {
2467 target_phys_addr_t base;
2468 CPUReadMemoryFunc * const *mem_read[TARGET_PAGE_SIZE][4];
2469 CPUWriteMemoryFunc * const *mem_write[TARGET_PAGE_SIZE][4];
2470 void *opaque[TARGET_PAGE_SIZE][2][4];
2471 ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
2472} subpage_t;
2473
Anthony Liguoric227f092009-10-01 16:12:16 -05002474static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2475 ram_addr_t memory, ram_addr_t region_offset);
2476static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2477 ram_addr_t orig_memory, ram_addr_t region_offset);
blueswir1db7b5422007-05-26 17:36:03 +00002478#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2479 need_subpage) \
2480 do { \
2481 if (addr > start_addr) \
2482 start_addr2 = 0; \
2483 else { \
2484 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2485 if (start_addr2 > 0) \
2486 need_subpage = 1; \
2487 } \
2488 \
blueswir149e9fba2007-05-30 17:25:06 +00002489 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
blueswir1db7b5422007-05-26 17:36:03 +00002490 end_addr2 = TARGET_PAGE_SIZE - 1; \
2491 else { \
2492 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2493 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2494 need_subpage = 1; \
2495 } \
2496 } while (0)
2497
Michael S. Tsirkin8f2498f2009-09-29 18:53:16 +02002498/* register physical memory.
2499 For RAM, 'size' must be a multiple of the target page size.
2500 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
pbrook8da3ff12008-12-01 18:59:50 +00002501 io memory page. The address used when calling the IO function is
2502 the offset from the start of the region, plus region_offset. Both
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002503 start_addr and region_offset are rounded down to a page boundary
pbrook8da3ff12008-12-01 18:59:50 +00002504 before calculating this offset. This should not be a problem unless
2505 the low bits of start_addr and region_offset differ. */
Anthony Liguoric227f092009-10-01 16:12:16 -05002506void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2507 ram_addr_t size,
2508 ram_addr_t phys_offset,
2509 ram_addr_t region_offset)
bellard33417e72003-08-10 21:47:01 +00002510{
Anthony Liguoric227f092009-10-01 16:12:16 -05002511 target_phys_addr_t addr, end_addr;
bellard92e873b2004-05-21 14:52:29 +00002512 PhysPageDesc *p;
bellard9d420372006-06-25 22:25:22 +00002513 CPUState *env;
Anthony Liguoric227f092009-10-01 16:12:16 -05002514 ram_addr_t orig_size = size;
blueswir1db7b5422007-05-26 17:36:03 +00002515 void *subpage;
bellard33417e72003-08-10 21:47:01 +00002516
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002517 cpu_notify_set_memory(start_addr, size, phys_offset);
2518
pbrook67c4d232009-02-23 13:16:07 +00002519 if (phys_offset == IO_MEM_UNASSIGNED) {
2520 region_offset = start_addr;
2521 }
pbrook8da3ff12008-12-01 18:59:50 +00002522 region_offset &= TARGET_PAGE_MASK;
bellard5fd386f2004-05-23 21:11:22 +00002523 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
Anthony Liguoric227f092009-10-01 16:12:16 -05002524 end_addr = start_addr + (target_phys_addr_t)size;
blueswir149e9fba2007-05-30 17:25:06 +00002525 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
blueswir1db7b5422007-05-26 17:36:03 +00002526 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2527 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
Anthony Liguoric227f092009-10-01 16:12:16 -05002528 ram_addr_t orig_memory = p->phys_offset;
2529 target_phys_addr_t start_addr2, end_addr2;
blueswir1db7b5422007-05-26 17:36:03 +00002530 int need_subpage = 0;
2531
2532 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2533 need_subpage);
blueswir14254fab2008-01-01 16:57:19 +00002534 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
blueswir1db7b5422007-05-26 17:36:03 +00002535 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2536 subpage = subpage_init((addr & TARGET_PAGE_MASK),
pbrook8da3ff12008-12-01 18:59:50 +00002537 &p->phys_offset, orig_memory,
2538 p->region_offset);
blueswir1db7b5422007-05-26 17:36:03 +00002539 } else {
2540 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2541 >> IO_MEM_SHIFT];
2542 }
pbrook8da3ff12008-12-01 18:59:50 +00002543 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2544 region_offset);
2545 p->region_offset = 0;
blueswir1db7b5422007-05-26 17:36:03 +00002546 } else {
2547 p->phys_offset = phys_offset;
2548 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2549 (phys_offset & IO_MEM_ROMD))
2550 phys_offset += TARGET_PAGE_SIZE;
2551 }
2552 } else {
2553 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2554 p->phys_offset = phys_offset;
pbrook8da3ff12008-12-01 18:59:50 +00002555 p->region_offset = region_offset;
blueswir1db7b5422007-05-26 17:36:03 +00002556 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
pbrook8da3ff12008-12-01 18:59:50 +00002557 (phys_offset & IO_MEM_ROMD)) {
blueswir1db7b5422007-05-26 17:36:03 +00002558 phys_offset += TARGET_PAGE_SIZE;
pbrook0e8f0962008-12-02 09:02:15 +00002559 } else {
Anthony Liguoric227f092009-10-01 16:12:16 -05002560 target_phys_addr_t start_addr2, end_addr2;
blueswir1db7b5422007-05-26 17:36:03 +00002561 int need_subpage = 0;
2562
2563 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2564 end_addr2, need_subpage);
2565
blueswir14254fab2008-01-01 16:57:19 +00002566 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
blueswir1db7b5422007-05-26 17:36:03 +00002567 subpage = subpage_init((addr & TARGET_PAGE_MASK),
pbrook8da3ff12008-12-01 18:59:50 +00002568 &p->phys_offset, IO_MEM_UNASSIGNED,
pbrook67c4d232009-02-23 13:16:07 +00002569 addr & TARGET_PAGE_MASK);
blueswir1db7b5422007-05-26 17:36:03 +00002570 subpage_register(subpage, start_addr2, end_addr2,
pbrook8da3ff12008-12-01 18:59:50 +00002571 phys_offset, region_offset);
2572 p->region_offset = 0;
blueswir1db7b5422007-05-26 17:36:03 +00002573 }
2574 }
2575 }
pbrook8da3ff12008-12-01 18:59:50 +00002576 region_offset += TARGET_PAGE_SIZE;
bellard33417e72003-08-10 21:47:01 +00002577 }
ths3b46e622007-09-17 08:09:54 +00002578
bellard9d420372006-06-25 22:25:22 +00002579 /* since each CPU stores ram addresses in its TLB cache, we must
2580 reset the modified entries */
2581 /* XXX: slow ! */
2582 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2583 tlb_flush(env, 1);
2584 }
bellard33417e72003-08-10 21:47:01 +00002585}
2586
bellardba863452006-09-24 18:41:10 +00002587/* XXX: temporary until new memory mapping API */
Anthony Liguoric227f092009-10-01 16:12:16 -05002588ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
bellardba863452006-09-24 18:41:10 +00002589{
2590 PhysPageDesc *p;
2591
2592 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2593 if (!p)
2594 return IO_MEM_UNASSIGNED;
2595 return p->phys_offset;
2596}
2597
Anthony Liguoric227f092009-10-01 16:12:16 -05002598void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002599{
2600 if (kvm_enabled())
2601 kvm_coalesce_mmio_region(addr, size);
2602}
2603
Anthony Liguoric227f092009-10-01 16:12:16 -05002604void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002605{
2606 if (kvm_enabled())
2607 kvm_uncoalesce_mmio_region(addr, size);
2608}
2609
Sheng Yang62a27442010-01-26 19:21:16 +08002610void qemu_flush_coalesced_mmio_buffer(void)
2611{
2612 if (kvm_enabled())
2613 kvm_flush_coalesced_mmio_buffer();
2614}
2615
Marcelo Tosattic9027602010-03-01 20:25:08 -03002616#if defined(__linux__) && !defined(TARGET_S390X)
2617
2618#include <sys/vfs.h>
2619
2620#define HUGETLBFS_MAGIC 0x958458f6
2621
2622static long gethugepagesize(const char *path)
2623{
2624 struct statfs fs;
2625 int ret;
2626
2627 do {
2628 ret = statfs(path, &fs);
2629 } while (ret != 0 && errno == EINTR);
2630
2631 if (ret != 0) {
2632 perror("statfs");
2633 return 0;
2634 }
2635
2636 if (fs.f_type != HUGETLBFS_MAGIC)
2637 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
2638
2639 return fs.f_bsize;
2640}
2641
/* Allocate 'memory' bytes of guest RAM backed by a (huge-page) file in
   directory 'path' (the -mem-path option).  Returns the mapped area, or
   NULL on any failure so the caller can fall back / abort.  The backing
   file is created with mkstemp and unlinked immediately, so it lives only
   as long as the mapping.
   NOTE(review): 'fd' is not closed on the success path; the mapping keeps
   the file alive regardless, but the descriptor itself is leaked — confirm
   whether it is kept intentionally. */
static void *file_ram_alloc(ram_addr_t memory, const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    /* Refuse allocations smaller than one huge page. */
    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("mkstemp");
        free(filename);
        return NULL;
    }
    /* Unlink right away: the file disappears when the mapping is gone. */
    unlink(filename);
    free(filename);

    /* Round the size up to a whole number of huge pages. */
    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }
    return area;
}
2707#endif
2708
Anthony Liguoric227f092009-10-01 16:12:16 -05002709ram_addr_t qemu_ram_alloc(ram_addr_t size)
pbrook94a6b542009-04-11 17:15:54 +00002710{
2711 RAMBlock *new_block;
2712
pbrook94a6b542009-04-11 17:15:54 +00002713 size = TARGET_PAGE_ALIGN(size);
2714 new_block = qemu_malloc(sizeof(*new_block));
2715
Marcelo Tosattic9027602010-03-01 20:25:08 -03002716 if (mem_path) {
2717#if defined (__linux__) && !defined(TARGET_S390X)
2718 new_block->host = file_ram_alloc(size, mem_path);
2719 if (!new_block->host)
2720 exit(1);
Alexander Graf6b024942009-12-05 12:44:25 +01002721#else
Marcelo Tosattic9027602010-03-01 20:25:08 -03002722 fprintf(stderr, "-mem-path option unsupported\n");
2723 exit(1);
2724#endif
2725 } else {
2726#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2727 /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
2728 new_block->host = mmap((void*)0x1000000, size,
2729 PROT_EXEC|PROT_READ|PROT_WRITE,
2730 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
2731#else
2732 new_block->host = qemu_vmalloc(size);
Alexander Graf6b024942009-12-05 12:44:25 +01002733#endif
Izik Eidusccb167e2009-10-08 16:39:39 +02002734#ifdef MADV_MERGEABLE
Marcelo Tosattic9027602010-03-01 20:25:08 -03002735 madvise(new_block->host, size, MADV_MERGEABLE);
Izik Eidusccb167e2009-10-08 16:39:39 +02002736#endif
Marcelo Tosattic9027602010-03-01 20:25:08 -03002737 }
pbrook94a6b542009-04-11 17:15:54 +00002738 new_block->offset = last_ram_offset;
2739 new_block->length = size;
2740
2741 new_block->next = ram_blocks;
2742 ram_blocks = new_block;
2743
2744 phys_ram_dirty = qemu_realloc(phys_ram_dirty,
2745 (last_ram_offset + size) >> TARGET_PAGE_BITS);
2746 memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
2747 0xff, size >> TARGET_PAGE_BITS);
2748
2749 last_ram_offset += size;
2750
Jan Kiszka6f0437e2009-04-26 18:03:40 +02002751 if (kvm_enabled())
2752 kvm_setup_guest_memory(new_block->host, size);
2753
pbrook94a6b542009-04-11 17:15:54 +00002754 return new_block->offset;
2755}
bellarde9a1ab12007-02-08 23:08:38 +00002756
Anthony Liguoric227f092009-10-01 16:12:16 -05002757void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00002758{
pbrook94a6b542009-04-11 17:15:54 +00002759 /* TODO: implement this. */
bellarde9a1ab12007-02-08 23:08:38 +00002760}
2761
pbrookdc828ca2009-04-09 22:21:07 +00002762/* Return a host pointer to ram allocated with qemu_ram_alloc.
pbrook5579c7f2009-04-11 14:47:08 +00002763 With the exception of the softmmu code in this file, this should
2764 only be used for local memory (e.g. video ram) that the device owns,
2765 and knows it isn't going to access beyond the end of the block.
2766
2767 It should not be used for general purpose DMA.
2768 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2769 */
Anthony Liguoric227f092009-10-01 16:12:16 -05002770void *qemu_get_ram_ptr(ram_addr_t addr)
pbrookdc828ca2009-04-09 22:21:07 +00002771{
pbrook94a6b542009-04-11 17:15:54 +00002772 RAMBlock *prev;
2773 RAMBlock **prevp;
2774 RAMBlock *block;
2775
pbrook94a6b542009-04-11 17:15:54 +00002776 prev = NULL;
2777 prevp = &ram_blocks;
2778 block = ram_blocks;
2779 while (block && (block->offset > addr
2780 || block->offset + block->length <= addr)) {
2781 if (prev)
2782 prevp = &prev->next;
2783 prev = block;
2784 block = block->next;
2785 }
2786 if (!block) {
2787 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2788 abort();
2789 }
2790 /* Move this entry to to start of the list. */
2791 if (prev) {
2792 prev->next = block->next;
2793 block->next = *prevp;
2794 *prevp = block;
2795 }
2796 return block->host + (addr - block->offset);
pbrookdc828ca2009-04-09 22:21:07 +00002797}
2798
pbrook5579c7f2009-04-11 14:47:08 +00002799/* Some of the softmmu routines need to translate from a host pointer
2800 (typically a TLB entry) back to a ram offset. */
Anthony Liguoric227f092009-10-01 16:12:16 -05002801ram_addr_t qemu_ram_addr_from_host(void *ptr)
pbrook5579c7f2009-04-11 14:47:08 +00002802{
pbrook94a6b542009-04-11 17:15:54 +00002803 RAMBlock *prev;
pbrook94a6b542009-04-11 17:15:54 +00002804 RAMBlock *block;
2805 uint8_t *host = ptr;
2806
pbrook94a6b542009-04-11 17:15:54 +00002807 prev = NULL;
pbrook94a6b542009-04-11 17:15:54 +00002808 block = ram_blocks;
2809 while (block && (block->host > host
2810 || block->host + block->length <= host)) {
pbrook94a6b542009-04-11 17:15:54 +00002811 prev = block;
2812 block = block->next;
2813 }
2814 if (!block) {
2815 fprintf(stderr, "Bad ram pointer %p\n", ptr);
2816 abort();
2817 }
2818 return block->offset + (host - block->host);
pbrook5579c7f2009-04-11 14:47:08 +00002819}
2820
Anthony Liguoric227f092009-10-01 16:12:16 -05002821static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
bellard33417e72003-08-10 21:47:01 +00002822{
pbrook67d3b952006-12-18 05:03:52 +00002823#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00002824 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
pbrook67d3b952006-12-18 05:03:52 +00002825#endif
Edgar E. Iglesiasfaed1c22009-09-03 13:25:09 +02002826#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
blueswir1e18231a2008-10-06 18:46:28 +00002827 do_unassigned_access(addr, 0, 0, 0, 1);
2828#endif
2829 return 0;
2830}
2831
Anthony Liguoric227f092009-10-01 16:12:16 -05002832static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
blueswir1e18231a2008-10-06 18:46:28 +00002833{
2834#ifdef DEBUG_UNASSIGNED
2835 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2836#endif
Edgar E. Iglesiasfaed1c22009-09-03 13:25:09 +02002837#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
blueswir1e18231a2008-10-06 18:46:28 +00002838 do_unassigned_access(addr, 0, 0, 0, 2);
2839#endif
2840 return 0;
2841}
2842
Anthony Liguoric227f092009-10-01 16:12:16 -05002843static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
blueswir1e18231a2008-10-06 18:46:28 +00002844{
2845#ifdef DEBUG_UNASSIGNED
2846 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2847#endif
Edgar E. Iglesiasfaed1c22009-09-03 13:25:09 +02002848#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
blueswir1e18231a2008-10-06 18:46:28 +00002849 do_unassigned_access(addr, 0, 0, 0, 4);
blueswir1b4f0a312007-05-06 17:59:24 +00002850#endif
bellard33417e72003-08-10 21:47:01 +00002851 return 0;
2852}
2853
Anthony Liguoric227f092009-10-01 16:12:16 -05002854static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
bellard33417e72003-08-10 21:47:01 +00002855{
pbrook67d3b952006-12-18 05:03:52 +00002856#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00002857 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
pbrook67d3b952006-12-18 05:03:52 +00002858#endif
Edgar E. Iglesiasfaed1c22009-09-03 13:25:09 +02002859#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
blueswir1e18231a2008-10-06 18:46:28 +00002860 do_unassigned_access(addr, 1, 0, 0, 1);
2861#endif
2862}
2863
Anthony Liguoric227f092009-10-01 16:12:16 -05002864static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
blueswir1e18231a2008-10-06 18:46:28 +00002865{
2866#ifdef DEBUG_UNASSIGNED
2867 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2868#endif
Edgar E. Iglesiasfaed1c22009-09-03 13:25:09 +02002869#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
blueswir1e18231a2008-10-06 18:46:28 +00002870 do_unassigned_access(addr, 1, 0, 0, 2);
2871#endif
2872}
2873
Anthony Liguoric227f092009-10-01 16:12:16 -05002874static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
blueswir1e18231a2008-10-06 18:46:28 +00002875{
2876#ifdef DEBUG_UNASSIGNED
2877 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2878#endif
Edgar E. Iglesiasfaed1c22009-09-03 13:25:09 +02002879#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
blueswir1e18231a2008-10-06 18:46:28 +00002880 do_unassigned_access(addr, 1, 0, 0, 4);
blueswir1b4f0a312007-05-06 17:59:24 +00002881#endif
bellard33417e72003-08-10 21:47:01 +00002882}
2883
Blue Swirld60efc62009-08-25 18:29:31 +00002884static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
bellard33417e72003-08-10 21:47:01 +00002885 unassigned_mem_readb,
blueswir1e18231a2008-10-06 18:46:28 +00002886 unassigned_mem_readw,
2887 unassigned_mem_readl,
bellard33417e72003-08-10 21:47:01 +00002888};
2889
Blue Swirld60efc62009-08-25 18:29:31 +00002890static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
bellard33417e72003-08-10 21:47:01 +00002891 unassigned_mem_writeb,
blueswir1e18231a2008-10-06 18:46:28 +00002892 unassigned_mem_writew,
2893 unassigned_mem_writel,
bellard33417e72003-08-10 21:47:01 +00002894};
2895
Anthony Liguoric227f092009-10-01 16:12:16 -05002896static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
pbrook0f459d12008-06-09 00:20:13 +00002897 uint32_t val)
bellard1ccde1c2004-02-06 19:46:14 +00002898{
bellard3a7d9292005-08-21 09:26:42 +00002899 int dirty_flags;
bellard3a7d9292005-08-21 09:26:42 +00002900 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2901 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2902#if !defined(CONFIG_USER_ONLY)
2903 tb_invalidate_phys_page_fast(ram_addr, 1);
2904 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2905#endif
2906 }
pbrook5579c7f2009-04-11 14:47:08 +00002907 stb_p(qemu_get_ram_ptr(ram_addr), val);
bellardf23db162005-08-21 19:12:28 +00002908 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2909 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2910 /* we remove the notdirty callback only if the code has been
2911 flushed */
2912 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00002913 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00002914}
2915
Anthony Liguoric227f092009-10-01 16:12:16 -05002916static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
pbrook0f459d12008-06-09 00:20:13 +00002917 uint32_t val)
bellard1ccde1c2004-02-06 19:46:14 +00002918{
bellard3a7d9292005-08-21 09:26:42 +00002919 int dirty_flags;
bellard3a7d9292005-08-21 09:26:42 +00002920 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2921 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2922#if !defined(CONFIG_USER_ONLY)
2923 tb_invalidate_phys_page_fast(ram_addr, 2);
2924 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2925#endif
2926 }
pbrook5579c7f2009-04-11 14:47:08 +00002927 stw_p(qemu_get_ram_ptr(ram_addr), val);
bellardf23db162005-08-21 19:12:28 +00002928 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2929 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2930 /* we remove the notdirty callback only if the code has been
2931 flushed */
2932 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00002933 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00002934}
2935
Anthony Liguoric227f092009-10-01 16:12:16 -05002936static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
pbrook0f459d12008-06-09 00:20:13 +00002937 uint32_t val)
bellard1ccde1c2004-02-06 19:46:14 +00002938{
bellard3a7d9292005-08-21 09:26:42 +00002939 int dirty_flags;
bellard3a7d9292005-08-21 09:26:42 +00002940 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2941 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2942#if !defined(CONFIG_USER_ONLY)
2943 tb_invalidate_phys_page_fast(ram_addr, 4);
2944 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2945#endif
2946 }
pbrook5579c7f2009-04-11 14:47:08 +00002947 stl_p(qemu_get_ram_ptr(ram_addr), val);
bellardf23db162005-08-21 19:12:28 +00002948 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2949 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2950 /* we remove the notdirty callback only if the code has been
2951 flushed */
2952 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00002953 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00002954}
2955
/* Placeholder read table for I/O slots that must never be read through
   (ROM and notdirty slots handle reads via the direct RAM path). */
static CPUReadMemoryFunc * const error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};
2961
/* Write dispatch table (byte/word/long) for the IO_MEM_NOTDIRTY slot. */
static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
2967
/* Generate a debug exception if a watchpoint has been hit.
   Called from the watch_mem_* accessors with the page offset of the
   access, a length mask (~0x0 / ~0x1 / ~0x3 for 1/2/4-byte accesses)
   and the access direction (BP_MEM_READ / BP_MEM_WRITE). */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    /* Reconstruct the guest virtual address of the access from the
       faulting page recorded in mem_io_vaddr plus the page offset. */
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        /* Two-sided overlap test between the access range and the
           watchpoint range, using the precomputed length masks. */
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                /* Find and discard the TB containing the access so that
                   execution can be restarted with watchpoint handling. */
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    /* Stop before the access: report EXCP_DEBUG now. */
                    env->exception_index = EXCP_DEBUG;
                } else {
                    /* Stop after the access: regenerate a single-insn TB
                       so the instruction completes, then re-enter here
                       (watchpoint_hit is set) and raise the interrupt. */
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
3012
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines. */
/* 1-byte watched read: check watchpoints, then forward to ldub_phys(). */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}
3021
/* 2-byte watched read: check watchpoints, then forward to lduw_phys(). */
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}
3027
/* 4-byte watched read: check watchpoints, then forward to ldl_phys(). */
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}
3033
/* 1-byte watched write: check watchpoints, then forward to stb_phys(). */
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}
3040
/* 2-byte watched write: check watchpoints, then forward to stw_phys(). */
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}
3047
/* 4-byte watched write: check watchpoints, then forward to stl_phys(). */
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}
3054
/* Read dispatch table (byte/word/long) for the watchpoint I/O slot. */
static CPUReadMemoryFunc * const watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};
3060
/* Write dispatch table (byte/word/long) for the watchpoint I/O slot. */
static CPUWriteMemoryFunc * const watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
pbrook6658ffb2007-03-16 23:58:11 +00003066
Anthony Liguoric227f092009-10-01 16:12:16 -05003067static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
blueswir1db7b5422007-05-26 17:36:03 +00003068 unsigned int len)
3069{
blueswir1db7b5422007-05-26 17:36:03 +00003070 uint32_t ret;
3071 unsigned int idx;
3072
pbrook8da3ff12008-12-01 18:59:50 +00003073 idx = SUBPAGE_IDX(addr);
blueswir1db7b5422007-05-26 17:36:03 +00003074#if defined(DEBUG_SUBPAGE)
3075 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3076 mmio, len, addr, idx);
3077#endif
pbrook8da3ff12008-12-01 18:59:50 +00003078 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
3079 addr + mmio->region_offset[idx][0][len]);
blueswir1db7b5422007-05-26 17:36:03 +00003080
3081 return ret;
3082}
3083
Anthony Liguoric227f092009-10-01 16:12:16 -05003084static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
blueswir1db7b5422007-05-26 17:36:03 +00003085 uint32_t value, unsigned int len)
3086{
blueswir1db7b5422007-05-26 17:36:03 +00003087 unsigned int idx;
3088
pbrook8da3ff12008-12-01 18:59:50 +00003089 idx = SUBPAGE_IDX(addr);
blueswir1db7b5422007-05-26 17:36:03 +00003090#if defined(DEBUG_SUBPAGE)
3091 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
3092 mmio, len, addr, idx, value);
3093#endif
pbrook8da3ff12008-12-01 18:59:50 +00003094 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
3095 addr + mmio->region_offset[idx][1][len],
3096 value);
blueswir1db7b5422007-05-26 17:36:03 +00003097}
3098
/* Byte-sized sub-page read entry point (size index 0). */
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}
3107
/* Byte-sized sub-page write entry point (size index 0). */
static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}
3116
/* Word-sized sub-page read entry point (size index 1). */
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}
3125
/* Word-sized sub-page write entry point (size index 1). */
static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}
3134
/* Long-sized sub-page read entry point (size index 2). */
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}
3143
/* Long-sized sub-page write entry point (size index 2). */
static void subpage_writel (void *opaque,
                            target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}
3152
/* Read dispatch table registered for sub-page containers. */
static CPUReadMemoryFunc * const subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};
3158
/* Write dispatch table registered for sub-page containers. */
static CPUWriteMemoryFunc * const subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
3164
/* Bind the memory handlers of I/O slot 'memory' to the sub-page index
   range [start, end] (byte offsets within one target page) of 'mmio'.
   'region_offset' is recorded per entry and later added to the access
   address by subpage_readlen()/subpage_writelen().
   Returns 0 on success, -1 if either bound is outside the page. */
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        /* Copy each of the 4 size slots that the slot actually provides;
           entries left NULL keep whatever was previously registered. */
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][0][i] = region_offset;
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][1][i] = region_offset;
            }
        }
    }

    return 0;
}
3197
/* Allocate and register a sub-page container for the page at 'base'.
   The whole page is initially mapped to 'orig_memory'/'region_offset';
   callers then overlay finer-grained regions via subpage_register().
   *phys receives the new slot's phys_offset value (with IO_MEM_SUBPAGE
   set); the container itself is returned. */
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory, ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    /* Pre-fill the entire page with the original backing region. */
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
                     region_offset);

    return mmio;
}
3218
aliguori88715652009-02-11 15:20:58 +00003219static int get_free_io_mem_idx(void)
3220{
3221 int i;
3222
3223 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3224 if (!io_mem_used[i]) {
3225 io_mem_used[i] = 1;
3226 return i;
3227 }
Riku Voipioc6703b42009-12-03 15:56:05 +02003228 fprintf(stderr, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
aliguori88715652009-02-11 15:20:58 +00003229 return -1;
3230}
3231
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer.
   If io_index is non zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned if error. */
static int cpu_register_io_memory_fixed(int io_index,
                                        CPUReadMemoryFunc * const *mem_read,
                                        CPUWriteMemoryFunc * const *mem_write,
                                        void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        /* Allocate a fresh slot. */
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        /* Caller supplied an encoded index (as returned previously):
           strip the shift and bounds-check it. */
        io_index >>= IO_MEM_SHIFT;
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        /* Any missing size handler marks the slot as sub-width so the
           dispatcher knows not every access size is supported. */
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    /* Encode the slot index plus the sub-width flag. */
    return (io_index << IO_MEM_SHIFT) | subwidth;
}
bellard61382a52003-10-27 21:22:23 +00003265
/* Public wrapper: register a new I/O zone (io_index 0 means "allocate"). */
int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
                           CPUWriteMemoryFunc * const *mem_write,
                           void *opaque)
{
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
}
3272
/* Release the I/O slot identified by 'io_table_address' (the encoded
   value returned by cpu_register_io_memory): reset its handlers to the
   unassigned defaults and mark the slot free for reuse. */
void cpu_unregister_io_memory(int io_table_address)
{
    int i;
    int io_index = io_table_address >> IO_MEM_SHIFT;

    for (i=0;i < 3; i++) {
        io_mem_read[io_index][i] = unassigned_mem_read[i];
        io_mem_write[io_index][i] = unassigned_mem_write[i];
    }
    io_mem_opaque[io_index] = NULL;
    io_mem_used[io_index] = 0;
}
3285
/* Set up the fixed built-in I/O slots (ROM, unassigned, notdirty) and
   the dynamically allocated watchpoint slot. */
static void io_mem_init(void)
{
    int i;

    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
    /* Reserve the first 5 slots so get_free_io_mem_idx() never hands
       out an index overlapping the fixed IO_MEM_* entries. */
    for (i=0; i<5; i++)
        io_mem_used[i] = 1;

    io_mem_watch = cpu_register_io_memory(watch_mem_read,
                                          watch_mem_write, NULL);
}
3299
pbrooke2eef172008-06-08 01:09:01 +00003300#endif /* !defined(CONFIG_USER_ONLY) */
3301
bellard13eb76e2004-01-24 15:23:36 +00003302/* physical memory access (slow version, mainly for debug) */
3303#if defined(CONFIG_USER_ONLY)
/* User-mode variant: copy 'len' bytes between guest virtual memory at
   'addr' and 'buf', honouring the page flags.  Direction is chosen by
   'is_write'.  Returns 0 on success, -1 if any page in the range is
   invalid or lacks the required read/write permission. */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        /* Clamp each chunk to the end of the containing guest page. */
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
bellard8df1cd02005-01-28 22:37:22 +00003342
bellard13eb76e2004-01-24 15:23:36 +00003343#else
/* Copy 'len' bytes between guest physical memory at 'addr' and 'buf'
   ('is_write' selects the direction).  RAM pages are accessed directly
   via memcpy (with translated-code invalidation and dirty-bit update on
   writes); MMIO pages are split into the widest aligned 4/2/1-byte
   accesses and routed through the registered I/O handlers. */
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        /* Clamp each chunk to the end of the containing target page. */
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                /* I/O write: dispatch through the slot's handlers. */
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            /* Reads: ROM-with-device (IO_MEM_ROMD) pages are read as RAM;
               other non-RAM/ROM pages go through the I/O handlers. */
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
bellard8df1cd02005-01-28 22:37:22 +00003440
/* used for ROM loading : can write in RAM and ROM */
/* Like the write path of cpu_physical_memory_rw() but also writes into
   ROM pages and skips dirty tracking; pure MMIO pages are silently
   ignored. */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        /* Only RAM, ROM and ROM-device pages are writable here. */
        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
3479
/* Single global bounce buffer used by cpu_physical_memory_map() when the
   requested region is not directly-addressable RAM.  'buffer' is non-NULL
   while the (one allowed) bounce mapping is outstanding. */
typedef struct {
    void *buffer;              /* bounce storage, NULL when free */
    target_phys_addr_t addr;   /* guest physical address mapped */
    target_phys_addr_t len;    /* length of the bounce region */
} BounceBuffer;

static BounceBuffer bounce;
3487
/* Registered callback waiting to be told when the bounce buffer becomes
   free so a failed cpu_physical_memory_map() can be retried. */
typedef struct MapClient {
    void *opaque;                    /* caller context for callback */
    void (*callback)(void *opaque);  /* invoked by cpu_notify_map_clients() */
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003496
/* Register a callback to run when map resources free up; returns an
   opaque handle usable with cpu_unregister_map_client(). */
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = qemu_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}
3506
/* Remove and free a client previously added by cpu_register_map_client().
   '_client' is the handle that function returned. */
void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    qemu_free(client);
}
3514
/* Run every registered map-client callback once, unregistering each as
   it fires (the list is drained).  Called when the bounce buffer is
   released. */
static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
3525
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;   /* bytes successfully mapped so far */
    int l;
    uint8_t *ret = NULL;           /* host pointer for the start of the map */
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            /* Non-RAM page: fall back to the single global bounce buffer.
               It can only start a mapping (done == 0) and only if no other
               mapping currently holds it. */
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                /* Pre-fill the bounce buffer for read mappings. */
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            /* Host addresses stopped being contiguous: end the mapping. */
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}
3587
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        /* Direct RAM mapping: on write, invalidate translated code and
           set dirty bits for every touched page. */
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    /* Bounce-buffer mapping: flush written data back to guest memory,
       release the buffer and wake any waiting map clients. */
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
bellardd0ecd2a2006-04-23 17:14:48 +00003623
/* warning: addr must be aligned */
/* 32-bit load from guest physical memory: RAM (and ROM/ROMD) pages are
   read directly, other pages through the slot's 32-bit read handler. */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
3655
bellard84b7b8e2005-11-28 21:19:04 +00003656/* warning: addr must be aligned */
Anthony Liguoric227f092009-10-01 16:12:16 -05003657uint64_t ldq_phys(target_phys_addr_t addr)
bellard84b7b8e2005-11-28 21:19:04 +00003658{
3659 int io_index;
3660 uint8_t *ptr;
3661 uint64_t val;
3662 unsigned long pd;
3663 PhysPageDesc *p;
3664
3665 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3666 if (!p) {
3667 pd = IO_MEM_UNASSIGNED;
3668 } else {
3669 pd = p->phys_offset;
3670 }
ths3b46e622007-09-17 08:09:54 +00003671
bellard2a4188a2006-06-25 21:54:59 +00003672 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3673 !(pd & IO_MEM_ROMD)) {
bellard84b7b8e2005-11-28 21:19:04 +00003674 /* I/O case */
3675 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00003676 if (p)
3677 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bellard84b7b8e2005-11-28 21:19:04 +00003678#ifdef TARGET_WORDS_BIGENDIAN
3679 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3680 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3681#else
3682 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3683 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3684#endif
3685 } else {
3686 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003687 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bellard84b7b8e2005-11-28 21:19:04 +00003688 (addr & ~TARGET_PAGE_MASK);
3689 val = ldq_p(ptr);
3690 }
3691 return val;
3692}
3693
bellardaab33092005-10-30 20:48:42 +00003694/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05003695uint32_t ldub_phys(target_phys_addr_t addr)
bellardaab33092005-10-30 20:48:42 +00003696{
3697 uint8_t val;
3698 cpu_physical_memory_read(addr, &val, 1);
3699 return val;
3700}
3701
3702/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05003703uint32_t lduw_phys(target_phys_addr_t addr)
bellardaab33092005-10-30 20:48:42 +00003704{
3705 uint16_t val;
3706 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3707 return tswap16(val);
3708}
3709
/* Store a 32-bit word to guest physical memory without dirty tracking.
   warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    /* look up the physical page descriptor; unmapped pages are treated
       as unassigned I/O */
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* not plain RAM: dispatch through the region's 32-bit write handler */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        /* RAM: store directly through the host mapping */
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        /* during live migration the dirty log must still be updated even
           for "notdirty" stores, so migration does not miss this page */
        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}
3748
/* Store a 64-bit word to guest physical memory without marking the RAM
   page dirty or invalidating translated code (see stl_phys_notdirty).
   NOTE(review): unlike stl_phys_notdirty, this variant has no
   in_migration dirty-log update — presumably callers only use it for
   PTE-style updates; confirm before relying on it during migration. */
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    /* look up the physical page descriptor; unmapped pages are treated
       as unassigned I/O */
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* not plain RAM: no 64-bit handler slot exists, so issue two
           32-bit writes in target endian order */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        /* RAM: store directly through the host mapping */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
3780
bellard8df1cd02005-01-28 22:37:22 +00003781/* warning: addr must be aligned */
Anthony Liguoric227f092009-10-01 16:12:16 -05003782void stl_phys(target_phys_addr_t addr, uint32_t val)
bellard8df1cd02005-01-28 22:37:22 +00003783{
3784 int io_index;
3785 uint8_t *ptr;
3786 unsigned long pd;
3787 PhysPageDesc *p;
3788
3789 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3790 if (!p) {
3791 pd = IO_MEM_UNASSIGNED;
3792 } else {
3793 pd = p->phys_offset;
3794 }
ths3b46e622007-09-17 08:09:54 +00003795
bellard3a7d9292005-08-21 09:26:42 +00003796 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
bellard8df1cd02005-01-28 22:37:22 +00003797 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00003798 if (p)
3799 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bellard8df1cd02005-01-28 22:37:22 +00003800 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3801 } else {
3802 unsigned long addr1;
3803 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3804 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003805 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00003806 stl_p(ptr, val);
bellard3a7d9292005-08-21 09:26:42 +00003807 if (!cpu_physical_memory_is_dirty(addr1)) {
3808 /* invalidate code */
3809 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3810 /* set dirty bit */
bellardf23db162005-08-21 19:12:28 +00003811 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3812 (0xff & ~CODE_DIRTY_FLAG);
bellard3a7d9292005-08-21 09:26:42 +00003813 }
bellard8df1cd02005-01-28 22:37:22 +00003814 }
3815}
3816
bellardaab33092005-10-30 20:48:42 +00003817/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05003818void stb_phys(target_phys_addr_t addr, uint32_t val)
bellardaab33092005-10-30 20:48:42 +00003819{
3820 uint8_t v = val;
3821 cpu_physical_memory_write(addr, &v, 1);
3822}
3823
3824/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05003825void stw_phys(target_phys_addr_t addr, uint32_t val)
bellardaab33092005-10-30 20:48:42 +00003826{
3827 uint16_t v = tswap16(val);
3828 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3829}
3830
3831/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05003832void stq_phys(target_phys_addr_t addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00003833{
3834 val = tswap64(val);
3835 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3836}
3837
aliguori5e2972f2009-03-28 17:51:36 +00003838/* virtual memory access for debug (includes writing to ROM) */
ths5fafdf22007-09-16 21:08:06 +00003839int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00003840 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003841{
3842 int l;
Anthony Liguoric227f092009-10-01 16:12:16 -05003843 target_phys_addr_t phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00003844 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00003845
3846 while (len > 0) {
3847 page = addr & TARGET_PAGE_MASK;
3848 phys_addr = cpu_get_phys_page_debug(env, page);
3849 /* if no physical page mapped, return an error */
3850 if (phys_addr == -1)
3851 return -1;
3852 l = (page + TARGET_PAGE_SIZE) - addr;
3853 if (l > len)
3854 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00003855 phys_addr += (addr & ~TARGET_PAGE_MASK);
aliguori5e2972f2009-03-28 17:51:36 +00003856 if (is_write)
3857 cpu_physical_memory_write_rom(phys_addr, buf, l);
3858 else
aliguori5e2972f2009-03-28 17:51:36 +00003859 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
bellard13eb76e2004-01-24 15:23:36 +00003860 len -= l;
3861 buf += l;
3862 addr += l;
3863 }
3864 return 0;
3865}
Paul Brooka68fe892010-03-01 00:08:59 +00003866#endif
bellard13eb76e2004-01-24 15:23:36 +00003867
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB.
   Called from an I/O access made mid-TB: retranslates the current TB so
   that it ends on the faulting I/O instruction, then restarts execution.
   Does not return (exits via cpu_resume_from_signal or cpu_abort). */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    /* map the host return address back to the translated block */
    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    /* total instruction budget at the start of this TB */
    n = env->icount_decr.u16.low + tb->icount;
    /* restore the guest CPU state to the faulting instruction */
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        /* back up to the branch and credit one instruction back */
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        /* back up to the branch and credit one instruction back */
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    /* CF_LAST_IO forces the recompiled TB to end at the I/O insn */
    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    /* drop the old TB and regenerate it with the new instruction count */
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB. */
    cpu_resume_from_signal(env, NULL);
}
3926
bellarde3db7222005-01-26 22:00:47 +00003927void dump_exec_info(FILE *f,
3928 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3929{
3930 int i, target_code_size, max_target_code_size;
3931 int direct_jmp_count, direct_jmp2_count, cross_page;
3932 TranslationBlock *tb;
ths3b46e622007-09-17 08:09:54 +00003933
bellarde3db7222005-01-26 22:00:47 +00003934 target_code_size = 0;
3935 max_target_code_size = 0;
3936 cross_page = 0;
3937 direct_jmp_count = 0;
3938 direct_jmp2_count = 0;
3939 for(i = 0; i < nb_tbs; i++) {
3940 tb = &tbs[i];
3941 target_code_size += tb->size;
3942 if (tb->size > max_target_code_size)
3943 max_target_code_size = tb->size;
3944 if (tb->page_addr[1] != -1)
3945 cross_page++;
3946 if (tb->tb_next_offset[0] != 0xffff) {
3947 direct_jmp_count++;
3948 if (tb->tb_next_offset[1] != 0xffff) {
3949 direct_jmp2_count++;
3950 }
3951 }
3952 }
3953 /* XXX: avoid using doubles ? */
bellard57fec1f2008-02-01 10:50:11 +00003954 cpu_fprintf(f, "Translation buffer state:\n");
bellard26a5f132008-05-28 12:30:31 +00003955 cpu_fprintf(f, "gen code size %ld/%ld\n",
3956 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3957 cpu_fprintf(f, "TB count %d/%d\n",
3958 nb_tbs, code_gen_max_blocks);
ths5fafdf22007-09-16 21:08:06 +00003959 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
bellarde3db7222005-01-26 22:00:47 +00003960 nb_tbs ? target_code_size / nb_tbs : 0,
3961 max_target_code_size);
ths5fafdf22007-09-16 21:08:06 +00003962 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
bellarde3db7222005-01-26 22:00:47 +00003963 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3964 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
ths5fafdf22007-09-16 21:08:06 +00003965 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3966 cross_page,
bellarde3db7222005-01-26 22:00:47 +00003967 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3968 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
ths5fafdf22007-09-16 21:08:06 +00003969 direct_jmp_count,
bellarde3db7222005-01-26 22:00:47 +00003970 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3971 direct_jmp2_count,
3972 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
bellard57fec1f2008-02-01 10:50:11 +00003973 cpu_fprintf(f, "\nStatistics:\n");
bellarde3db7222005-01-26 22:00:47 +00003974 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3975 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3976 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
bellardb67d9a52008-05-23 09:57:34 +00003977 tcg_dump_info(f, cpu_fprintf);
bellarde3db7222005-01-26 22:00:47 +00003978}
3979
ths5fafdf22007-09-16 21:08:06 +00003980#if !defined(CONFIG_USER_ONLY)
bellard61382a52003-10-27 21:22:23 +00003981
3982#define MMUSUFFIX _cmmu
3983#define GETPC() NULL
3984#define env cpu_single_env
bellardb769d8f2004-10-03 15:07:13 +00003985#define SOFTMMU_CODE_ACCESS
bellard61382a52003-10-27 21:22:23 +00003986
3987#define SHIFT 0
3988#include "softmmu_template.h"
3989
3990#define SHIFT 1
3991#include "softmmu_template.h"
3992
3993#define SHIFT 2
3994#include "softmmu_template.h"
3995
3996#define SHIFT 3
3997#include "softmmu_template.h"
3998
3999#undef env
4000
4001#endif