/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "hw/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"

//#define DEBUG_UNASSIGNED
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;
DMAContext dma_context_memory;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

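/* The physical address space is described by a radix tree of
   PhysPageEntry nodes allocated from phys_map_nodes.  Interior
   entries point at child nodes; leaf entries (is_leaf set) hold a
   16-bit index into the phys_sections table above.  Each level of
   the tree resolves L2_BITS bits of the physical page index. */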
static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}

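/* Fill in one level of the tree for [*index, *index + *nb).  A
   subrange that is aligned to this level's step and at least one
   full step long becomes a single leaf here; anything smaller
   recurses into the level below.  Freshly allocated bottom-level
   nodes start out as phys_section_unassigned leaves. */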
static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

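/* Map the page range [index, index + nb) to a phys_sections leaf.
   Because of the alignment test in phys_page_set_level(), a suitably
   aligned run of L2_SIZE pages is stored as one level-1 leaf instead
   of L2_SIZE separate bottom-level entries. */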
static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

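/* Look up the section covering physical page 'index' by walking the
   tree from the top level down.  Any miss lands on the
   phys_section_unassigned entry. */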
MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
{
    PhysPageEntry lp = d->phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUArchState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUArchState),
        VMSTATE_UINT32(interrupt_request, CPUArchState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

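/* CPUs hang off a singly linked list threaded through
   CPUArchState::next_cpu, so looking one up by index is a linear
   walk from first_cpu. */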
CPUState *qemu_get_cpu(int index)
{
    CPUArchState *env = first_cpu;
    CPUState *cpu = NULL;

    while (env) {
        cpu = ENV_GET_CPU(env);
        if (cpu->cpu_index == index) {
            break;
        }
        env = env->next_cpu;
    }

    return cpu;
}

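/* Register a newly created CPU: append it to the global CPU list,
   assign it the next free index, and (for system emulation) hook
   its common state into savevm. */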
void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

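    /* len_mask relies on len being a power of two: e.g. len == 4
       gives len_mask == ~3, so (addr & ~len_mask) is non-zero
       exactly when addr is not 4-byte aligned.  Both properties are
       enforced below. */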
    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

void cpu_reset_interrupt(CPUArchState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->exit_request = 1;
    cpu->tcg_exit_req = 1;
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining. */
    new_env->next_cpu = next_cpu;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

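/* Compute the iotlb value for one softmmu TLB entry.  For RAM the
   value is the page's ram address OR'ed with the notdirty or ROM
   section index, so that writes can be trapped as needed; for MMIO
   it is the index of the section itself.  Pages carrying a
   watchpoint are redirected to the watchpoint section so that the
   relevant accesses trap. */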
hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, paddr);
        if (!section->readonly) {
            iotlb |= phys_section_notdirty;
        } else {
            iotlb |= phys_section_rom;
        }
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = section - phys_sections;
        iotlb += memory_region_section_addr(section, paddr);
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* !defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(hwaddr base);
static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
        if (!p[i].is_leaf) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].ptr);
        }
    }
    lp->is_leaf = 0;
    lp->ptr = PHYS_MAP_NODE_NIL;
}

static void destroy_all_mappings(AddressSpaceDispatch *d)
{
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}

static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}

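/* Install a section that covers only part of a page.  The page's
   slot in the radix tree is pointed at a subpage_t the first time a
   partial mapping touches it; later partial mappings of the same
   page reuse that subpage_t and only update its sub_section table. */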
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + section->size - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}

static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    hwaddr addr;
    uint16_t section_index = phys_section_add(section);

    assert(size);

    addr = start_addr;
    phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
                  section_index);
}

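/* MemoryListener region_add callback: carve an incoming section
   into an unaligned head, a run of whole target pages, and an
   unaligned tail.  Head and tail go through the subpage machinery,
   as do pages whose offset within the region is not page-aligned;
   aligned runs are registered in one go via register_multipage(). */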
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
    MemoryRegionSection now = *section, remain = *section;

    if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
        || (now.size < TARGET_PAGE_SIZE)) {
        now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space,
                       now.size);
        register_subpage(d, &now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    while (remain.size >= TARGET_PAGE_SIZE) {
        now = remain;
        if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = TARGET_PAGE_SIZE;
            register_subpage(d, &now);
        } else {
            now.size &= TARGET_PAGE_MASK;
            register_multipage(d, &now);
        }
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    now = remain;
    if (now.size) {
        register_subpage(d, &now);
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC 0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

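/* Back a RAM block with a file on the hugetlbfs mount named by
   -mem-path.  The backing file is created with mkstemp() and
   unlinked immediately, so it lives exactly as long as the mapping;
   the descriptor is kept in block->fd for later remapping. */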
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    block->fd = fd;
    return area;
}
#endif

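/* Best-fit search for a hole in the ram_addr_t address space: for
   every block, find the closest block that starts above it and keep
   the smallest gap that can still hold 'size' bytes. */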
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;
    QemuOpts *machine_opts;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (machine_opts &&
        !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                    "but dump_guest_core=off specified\n");
        }
    }
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    QemuOpts *opts;

    opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

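/* Allocate a new RAM block.  When 'host' is non-NULL the caller
   supplies the backing memory; otherwise it comes from -mem-path,
   Xen, the KVM-specific allocator, or plain qemu_vmalloc().  The
   block is inserted into ram_list sorted by size and its pages are
   marked dirty; the returned value is the block's offset in the
   ram_addr_t address space. */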
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *block, *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                memory_try_enable_merging(new_block->host, size);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_vmalloc(size);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    /* Keep the list sorted from biggest to smallest block.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

Huang Yingcd19cfa2011-03-02 08:56:19 +01001169#ifndef _WIN32
1170void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1171{
1172 RAMBlock *block;
1173 ram_addr_t offset;
1174 int flags;
1175 void *area, *vaddr;
1176
Paolo Bonzinia3161032012-11-14 15:54:48 +01001177 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001178 offset = addr - block->offset;
1179 if (offset < block->length) {
1180 vaddr = block->host + offset;
1181 if (block->flags & RAM_PREALLOC_MASK) {
1182 ;
1183 } else {
1184 flags = MAP_FIXED;
1185 munmap(vaddr, length);
1186 if (mem_path) {
1187#if defined(__linux__) && !defined(TARGET_S390X)
1188 if (block->fd) {
1189#ifdef MAP_POPULATE
1190 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1191 MAP_PRIVATE;
1192#else
1193 flags |= MAP_PRIVATE;
1194#endif
1195 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1196 flags, block->fd, offset);
1197 } else {
1198 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1199 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1200 flags, -1, 0);
1201 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01001202#else
1203 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001204#endif
1205 } else {
1206#if defined(TARGET_S390X) && defined(CONFIG_KVM)
1207 flags |= MAP_SHARED | MAP_ANONYMOUS;
1208 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
1209 flags, -1, 0);
1210#else
1211 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1212 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1213 flags, -1, 0);
1214#endif
1215 }
1216 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001217 fprintf(stderr, "Could not remap addr: "
1218 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001219 length, addr);
1220 exit(1);
1221 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001222 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001223 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001224 }
1225 return;
1226 }
1227 }
1228}
1229#endif /* !_WIN32 */

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    if (xen_enabled()) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        } else if (block->host == NULL) {
            block->host =
                xen_map_cache(block->offset, block->length, 1);
        }
    }
    return block->host + (addr - block->offset);
}
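
/* Illustrative sketch of the intended use described above: a device
 * touching RAM it owns, e.g. a display adapter scanning out its video RAM.
 * "MyVGAState" and its fields are hypothetical, not part of this file. */
#if 0
static void myvga_update_display(MyVGAState *s)
{
    /* Host pointer to the start of the device's own RAM block; only
     * valid up to the end of that block. */
    uint8_t *vram = qemu_get_ram_ptr(memory_region_get_ram_addr(&s->vram));

    /* ... scan out s->height lines of s->pitch bytes from vram ... */
}
#endif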

/* Return a host pointer to ram allocated with qemu_ram_alloc.  Same as
 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
 *
 * ??? Is this still necessary?
 */
static void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);
}

int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}
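
/* Illustrative sketch: the two translations above are inverses of each
 * other for any address inside a mapped RAM block.  The "addr" value is
 * hypothetical; assert() comes from <assert.h>. */
#if 0
static void ram_addr_round_trip(ram_addr_t addr)
{
    void *host = qemu_get_ram_ptr(addr);        /* ram offset -> host ptr */
    ram_addr_t back = qemu_ram_addr_from_host_nofail(host); /* and back */

    assert(back == addr);
}
#endif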

static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
#endif
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
#endif
}

static const MemoryRegionOps unassigned_mem_ops = {
    .read = unassigned_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
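
/* Illustrative sketch: device models define their MMIO behaviour with the
 * same MemoryRegionOps pattern used for the built-in regions above.  The
 * device, its single status register and the region size are made up. */
#if 0
typedef struct MyDevState {
    MemoryRegion iomem;
    uint32_t status;
} MyDevState;

static uint64_t mydev_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
    MyDevState *s = opaque;

    return addr == 0 ? s->status : 0;  /* reg 0 is status; others read 0 */
}

static void mydev_mmio_write(void *opaque, hwaddr addr,
                             uint64_t val, unsigned size)
{
    MyDevState *s = opaque;

    if (addr == 0) {
        s->status = val;
    }
}

static const MemoryRegionOps mydev_mmio_ops = {
    .read = mydev_mmio_read,
    .write = mydev_mmio_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* At init time:
 *     memory_region_init_io(&s->iomem, &mydev_mmio_ops, s, "mydev", 0x100);
 */
#endif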

static uint64_t error_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    abort();
}

static void error_mem_write(void *opaque, hwaddr addr,
                            uint64_t value, unsigned size)
{
    abort();
}

static const MemoryRegionOps error_mem_ops = {
    .read = error_mem_read,
    .write = error_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const MemoryRegionOps rom_mem_ops = {
    .read = error_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static const MemoryRegionOps notdirty_mem_ops = {
    .read = error_mem_read,
    .write = notdirty_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUArchState *env = cpu_single_env;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb_check_watchpoint(env);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(env);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, hwaddr addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(addr, val);
        break;
    case 2:
        stw_phys(addr, val);
        break;
    case 4:
        stl_phys(addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_read(void *opaque, hwaddr addr,
                             unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    return io_mem_read(section->mr, addr, len);
}

static void subpage_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx
           " idx %d value %"PRIx64"\n",
           __func__, mmio, len, addr, idx, value);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    io_mem_write(section->mr, addr, value, len);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_ram_read(void *opaque, hwaddr addr,
                                 unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return ldub_p(ptr);
    case 2: return lduw_p(ptr);
    case 4: return ldl_p(ptr);
    default: abort();
    }
}

static void subpage_ram_write(void *opaque, hwaddr addr,
                              uint64_t value, unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return stb_p(ptr, value);
    case 2: return stw_p(ptr, value);
    case 4: return stl_p(ptr, value);
    default: abort();
    }
}

static const MemoryRegionOps subpage_ram_ops = {
    .read = subpage_ram_read,
    .write = subpage_ram_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
1618
Anthony Liguoric227f092009-10-01 16:12:16 -05001619static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02001620 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00001621{
1622 int idx, eidx;
1623
1624 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1625 return -1;
1626 idx = SUBPAGE_IDX(start);
1627 eidx = SUBPAGE_IDX(end);
1628#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00001629 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
blueswir1db7b5422007-05-26 17:36:03 +00001630 mmio, start, end, idx, eidx, memory);
1631#endif
Avi Kivity5312bd82012-02-12 18:32:55 +02001632 if (memory_region_is_ram(phys_sections[section].mr)) {
1633 MemoryRegionSection new_section = phys_sections[section];
1634 new_section.mr = &io_mem_subpage_ram;
1635 section = phys_section_add(&new_section);
Andreas Färber56384e82011-11-30 16:26:21 +01001636 }
blueswir1db7b5422007-05-26 17:36:03 +00001637 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02001638 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00001639 }
1640
1641 return 0;
1642}

static subpage_t *subpage_init(hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->base = base;
    memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);

    return mmio;
}

static uint16_t dummy_section(MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = UINT64_MAX,
    };

    return phys_section_add(&section);
}

MemoryRegion *iotlb_to_region(hwaddr index)
{
    return phys_sections[index & ~TARGET_PAGE_MASK].mr;
}

static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
    memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
                          "subpage-ram", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}

static void mem_begin(MemoryListener *listener)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);

    destroy_all_mappings(d);
    d->phys_map.ptr = PHYS_MAP_NODE_NIL;
}

static void core_begin(MemoryListener *listener)
{
    phys_sections_clear();
    phys_section_unassigned = dummy_section(&io_mem_unassigned);
    phys_section_notdirty = dummy_section(&io_mem_notdirty);
    phys_section_rom = dummy_section(&io_mem_rom);
    phys_section_watch = dummy_section(&io_mem_watch);
}

static void tcg_commit(MemoryListener *listener)
{
    CPUArchState *env;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}

static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(1);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(0);
}

static void io_region_add(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);

    mrio->mr = section->mr;
    mrio->offset = section->offset_within_region;
    iorange_init(&mrio->iorange, &memory_region_iorange_ops,
                 section->offset_within_address_space, section->size);
    ioport_register(&mrio->iorange);
}

static void io_region_del(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    isa_unassign_ioport(section->offset_within_address_space, section->size);
}

static MemoryListener core_memory_listener = {
    .begin = core_begin,
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
    .priority = 1,
};

static MemoryListener io_memory_listener = {
    .region_add = io_region_add,
    .region_del = io_region_del,
    .priority = 0,
};

static MemoryListener tcg_memory_listener = {
    .commit = tcg_commit,
};

void address_space_init_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
    d->listener = (MemoryListener) {
        .begin = mem_begin,
        .region_add = mem_add,
        .region_nop = mem_add,
        .priority = 0,
    };
    as->dispatch = d;
    memory_listener_register(&d->listener, as);
}

void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    memory_listener_unregister(&d->listener);
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    g_free(d);
    as->dispatch = NULL;
}

static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, "system", INT64_MAX);
    address_space_init(&address_space_memory, system_memory);
    address_space_memory.name = "memory";

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init(system_io, "io", 65536);
    address_space_init(&address_space_io, system_io);
    address_space_io.name = "I/O";

    memory_listener_register(&core_memory_listener, &address_space_memory);
    memory_listener_register(&io_memory_listener, &address_space_io);
    memory_listener_register(&tcg_memory_listener, &address_space_memory);

    dma_context_init(&dma_context_memory, &address_space_memory,
                     NULL, NULL, NULL);
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}
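
/* Illustrative sketch: board code maps RAM and device regions into the
 * system address space via get_system_memory().  The base address is made
 * up, and "iomem" stands for a region initialized elsewhere, e.g. with
 * memory_region_init_io() as in the sketch further above. */
#if 0
static void myboard_map_device(MemoryRegion *iomem)
{
    /* Map a device MMIO window at guest physical 0x10000000. */
    memory_region_add_subregion(get_system_memory(), 0x10000000, iomem);
}
#endif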

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else

static void invalidate_and_set_dirty(hwaddr addr,
                                     hwaddr length)
{
    if (!cpu_physical_memory_is_dirty(addr)) {
        /* invalidate code */
        tb_invalidate_phys_page_range(addr, addr + length, 0);
        /* set dirty bit */
        cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
    }
    xen_modified_memory(addr, length);
}

void address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
                      int len, bool is_write)
{
    AddressSpaceDispatch *d = as->dispatch;
    int l;
    uint8_t *ptr;
    uint32_t val;
    hwaddr page;
    MemoryRegionSection *section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(d, page >> TARGET_PAGE_BITS);

        if (is_write) {
            if (!memory_region_is_ram(section->mr)) {
                hwaddr addr1;
                addr1 = memory_region_section_addr(section, addr);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write(section->mr, addr1, val, 4);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write(section->mr, addr1, val, 2);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write(section->mr, addr1, val, 1);
                    l = 1;
                }
            } else if (!section->readonly) {
                ram_addr_t addr1;
                addr1 = memory_region_get_ram_addr(section->mr)
                    + memory_region_section_addr(section, addr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
                qemu_put_ram_ptr(ptr);
            }
        } else {
            if (!(memory_region_is_ram(section->mr) ||
                  memory_region_is_romd(section->mr))) {
                hwaddr addr1;
                /* I/O case */
                addr1 = memory_region_section_addr(section, addr);
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read(section->mr, addr1, 4);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read(section->mr, addr1, 2);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read(section->mr, addr1, 1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(section->mr->ram_addr
                                       + memory_region_section_addr(section,
                                                                    addr));
                memcpy(buf, ptr, l);
                qemu_put_ram_ptr(ptr);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

void address_space_write(AddressSpace *as, hwaddr addr,
                         const uint8_t *buf, int len)
{
    address_space_rw(as, addr, (uint8_t *)buf, len, true);
}

/**
 * address_space_read: read from an address space.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @buf: buffer with the data transferred
 * @len: length of the data transferred
 */
void address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
{
    address_space_rw(as, addr, buf, len, false);
}


void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    return address_space_rw(&address_space_memory, addr, buf, len, is_write);
}
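
/* Illustrative sketch: a device model that needs to copy a few bytes of
 * guest memory typically goes through cpu_physical_memory_rw() (or the
 * read/write wrappers).  The guest address and payload here are made up. */
#if 0
static void mydev_post_status(hwaddr guest_buf)
{
    uint32_t status = cpu_to_le32(0x1);

    /* Write 4 bytes into guest memory (RAM or MMIO) at guest_buf. */
    cpu_physical_memory_rw(guest_buf, (uint8_t *)&status,
                           sizeof(status), 1 /* is_write */);
}
#endif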

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(hwaddr addr,
                                   const uint8_t *buf, int len)
{
    AddressSpaceDispatch *d = address_space_memory.dispatch;
    int l;
    uint8_t *ptr;
    hwaddr page;
    MemoryRegionSection *section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(d, page >> TARGET_PAGE_BITS);

        if (!(memory_region_is_ram(section->mr) ||
              memory_region_is_romd(section->mr))) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = memory_region_get_ram_addr(section->mr)
                + memory_region_section_addr(section, addr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(addr1, l);
            qemu_put_ram_ptr(ptr);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

typedef struct {
    void *buffer;
    hwaddr addr;
    hwaddr len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

static void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
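
/* Illustrative sketch: a caller that sees cpu_physical_memory_map() fail
 * (because the single bounce buffer is busy) can register a map client and
 * retry from the callback once resources free up.  "MyDevState" and
 * mydev_try_dma() are hypothetical stand-ins for the device's own code. */
#if 0
static void mydev_map_retry(void *opaque)
{
    MyDevState *s = opaque;

    mydev_try_dma(s);   /* retry; on success the transfer proceeds */
}

static void mydev_start_dma(MyDevState *s)
{
    if (!mydev_try_dma(s)) {
        /* Mapping failed; ask to be called back when a retry may succeed. */
        cpu_register_map_client(s, mydev_map_retry);
    }
}
#endif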

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    AddressSpaceDispatch *d = as->dispatch;
    hwaddr len = *plen;
    hwaddr todo = 0;
    int l;
    hwaddr page;
    MemoryRegionSection *section;
    ram_addr_t raddr = RAM_ADDR_MAX;
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(d, page >> TARGET_PAGE_BITS);

        if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                address_space_read(as, addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            raddr = memory_region_get_ram_addr(section->mr)
                + memory_region_section_addr(section, addr);
        }

        len -= l;
        addr += l;
        todo += l;
    }
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}

/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                invalidate_and_set_dirty(addr1, l);
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}

void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}
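
/* Illustrative sketch: the usual zero-copy DMA pattern is map, transfer,
 * unmap, while honouring the (possibly shortened) length returned in *plen.
 * The guest address, length and fill pattern below are made up. */
#if 0
static void mydev_fill_guest_buffer(hwaddr guest_addr, hwaddr len)
{
    hwaddr plen = len;
    void *host = cpu_physical_memory_map(guest_addr, &plen, 1 /* write */);

    if (!host) {
        return;     /* resources exhausted; see cpu_register_map_client() */
    }
    memset(host, 0xa5, plen);   /* plen may be smaller than len */
    cpu_physical_memory_unmap(host, plen, 1 /* write */, plen);
}
#endif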

/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint32_t val;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
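
/* Illustrative sketch: the _le/_be variants let a caller state the
 * endianness of the data in guest memory explicitly, independent of the
 * target's native byte order.  The descriptor layout and address are
 * hypothetical. */
#if 0
static uint32_t mydev_read_desc_flags(hwaddr desc_base)
{
    /* The (made-up) device spec defines this field as little-endian, so
     * this reads the same value on big- and little-endian targets. */
    return ldl_le_phys(desc_base + 4);
}
#endif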

/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);

        /* XXX This is broken when device endian != cpu endian.
           Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = io_mem_read(section->mr, addr, 4) << 32;
        val |= io_mem_read(section->mr, addr + 4, 4);
#else
        val = io_mem_read(section->mr, addr, 4);
        val |= io_mem_read(section->mr, addr + 4, 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(hwaddr addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}
2302
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002303/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002304static inline uint32_t lduw_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002305 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00002306{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002307 uint8_t *ptr;
2308 uint64_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02002309 MemoryRegionSection *section;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002310
Avi Kivityac1970f2012-10-03 16:22:53 +02002311 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002312
Blue Swirlcc5bea62012-04-14 14:56:48 +00002313 if (!(memory_region_is_ram(section->mr) ||
2314 memory_region_is_romd(section->mr))) {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002315 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00002316 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02002317 val = io_mem_read(section->mr, addr, 2);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002318#if defined(TARGET_WORDS_BIGENDIAN)
2319 if (endian == DEVICE_LITTLE_ENDIAN) {
2320 val = bswap16(val);
2321 }
2322#else
2323 if (endian == DEVICE_BIG_ENDIAN) {
2324 val = bswap16(val);
2325 }
2326#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002327 } else {
2328 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02002329 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002330 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002331 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002332 switch (endian) {
2333 case DEVICE_LITTLE_ENDIAN:
2334 val = lduw_le_p(ptr);
2335 break;
2336 case DEVICE_BIG_ENDIAN:
2337 val = lduw_be_p(ptr);
2338 break;
2339 default:
2340 val = lduw_p(ptr);
2341 break;
2342 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002343 }
2344 return val;
bellardaab33092005-10-30 20:48:42 +00002345}
2346
Avi Kivitya8170e52012-10-23 12:30:10 +02002347uint32_t lduw_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002348{
2349 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2350}
2351
Avi Kivitya8170e52012-10-23 12:30:10 +02002352uint32_t lduw_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002353{
2354 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2355}
2356
Avi Kivitya8170e52012-10-23 12:30:10 +02002357uint32_t lduw_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002358{
2359 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
2360}
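/*
 * Usage sketch (editor's addition, illustrative names): reading a pair of
 * 16-bit little-endian fields from a config structure in guest RAM:
 *
 *     uint16_t vendor_id = lduw_le_phys(cfg_pa);
 *     uint16_t device_id = lduw_le_phys(cfg_pa + 2);
 *
 * Both calls assume 2-byte alignment, per the warning above.
 */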
2361
bellard8df1cd02005-01-28 22:37:22 +00002362/* warning: addr must be aligned. The RAM page is not marked as dirty,
2363   and the code inside it is not invalidated. This is useful when the
2364   dirty bits are used to track modified PTEs. */
Avi Kivitya8170e52012-10-23 12:30:10 +02002365void stl_phys_notdirty(hwaddr addr, uint32_t val)
bellard8df1cd02005-01-28 22:37:22 +00002366{
bellard8df1cd02005-01-28 22:37:22 +00002367 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02002368 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00002369
Avi Kivityac1970f2012-10-03 16:22:53 +02002370    section = phys_page_find(address_space_memory.dispatch,
                                 addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00002371
Avi Kivityf3705d52012-03-08 16:16:34 +02002372 if (!memory_region_is_ram(section->mr) || section->readonly) {
Blue Swirlcc5bea62012-04-14 14:56:48 +00002373 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02002374 if (memory_region_is_ram(section->mr)) {
2375 section = &phys_sections[phys_section_rom];
2376 }
2377 io_mem_write(section->mr, addr, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00002378 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02002379 unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002380 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002381 + memory_region_section_addr(section, addr);
pbrook5579c7f2009-04-11 14:47:08 +00002382 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00002383 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00002384
2385 if (unlikely(in_migration)) {
2386 if (!cpu_physical_memory_is_dirty(addr1)) {
2387 /* invalidate code */
2388 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2389 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002390 cpu_physical_memory_set_dirty_flags(
2391 addr1, (0xff & ~CODE_DIRTY_FLAG));
aliguori74576192008-10-06 14:02:03 +00002392 }
2393 }
bellard8df1cd02005-01-28 22:37:22 +00002394 }
2395}
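/*
 * Usage sketch (editor's addition, modeled on the x86 MMU helpers): the
 * typical caller updates accessed/dirty bits of a guest page-table entry
 * in place:
 *
 *     pte |= PG_ACCESSED_MASK;             // illustrative names
 *     stl_phys_notdirty(pte_addr, pte);
 *
 * Leaving the page clean keeps dirty-bit consumers that track modified
 * PTEs from seeing QEMU's own bookkeeping writes.
 */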
2396
Avi Kivitya8170e52012-10-23 12:30:10 +02002397void stq_phys_notdirty(hwaddr addr, uint64_t val)
j_mayerbc98a7e2007-04-04 07:55:12 +00002398{
j_mayerbc98a7e2007-04-04 07:55:12 +00002399 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02002400 MemoryRegionSection *section;
j_mayerbc98a7e2007-04-04 07:55:12 +00002401
Avi Kivityac1970f2012-10-03 16:22:53 +02002402    section = phys_page_find(address_space_memory.dispatch,
                                 addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00002403
Avi Kivityf3705d52012-03-08 16:16:34 +02002404 if (!memory_region_is_ram(section->mr) || section->readonly) {
Blue Swirlcc5bea62012-04-14 14:56:48 +00002405 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02002406 if (memory_region_is_ram(section->mr)) {
2407 section = &phys_sections[phys_section_rom];
2408 }
j_mayerbc98a7e2007-04-04 07:55:12 +00002409#ifdef TARGET_WORDS_BIGENDIAN
Avi Kivity37ec01d2012-03-08 18:08:35 +02002410 io_mem_write(section->mr, addr, val >> 32, 4);
2411 io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
j_mayerbc98a7e2007-04-04 07:55:12 +00002412#else
Avi Kivity37ec01d2012-03-08 18:08:35 +02002413 io_mem_write(section->mr, addr, (uint32_t)val, 4);
2414 io_mem_write(section->mr, addr + 4, val >> 32, 4);
j_mayerbc98a7e2007-04-04 07:55:12 +00002415#endif
2416 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02002417 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002418 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002419 + memory_region_section_addr(section, addr));
j_mayerbc98a7e2007-04-04 07:55:12 +00002420 stq_p(ptr, val);
2421 }
2422}
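/* Editor's note: in the I/O branch above the 64-bit value is split into two
 * 32-bit io_mem_write() accesses, high word first on big-endian targets and
 * low word first otherwise, since this path issues at most 4 bytes at a
 * time. */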
2423
bellard8df1cd02005-01-28 22:37:22 +00002424/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002425static inline void stl_phys_internal(hwaddr addr, uint32_t val,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002426 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002427{
bellard8df1cd02005-01-28 22:37:22 +00002428 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02002429 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00002430
Avi Kivityac1970f2012-10-03 16:22:53 +02002431    section = phys_page_find(address_space_memory.dispatch,
                                 addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00002432
Avi Kivityf3705d52012-03-08 16:16:34 +02002433 if (!memory_region_is_ram(section->mr) || section->readonly) {
Blue Swirlcc5bea62012-04-14 14:56:48 +00002434 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02002435 if (memory_region_is_ram(section->mr)) {
2436 section = &phys_sections[phys_section_rom];
2437 }
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002438#if defined(TARGET_WORDS_BIGENDIAN)
2439 if (endian == DEVICE_LITTLE_ENDIAN) {
2440 val = bswap32(val);
2441 }
2442#else
2443 if (endian == DEVICE_BIG_ENDIAN) {
2444 val = bswap32(val);
2445 }
2446#endif
Avi Kivity37ec01d2012-03-08 18:08:35 +02002447 io_mem_write(section->mr, addr, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00002448 } else {
2449 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02002450 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002451 + memory_region_section_addr(section, addr);
bellard8df1cd02005-01-28 22:37:22 +00002452 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002453 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002454 switch (endian) {
2455 case DEVICE_LITTLE_ENDIAN:
2456 stl_le_p(ptr, val);
2457 break;
2458 case DEVICE_BIG_ENDIAN:
2459 stl_be_p(ptr, val);
2460 break;
2461 default:
2462 stl_p(ptr, val);
2463 break;
2464 }
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002465 invalidate_and_set_dirty(addr1, 4);
bellard8df1cd02005-01-28 22:37:22 +00002466 }
2467}
2468
Avi Kivitya8170e52012-10-23 12:30:10 +02002469void stl_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002470{
2471 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2472}
2473
Avi Kivitya8170e52012-10-23 12:30:10 +02002474void stl_le_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002475{
2476 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2477}
2478
Avi Kivitya8170e52012-10-23 12:30:10 +02002479void stl_be_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002480{
2481 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2482}
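/*
 * Usage sketch (editor's addition, illustrative names): board or firmware
 * setup code seeding fixed-endian data in guest RAM, e.g. a little-endian
 * DMA descriptor word:
 *
 *     stl_le_phys(ring_pa + 8, buf_len);
 */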
2483
bellardaab33092005-10-30 20:48:42 +00002484/* XXX: optimize */
Avi Kivitya8170e52012-10-23 12:30:10 +02002485void stb_phys(hwaddr addr, uint32_t val)
bellardaab33092005-10-30 20:48:42 +00002486{
2487 uint8_t v = val;
2488 cpu_physical_memory_write(addr, &v, 1);
2489}
2490
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002491/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002492static inline void stw_phys_internal(hwaddr addr, uint32_t val,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002493 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00002494{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002495 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02002496 MemoryRegionSection *section;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002497
Avi Kivityac1970f2012-10-03 16:22:53 +02002498    section = phys_page_find(address_space_memory.dispatch,
                                 addr >> TARGET_PAGE_BITS);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002499
Avi Kivityf3705d52012-03-08 16:16:34 +02002500 if (!memory_region_is_ram(section->mr) || section->readonly) {
Blue Swirlcc5bea62012-04-14 14:56:48 +00002501 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02002502 if (memory_region_is_ram(section->mr)) {
2503 section = &phys_sections[phys_section_rom];
2504 }
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002505#if defined(TARGET_WORDS_BIGENDIAN)
2506 if (endian == DEVICE_LITTLE_ENDIAN) {
2507 val = bswap16(val);
2508 }
2509#else
2510 if (endian == DEVICE_BIG_ENDIAN) {
2511 val = bswap16(val);
2512 }
2513#endif
Avi Kivity37ec01d2012-03-08 18:08:35 +02002514 io_mem_write(section->mr, addr, val, 2);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002515 } else {
2516 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02002517 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002518 + memory_region_section_addr(section, addr);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002519 /* RAM case */
2520 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002521 switch (endian) {
2522 case DEVICE_LITTLE_ENDIAN:
2523 stw_le_p(ptr, val);
2524 break;
2525 case DEVICE_BIG_ENDIAN:
2526 stw_be_p(ptr, val);
2527 break;
2528 default:
2529 stw_p(ptr, val);
2530 break;
2531 }
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002532 invalidate_and_set_dirty(addr1, 2);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002533 }
bellardaab33092005-10-30 20:48:42 +00002534}
2535
Avi Kivitya8170e52012-10-23 12:30:10 +02002536void stw_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002537{
2538 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2539}
2540
Avi Kivitya8170e52012-10-23 12:30:10 +02002541void stw_le_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002542{
2543 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2544}
2545
Avi Kivitya8170e52012-10-23 12:30:10 +02002546void stw_be_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002547{
2548 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2549}
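/*
 * Usage sketch (editor's addition, illustrative names): the 16-bit mirror
 * of the 32-bit store above, e.g. patching a big-endian length field that
 * a model has placed in guest RAM:
 *
 *     stw_be_phys(hdr_pa + 4, payload_len);
 */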
2550
bellardaab33092005-10-30 20:48:42 +00002551/* XXX: optimize */
Avi Kivitya8170e52012-10-23 12:30:10 +02002552void stq_phys(hwaddr addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00002553{
2554 val = tswap64(val);
Stefan Weil71d2b722011-03-26 21:06:56 +01002555 cpu_physical_memory_write(addr, &val, 8);
bellardaab33092005-10-30 20:48:42 +00002556}
2557
Avi Kivitya8170e52012-10-23 12:30:10 +02002558void stq_le_phys(hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002559{
2560 val = cpu_to_le64(val);
2561 cpu_physical_memory_write(addr, &val, 8);
2562}
2563
Avi Kivitya8170e52012-10-23 12:30:10 +02002564void stq_be_phys(hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002565{
2566 val = cpu_to_be64(val);
2567 cpu_physical_memory_write(addr, &val, 8);
2568}
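/* Editor's note: stq_phys() converts with tswap64() (host vs. target
 * order) while the _le/_be variants pin the order with cpu_to_le64() and
 * cpu_to_be64(); all three then reuse the byte-wise
 * cpu_physical_memory_write() path rather than a dispatch of their own,
 * hence the XXX: optimize above. */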
2569
aliguori5e2972f2009-03-28 17:51:36 +00002570/* virtual memory access for debug (includes writing to ROM) */
Andreas Färber9349b4f2012-03-14 01:38:32 +01002571int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00002572 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00002573{
2574 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02002575 hwaddr phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00002576 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00002577
2578 while (len > 0) {
2579 page = addr & TARGET_PAGE_MASK;
2580 phys_addr = cpu_get_phys_page_debug(env, page);
2581 /* if no physical page mapped, return an error */
2582        if (phys_addr == -1) {
2583            return -1;
        }
2584        l = (page + TARGET_PAGE_SIZE) - addr;
2585        if (l > len) {
2586            l = len;
        }
aliguori5e2972f2009-03-28 17:51:36 +00002587        phys_addr += (addr & ~TARGET_PAGE_MASK);
aliguori5e2972f2009-03-28 17:51:36 +00002588        if (is_write) {
2589            cpu_physical_memory_write_rom(phys_addr, buf, l);
2590        } else {
aliguori5e2972f2009-03-28 17:51:36 +00002591            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        }
bellard13eb76e2004-01-24 15:23:36 +00002592 len -= l;
2593 buf += l;
2594 addr += l;
2595 }
2596 return 0;
2597}
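/*
 * Usage sketch (editor's addition): this is the accessor debug paths such
 * as the gdbstub use, since it translates guest-virtual addresses itself
 * and may write to ROM; e.g. peeking one byte at a guest virtual address:
 *
 *     uint8_t byte;
 *     if (cpu_memory_rw_debug(env, vaddr, &byte, 1, 0) < 0) {
 *         return -1;    // no physical page mapped
 *     }
 */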
Paul Brooka68fe892010-03-01 00:08:59 +00002598#endif
bellard13eb76e2004-01-24 15:23:36 +00002599
Blue Swirl8e4a4242013-01-06 18:30:17 +00002600#if !defined(CONFIG_USER_ONLY)
2601
2602/*
2603 * A helper function for the _utterly broken_ virtio device model to find out if
2604 * A helper function for the _utterly broken_ virtio device model to find
2605 * out if it's running on a big-endian machine. Don't do this at home, kids!
2606bool virtio_is_big_endian(void);
2607bool virtio_is_big_endian(void)
2608{
2609#if defined(TARGET_WORDS_BIGENDIAN)
2610 return true;
2611#else
2612 return false;
2613#endif
2614}
2615
2616#endif
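/* Editor's note: as written, virtio_is_big_endian() reports only the
 * compiled-in target byte order; it cannot reflect a guest that switches
 * endianness at run time. */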
2617
Wen Congyang76f35532012-05-07 12:04:18 +08002618#ifndef CONFIG_USER_ONLY
Avi Kivitya8170e52012-10-23 12:30:10 +02002619bool cpu_physical_memory_is_io(hwaddr phys_addr)
Wen Congyang76f35532012-05-07 12:04:18 +08002620{
2621 MemoryRegionSection *section;
2622
Avi Kivityac1970f2012-10-03 16:22:53 +02002623 section = phys_page_find(address_space_memory.dispatch,
2624 phys_addr >> TARGET_PAGE_BITS);
Wen Congyang76f35532012-05-07 12:04:18 +08002625
2626 return !(memory_region_is_ram(section->mr) ||
2627 memory_region_is_romd(section->mr));
2628}
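/*
 * Usage sketch (editor's addition): callers can use this predicate to skip
 * pages that are not plain memory, e.g. when dumping guest RAM:
 *
 *     if (cpu_physical_memory_is_io(paddr)) {
 *         continue;    // don't read device registers as if they were RAM
 *     }
 */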
2629#endif