/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "hw/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"

//#define DEBUG_UNASSIGNED
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;
DMAContext dma_context_memory;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *, cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

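/* The physical page map is a multi-level (radix) table.  Interior
 * nodes live in the phys_map_nodes pool above and are addressed by
 * 16-bit indices rather than pointers, which keeps PhysPageEntry
 * small; PHYS_MAP_NODE_NIL marks an empty slot.
 * phys_map_node_reserve() grows the pool ahead of an insertion so
 * that phys_map_node_alloc() never fails mid-walk. */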
static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}

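/* Populate the map for the page range [*index, *index + *nb) with the
 * section number 'leaf'.  At each level one slot covers 'step' pages:
 * a step-aligned, fully covered slot becomes a leaf directly, while
 * partially covered slots recurse one level down.  *index and *nb are
 * advanced in place as pages are consumed. */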
static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

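/* Look up the section covering a page.  Callers pass a page index,
 * not a byte address; a typical (illustrative) lookup is
 *
 *     section = phys_page_find(d, addr >> TARGET_PAGE_BITS);
 *
 * The result is never NULL: pages with no mapping resolve to the
 * phys_section_unassigned entry. */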
MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
{
    PhysPageEntry lp = d->phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUArchState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUArchState),
        VMSTATE_UINT32(interrupt_request, CPUArchState),
        VMSTATE_END_OF_LIST()
    }
};
#else
#define vmstate_cpu_common vmstate_dummy
#endif

CPUState *qemu_get_cpu(int index)
{
    CPUArchState *env = first_cpu;
    CPUState *cpu = NULL;

    while (env) {
        cpu = ENV_GET_CPU(env);
        if (cpu->cpu_index == index) {
            break;
        }
        env = env->next_cpu;
    }

    return env ? cpu : NULL;
}

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

void cpu_reset_interrupt(CPUArchState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->exit_request = 1;
    cpu->tcg_exit_req = 1;
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining. */
    new_env->next_cpu = next_cpu;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, paddr);
        if (!section->readonly) {
            iotlb |= phys_section_notdirty;
        } else {
            iotlb |= phys_section_rom;
        }
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = section - phys_sections;
        iotlb += memory_region_section_addr(section, paddr);
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

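/* Subpages cover memory regions that do not fill a whole target page
 * or are not page aligned.  A subpage_t stands in for one guest page
 * and maps each byte offset within that page to its own section
 * index, so several small regions can share the page. */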
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(hwaddr base);
static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
        if (!p[i].is_leaf) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].ptr);
        }
    }
    lp->is_leaf = 0;
    lp->ptr = PHYS_MAP_NODE_NIL;
}

static void destroy_all_mappings(AddressSpaceDispatch *d)
{
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}

static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + section->size - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}

static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    hwaddr addr;
    uint16_t section_index = phys_section_add(section);

    assert(size);

    addr = start_addr;
    phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
                  section_index);
}

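/* Memory listener callback: carve an incoming section into an
 * unaligned head, a run of whole target pages, and an unaligned
 * tail.  Head, tail, and any page whose offset within the region is
 * itself unaligned go through register_subpage(); aligned whole
 * pages take the register_multipage() fast path. */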
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
    MemoryRegionSection now = *section, remain = *section;

    if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
        || (now.size < TARGET_PAGE_SIZE)) {
        now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space,
                       now.size);
        register_subpage(d, &now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    while (remain.size >= TARGET_PAGE_SIZE) {
        now = remain;
        if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = TARGET_PAGE_SIZE;
            register_subpage(d, &now);
        } else {
            now.size &= TARGET_PAGE_MASK;
            register_multipage(d, &now);
        }
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    now = remain;
    if (now.size) {
        register_subpage(d, &now);
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC 0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

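/* Back a RAMBlock with a file created on a hugetlbfs mount (the
 * -mem-path option).  Any failure returns NULL so that the caller
 * can fall back to an anonymous allocation. */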
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    filename = g_strdup_printf("%s/qemu_back_mem.XXXXXX", path);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    block->fd = fd;
    return area;
}
#endif

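/* Choose an offset in ram_addr_t space for a new block of 'size'
 * bytes.  This is a best-fit search: scan the gaps between existing
 * blocks and take the start of the smallest gap that is still large
 * enough. */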
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;
    QemuOpts *machine_opts;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (machine_opts &&
        !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                            "but dump_guest_core=off specified\n");
        }
    }
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    QemuOpts *opts;

    opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

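/* Register a new RAMBlock.  If 'host' is non-NULL the caller supplies
 * the backing memory (flagged RAM_PREALLOC_MASK); otherwise it is
 * allocated here from -mem-path, Xen, KVM or plain qemu_vmalloc() as
 * appropriate.  Returns the block's offset in ram_addr_t space. */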
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *block, *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                memory_try_enable_merging(new_block->host, size);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_vmalloc(size);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    /* Keep the list sorted from biggest to smallest block.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

Huang Yingcd19cfa2011-03-02 08:56:19 +01001165#ifndef _WIN32
1166void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1167{
1168 RAMBlock *block;
1169 ram_addr_t offset;
1170 int flags;
1171 void *area, *vaddr;
1172
Paolo Bonzinia3161032012-11-14 15:54:48 +01001173 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001174 offset = addr - block->offset;
1175 if (offset < block->length) {
1176 vaddr = block->host + offset;
1177 if (block->flags & RAM_PREALLOC_MASK) {
1178 ;
1179 } else {
1180 flags = MAP_FIXED;
1181 munmap(vaddr, length);
1182 if (mem_path) {
1183#if defined(__linux__) && !defined(TARGET_S390X)
1184 if (block->fd) {
1185#ifdef MAP_POPULATE
1186 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1187 MAP_PRIVATE;
1188#else
1189 flags |= MAP_PRIVATE;
1190#endif
1191 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1192 flags, block->fd, offset);
1193 } else {
1194 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1195 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1196 flags, -1, 0);
1197 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01001198#else
1199 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001200#endif
1201 } else {
1202#if defined(TARGET_S390X) && defined(CONFIG_KVM)
1203 flags |= MAP_SHARED | MAP_ANONYMOUS;
1204 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
1205 flags, -1, 0);
1206#else
1207 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1208 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1209 flags, -1, 0);
1210#endif
1211 }
1212 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001213 fprintf(stderr, "Could not remap addr: "
1214 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001215 length, addr);
1216 exit(1);
1217 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001218 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001219 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001220 }
1221 return;
1222 }
1223 }
1224}
1225#endif /* !_WIN32 */
1226
pbrookdc828ca2009-04-09 22:21:07 +00001227/* Return a host pointer to ram allocated with qemu_ram_alloc.
pbrook5579c7f2009-04-11 14:47:08 +00001228 With the exception of the softmmu code in this file, this should
1229 only be used for local memory (e.g. video ram) that the device owns,
1230 and knows it isn't going to access beyond the end of the block.
1231
1232 It should not be used for general purpose DMA.
1233 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1234 */
Anthony Liguoric227f092009-10-01 16:12:16 -05001235void *qemu_get_ram_ptr(ram_addr_t addr)
pbrookdc828ca2009-04-09 22:21:07 +00001236{
pbrook94a6b542009-04-11 17:15:54 +00001237 RAMBlock *block;
1238
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001239 /* The list is protected by the iothread lock here. */
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001240 block = ram_list.mru_block;
1241 if (block && addr - block->offset < block->length) {
1242 goto found;
1243 }
Paolo Bonzinia3161032012-11-14 15:54:48 +01001244 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Alex Williamsonf471a172010-06-11 11:11:42 -06001245 if (addr - block->offset < block->length) {
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001246 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001247 }
pbrook94a6b542009-04-11 17:15:54 +00001248 }
Alex Williamsonf471a172010-06-11 11:11:42 -06001249
1250 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1251 abort();
1252
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001253found:
1254 ram_list.mru_block = block;
1255 if (xen_enabled()) {
1256 /* We need to check if the requested address is in the RAM
1257 * because we don't want to map the entire memory in QEMU.
1258 * In that case just map until the end of the page.
1259 */
1260 if (block->offset == 0) {
1261 return xen_map_cache(addr, 0, 0);
1262 } else if (block->host == NULL) {
1263 block->host =
1264 xen_map_cache(block->offset, block->length, 1);
1265 }
1266 }
1267 return block->host + (addr - block->offset);
pbrookdc828ca2009-04-09 22:21:07 +00001268}
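/* Illustrative sketch (an assumption, not taken from this file): a device
 * that owns a RAM block can turn its ram_addr_t back into a host pointer
 * for strictly device-local access, e.g. clearing a framebuffer:
 *
 *     ram_addr_t fb = memory_region_get_ram_addr(&s->vram); // s->vram: hypothetical
 *     uint8_t *p = qemu_get_ram_ptr(fb);
 *     memset(p, 0, vram_size);
 *
 * General-purpose DMA must still go through cpu_physical_memory_map/_rw,
 * as the comment above warns.
 */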
1269
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001270/* Return a host pointer to ram allocated with qemu_ram_alloc. Same as
 1271 * qemu_get_ram_ptr but does not touch ram_list.mru_block.
1272 *
1273 * ??? Is this still necessary?
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001274 */
Blue Swirl8b9c99d2012-10-28 11:04:51 +00001275static void *qemu_safe_ram_ptr(ram_addr_t addr)
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001276{
1277 RAMBlock *block;
1278
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001279 /* The list is protected by the iothread lock here. */
Paolo Bonzinia3161032012-11-14 15:54:48 +01001280 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001281 if (addr - block->offset < block->length) {
Jan Kiszka868bb332011-06-21 22:59:09 +02001282 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001283 /* We need to check if the requested address is in the RAM
1284 * because we don't want to map the entire memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001285 * In that case just map until the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01001286 */
1287 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001288 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01001289 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001290 block->host =
1291 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01001292 }
1293 }
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001294 return block->host + (addr - block->offset);
1295 }
1296 }
1297
1298 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1299 abort();
1300
1301 return NULL;
1302}
1303
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001304/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1305 * but takes a size argument */
Blue Swirl8b9c99d2012-10-28 11:04:51 +00001306static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001307{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001308 if (*size == 0) {
1309 return NULL;
1310 }
Jan Kiszka868bb332011-06-21 22:59:09 +02001311 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001312 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02001313 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001314 RAMBlock *block;
1315
Paolo Bonzinia3161032012-11-14 15:54:48 +01001316 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001317 if (addr - block->offset < block->length) {
1318 if (addr - block->offset + *size > block->length)
1319 *size = block->length - addr + block->offset;
1320 return block->host + (addr - block->offset);
1321 }
1322 }
1323
1324 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1325 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001326 }
1327}
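/* Illustrative sketch: *size is clamped to what remains of the RAMBlock,
 * so callers must re-check it (the values here are hypothetical):
 *
 *     ram_addr_t sz = 8192;
 *     void *p = qemu_ram_ptr_length(addr, &sz);
 *     // sz may now be smaller than 8192 if addr was near the block's end
 */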
1328
Anthony PERARD050a0dd2010-09-16 13:57:49 +01001329void qemu_put_ram_ptr(void *addr)
1330{
1331 trace_qemu_put_ram_ptr(addr);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01001332}
1333
Marcelo Tosattie8902612010-10-11 15:31:19 -03001334int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00001335{
pbrook94a6b542009-04-11 17:15:54 +00001336 RAMBlock *block;
1337 uint8_t *host = ptr;
1338
Jan Kiszka868bb332011-06-21 22:59:09 +02001339 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001340 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001341 return 0;
1342 }
1343
Paolo Bonzinia3161032012-11-14 15:54:48 +01001344 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001345 /* This case occurs when the block is not mapped. */
1346 if (block->host == NULL) {
1347 continue;
1348 }
Alex Williamsonf471a172010-06-11 11:11:42 -06001349 if (host - block->host < block->length) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03001350 *ram_addr = block->offset + (host - block->host);
1351 return 0;
Alex Williamsonf471a172010-06-11 11:11:42 -06001352 }
pbrook94a6b542009-04-11 17:15:54 +00001353 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001354
Marcelo Tosattie8902612010-10-11 15:31:19 -03001355 return -1;
1356}
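/* Illustrative sketch: the reverse translation is used when only a host
 * pointer is at hand:
 *
 *     ram_addr_t ra;
 *     if (qemu_ram_addr_from_host(host_ptr, &ra) == 0) {
 *         // host_ptr points into guest RAM; ra is its ram_addr_t
 *     }
 */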
Alex Williamsonf471a172010-06-11 11:11:42 -06001357
Marcelo Tosattie8902612010-10-11 15:31:19 -03001358/* Some of the softmmu routines need to translate from a host pointer
1359 (typically a TLB entry) back to a ram offset. */
1360ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
1361{
1362 ram_addr_t ram_addr;
Alex Williamsonf471a172010-06-11 11:11:42 -06001363
Marcelo Tosattie8902612010-10-11 15:31:19 -03001364 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
1365 fprintf(stderr, "Bad ram pointer %p\n", ptr);
1366 abort();
1367 }
1368 return ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00001369}
1370
Avi Kivitya8170e52012-10-23 12:30:10 +02001371static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001372 unsigned size)
bellard33417e72003-08-10 21:47:01 +00001373{
pbrook67d3b952006-12-18 05:03:52 +00001374#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00001375 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
pbrook67d3b952006-12-18 05:03:52 +00001376#endif
Richard Henderson5b450402011-04-18 16:13:12 -07001377#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001378 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00001379#endif
1380 return 0;
1381}
1382
Avi Kivitya8170e52012-10-23 12:30:10 +02001383static void unassigned_mem_write(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001384 uint64_t val, unsigned size)
blueswir1e18231a2008-10-06 18:46:28 +00001385{
1386#ifdef DEBUG_UNASSIGNED
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001387 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
blueswir1e18231a2008-10-06 18:46:28 +00001388#endif
Richard Henderson5b450402011-04-18 16:13:12 -07001389#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001390 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00001391#endif
1392}
1393
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001394static const MemoryRegionOps unassigned_mem_ops = {
1395 .read = unassigned_mem_read,
1396 .write = unassigned_mem_write,
1397 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00001398};
1399
Avi Kivitya8170e52012-10-23 12:30:10 +02001400static uint64_t error_mem_read(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001401 unsigned size)
1402{
1403 abort();
1404}
1405
Avi Kivitya8170e52012-10-23 12:30:10 +02001406static void error_mem_write(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001407 uint64_t value, unsigned size)
1408{
1409 abort();
1410}
1411
1412static const MemoryRegionOps error_mem_ops = {
1413 .read = error_mem_read,
1414 .write = error_mem_write,
1415 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00001416};
1417
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001418static const MemoryRegionOps rom_mem_ops = {
1419 .read = error_mem_read,
1420 .write = unassigned_mem_write,
1421 .endianness = DEVICE_NATIVE_ENDIAN,
1422};
1423
Avi Kivitya8170e52012-10-23 12:30:10 +02001424static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001425 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00001426{
bellard3a7d9292005-08-21 09:26:42 +00001427 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001428 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00001429 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1430#if !defined(CONFIG_USER_ONLY)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001431 tb_invalidate_phys_page_fast(ram_addr, size);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001432 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00001433#endif
1434 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001435 switch (size) {
1436 case 1:
1437 stb_p(qemu_get_ram_ptr(ram_addr), val);
1438 break;
1439 case 2:
1440 stw_p(qemu_get_ram_ptr(ram_addr), val);
1441 break;
1442 case 4:
1443 stl_p(qemu_get_ram_ptr(ram_addr), val);
1444 break;
1445 default:
1446 abort();
1447 }
bellardf23db162005-08-21 19:12:28 +00001448 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001449 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00001450 /* we remove the notdirty callback only if the code has been
1451 flushed */
1452 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00001453 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00001454}
1455
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001456static const MemoryRegionOps notdirty_mem_ops = {
1457 .read = error_mem_read,
1458 .write = notdirty_mem_write,
1459 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00001460};
1461
pbrook0f459d12008-06-09 00:20:13 +00001462/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00001463static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00001464{
Andreas Färber9349b4f2012-03-14 01:38:32 +01001465 CPUArchState *env = cpu_single_env;
aliguori06d55cc2008-11-18 20:24:06 +00001466 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00001467 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00001468 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00001469 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00001470
aliguori06d55cc2008-11-18 20:24:06 +00001471 if (env->watchpoint_hit) {
1472 /* We re-entered the check after replacing the TB. Now raise
 1473 * the debug interrupt so that it will trigger after the
1474 * current instruction. */
1475 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
1476 return;
1477 }
pbrook2e70f6e2008-06-29 01:03:05 +00001478 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00001479 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001480 if ((vaddr == (wp->vaddr & len_mask) ||
1481 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00001482 wp->flags |= BP_WATCHPOINT_HIT;
1483 if (!env->watchpoint_hit) {
1484 env->watchpoint_hit = wp;
Blue Swirl5a316522012-12-02 21:28:09 +00001485 tb_check_watchpoint(env);
aliguori6e140f22008-11-18 20:37:55 +00001486 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1487 env->exception_index = EXCP_DEBUG;
Max Filippov488d6572012-01-29 02:24:39 +04001488 cpu_loop_exit(env);
aliguori6e140f22008-11-18 20:37:55 +00001489 } else {
1490 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1491 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
Max Filippov488d6572012-01-29 02:24:39 +04001492 cpu_resume_from_signal(env, NULL);
aliguori6e140f22008-11-18 20:37:55 +00001493 }
aliguori06d55cc2008-11-18 20:24:06 +00001494 }
aliguori6e140f22008-11-18 20:37:55 +00001495 } else {
1496 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00001497 }
1498 }
1499}
1500
pbrook6658ffb2007-03-16 23:58:11 +00001501/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1502 so these check for a hit then pass through to the normal out-of-line
1503 phys routines. */
Avi Kivitya8170e52012-10-23 12:30:10 +02001504static uint64_t watch_mem_read(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001505 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001506{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001507 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1508 switch (size) {
1509 case 1: return ldub_phys(addr);
1510 case 2: return lduw_phys(addr);
1511 case 4: return ldl_phys(addr);
1512 default: abort();
1513 }
pbrook6658ffb2007-03-16 23:58:11 +00001514}
1515
Avi Kivitya8170e52012-10-23 12:30:10 +02001516static void watch_mem_write(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001517 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001518{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001519 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1520 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04001521 case 1:
1522 stb_phys(addr, val);
1523 break;
1524 case 2:
1525 stw_phys(addr, val);
1526 break;
1527 case 4:
1528 stl_phys(addr, val);
1529 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02001530 default: abort();
1531 }
pbrook6658ffb2007-03-16 23:58:11 +00001532}
1533
Avi Kivity1ec9b902012-01-02 12:47:48 +02001534static const MemoryRegionOps watch_mem_ops = {
1535 .read = watch_mem_read,
1536 .write = watch_mem_write,
1537 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00001538};
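/* Illustrative sketch, not from this file: these handlers fire for pages
 * covered by a watchpoint, which debuggers install with e.g.:
 *
 *     CPUWatchpoint *wp;
 *     cpu_watchpoint_insert(env, vaddr, 4, BP_MEM_WRITE, &wp);
 *
 * The TLB then routes accesses to that page through io_mem_watch so that
 * check_watchpoint() can raise EXCP_DEBUG on a hit.
 */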
pbrook6658ffb2007-03-16 23:58:11 +00001539
Avi Kivitya8170e52012-10-23 12:30:10 +02001540static uint64_t subpage_read(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001541 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001542{
Avi Kivity70c68e42012-01-02 12:32:48 +02001543 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07001544 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02001545 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00001546#if defined(DEBUG_SUBPAGE)
1547 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
1548 mmio, len, addr, idx);
1549#endif
blueswir1db7b5422007-05-26 17:36:03 +00001550
Avi Kivity5312bd82012-02-12 18:32:55 +02001551 section = &phys_sections[mmio->sub_section[idx]];
1552 addr += mmio->base;
1553 addr -= section->offset_within_address_space;
1554 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02001555 return io_mem_read(section->mr, addr, len);
blueswir1db7b5422007-05-26 17:36:03 +00001556}
1557
Avi Kivitya8170e52012-10-23 12:30:10 +02001558static void subpage_write(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001559 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001560{
Avi Kivity70c68e42012-01-02 12:32:48 +02001561 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07001562 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02001563 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00001564#if defined(DEBUG_SUBPAGE)
Avi Kivity70c68e42012-01-02 12:32:48 +02001565 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
1566 " idx %d value %"PRIx64"\n",
Richard Hendersonf6405242010-04-22 16:47:31 -07001567 __func__, mmio, len, addr, idx, value);
blueswir1db7b5422007-05-26 17:36:03 +00001568#endif
Richard Hendersonf6405242010-04-22 16:47:31 -07001569
Avi Kivity5312bd82012-02-12 18:32:55 +02001570 section = &phys_sections[mmio->sub_section[idx]];
1571 addr += mmio->base;
1572 addr -= section->offset_within_address_space;
1573 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02001574 io_mem_write(section->mr, addr, value, len);
blueswir1db7b5422007-05-26 17:36:03 +00001575}
1576
Avi Kivity70c68e42012-01-02 12:32:48 +02001577static const MemoryRegionOps subpage_ops = {
1578 .read = subpage_read,
1579 .write = subpage_write,
1580 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00001581};
1582
Avi Kivitya8170e52012-10-23 12:30:10 +02001583static uint64_t subpage_ram_read(void *opaque, hwaddr addr,
Avi Kivityde712f92012-01-02 12:41:07 +02001584 unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01001585{
1586 ram_addr_t raddr = addr;
1587 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02001588 switch (size) {
1589 case 1: return ldub_p(ptr);
1590 case 2: return lduw_p(ptr);
1591 case 4: return ldl_p(ptr);
1592 default: abort();
1593 }
Andreas Färber56384e82011-11-30 16:26:21 +01001594}
1595
Avi Kivitya8170e52012-10-23 12:30:10 +02001596static void subpage_ram_write(void *opaque, hwaddr addr,
Avi Kivityde712f92012-01-02 12:41:07 +02001597 uint64_t value, unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01001598{
1599 ram_addr_t raddr = addr;
1600 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02001601 switch (size) {
1602 case 1: return stb_p(ptr, value);
1603 case 2: return stw_p(ptr, value);
1604 case 4: return stl_p(ptr, value);
1605 default: abort();
1606 }
Andreas Färber56384e82011-11-30 16:26:21 +01001607}
1608
Avi Kivityde712f92012-01-02 12:41:07 +02001609static const MemoryRegionOps subpage_ram_ops = {
1610 .read = subpage_ram_read,
1611 .write = subpage_ram_write,
1612 .endianness = DEVICE_NATIVE_ENDIAN,
Andreas Färber56384e82011-11-30 16:26:21 +01001613};
1614
Anthony Liguoric227f092009-10-01 16:12:16 -05001615static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02001616 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00001617{
1618 int idx, eidx;
1619
1620 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1621 return -1;
1622 idx = SUBPAGE_IDX(start);
1623 eidx = SUBPAGE_IDX(end);
1624#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00001625 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", __func__,
blueswir1db7b5422007-05-26 17:36:03 +00001626 mmio, start, end, idx, eidx, section);
1627#endif
Avi Kivity5312bd82012-02-12 18:32:55 +02001628 if (memory_region_is_ram(phys_sections[section].mr)) {
1629 MemoryRegionSection new_section = phys_sections[section];
1630 new_section.mr = &io_mem_subpage_ram;
1631 section = phys_section_add(&new_section);
Andreas Färber56384e82011-11-30 16:26:21 +01001632 }
blueswir1db7b5422007-05-26 17:36:03 +00001633 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02001634 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00001635 }
1636
1637 return 0;
1638}
1639
Avi Kivitya8170e52012-10-23 12:30:10 +02001640static subpage_t *subpage_init(hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00001641{
Anthony Liguoric227f092009-10-01 16:12:16 -05001642 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00001643
Anthony Liguori7267c092011-08-20 22:09:37 -05001644 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00001645
1646 mmio->base = base;
Avi Kivity70c68e42012-01-02 12:32:48 +02001647 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
1648 "subpage", TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02001649 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00001650#if defined(DEBUG_SUBPAGE)
aliguori1eec6142009-02-05 22:06:18 +00001651 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
 1652 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00001653#endif
Avi Kivity0f0cb162012-02-13 17:14:32 +02001654 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
blueswir1db7b5422007-05-26 17:36:03 +00001655
1656 return mmio;
1657}
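/* Illustrative sketch (internal machinery, simplified): the dispatch code
 * creates a subpage when a section boundary is not page aligned, roughly:
 *
 *     subpage_t *sp = subpage_init(base & TARGET_PAGE_MASK);
 *     subpage_register(sp, start_in_page, end_in_page, section_index);
 *
 * Offsets within the page that are never registered keep pointing at
 * phys_section_unassigned.
 */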
1658
Avi Kivity5312bd82012-02-12 18:32:55 +02001659static uint16_t dummy_section(MemoryRegion *mr)
1660{
1661 MemoryRegionSection section = {
1662 .mr = mr,
1663 .offset_within_address_space = 0,
1664 .offset_within_region = 0,
1665 .size = UINT64_MAX,
1666 };
1667
1668 return phys_section_add(&section);
1669}
1670
Avi Kivitya8170e52012-10-23 12:30:10 +02001671MemoryRegion *iotlb_to_region(hwaddr index)
Avi Kivityaa102232012-03-08 17:06:55 +02001672{
Avi Kivity37ec01d2012-03-08 18:08:35 +02001673 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02001674}
1675
Avi Kivitye9179ce2009-06-14 11:38:52 +03001676static void io_mem_init(void)
1677{
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001678 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001679 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
1680 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
1681 "unassigned", UINT64_MAX);
1682 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
1683 "notdirty", UINT64_MAX);
Avi Kivityde712f92012-01-02 12:41:07 +02001684 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
1685 "subpage-ram", UINT64_MAX);
Avi Kivity1ec9b902012-01-02 12:47:48 +02001686 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
1687 "watch", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03001688}
1689
Avi Kivityac1970f2012-10-03 16:22:53 +02001690static void mem_begin(MemoryListener *listener)
1691{
1692 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
1693
1694 destroy_all_mappings(d);
1695 d->phys_map.ptr = PHYS_MAP_NODE_NIL;
1696}
1697
Avi Kivity50c1e142012-02-08 21:36:02 +02001698static void core_begin(MemoryListener *listener)
1699{
Avi Kivity5312bd82012-02-12 18:32:55 +02001700 phys_sections_clear();
1701 phys_section_unassigned = dummy_section(&io_mem_unassigned);
Avi Kivityaa102232012-03-08 17:06:55 +02001702 phys_section_notdirty = dummy_section(&io_mem_notdirty);
1703 phys_section_rom = dummy_section(&io_mem_rom);
1704 phys_section_watch = dummy_section(&io_mem_watch);
Avi Kivity50c1e142012-02-08 21:36:02 +02001705}
1706
Avi Kivity1d711482012-10-02 18:54:45 +02001707static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02001708{
Andreas Färber9349b4f2012-03-14 01:38:32 +01001709 CPUArchState *env;
Avi Kivity117712c2012-02-12 21:23:17 +02001710
1711 /* since each CPU stores ram addresses in its TLB cache, we must
1712 reset the modified entries */
1713 /* XXX: slow ! */
1714 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1715 tlb_flush(env, 1);
1716 }
Avi Kivity50c1e142012-02-08 21:36:02 +02001717}
1718
Avi Kivity93632742012-02-08 16:54:16 +02001719static void core_log_global_start(MemoryListener *listener)
1720{
1721 cpu_physical_memory_set_dirty_tracking(1);
1722}
1723
1724static void core_log_global_stop(MemoryListener *listener)
1725{
1726 cpu_physical_memory_set_dirty_tracking(0);
1727}
1728
Avi Kivity4855d412012-02-08 21:16:05 +02001729static void io_region_add(MemoryListener *listener,
1730 MemoryRegionSection *section)
1731{
Avi Kivitya2d33522012-03-05 17:40:12 +02001732 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
1733
1734 mrio->mr = section->mr;
1735 mrio->offset = section->offset_within_region;
1736 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
Avi Kivity4855d412012-02-08 21:16:05 +02001737 section->offset_within_address_space, section->size);
Avi Kivitya2d33522012-03-05 17:40:12 +02001738 ioport_register(&mrio->iorange);
Avi Kivity4855d412012-02-08 21:16:05 +02001739}
1740
1741static void io_region_del(MemoryListener *listener,
1742 MemoryRegionSection *section)
1743{
1744 isa_unassign_ioport(section->offset_within_address_space, section->size);
1745}
1746
Avi Kivity93632742012-02-08 16:54:16 +02001747static MemoryListener core_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02001748 .begin = core_begin,
Avi Kivity93632742012-02-08 16:54:16 +02001749 .log_global_start = core_log_global_start,
1750 .log_global_stop = core_log_global_stop,
Avi Kivityac1970f2012-10-03 16:22:53 +02001751 .priority = 1,
Avi Kivity93632742012-02-08 16:54:16 +02001752};
1753
Avi Kivity4855d412012-02-08 21:16:05 +02001754static MemoryListener io_memory_listener = {
1755 .region_add = io_region_add,
1756 .region_del = io_region_del,
Avi Kivity4855d412012-02-08 21:16:05 +02001757 .priority = 0,
1758};
1759
Avi Kivity1d711482012-10-02 18:54:45 +02001760static MemoryListener tcg_memory_listener = {
1761 .commit = tcg_commit,
1762};
1763
Avi Kivityac1970f2012-10-03 16:22:53 +02001764void address_space_init_dispatch(AddressSpace *as)
1765{
1766 AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
1767
1768 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
1769 d->listener = (MemoryListener) {
1770 .begin = mem_begin,
1771 .region_add = mem_add,
1772 .region_nop = mem_add,
1773 .priority = 0,
1774 };
1775 as->dispatch = d;
1776 memory_listener_register(&d->listener, as);
1777}
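/* Illustrative sketch: callers create an address space over a root region
 * with address_space_init(), which invokes this function internally:
 *
 *     AddressSpace as;
 *     address_space_init(&as, root_mr);   // root_mr: some MemoryRegion
 *     // as.dispatch now holds the mem_begin/mem_add listener shown above
 */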
1778
Avi Kivity83f3c252012-10-07 12:59:55 +02001779void address_space_destroy_dispatch(AddressSpace *as)
1780{
1781 AddressSpaceDispatch *d = as->dispatch;
1782
1783 memory_listener_unregister(&d->listener);
1784 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
1785 g_free(d);
1786 as->dispatch = NULL;
1787}
1788
Avi Kivity62152b82011-07-26 14:26:14 +03001789static void memory_map_init(void)
1790{
Anthony Liguori7267c092011-08-20 22:09:37 -05001791 system_memory = g_malloc(sizeof(*system_memory));
Avi Kivity8417ceb2011-08-03 11:56:14 +03001792 memory_region_init(system_memory, "system", INT64_MAX);
Avi Kivity2673a5d2012-10-02 18:49:28 +02001793 address_space_init(&address_space_memory, system_memory);
1794 address_space_memory.name = "memory";
Avi Kivity309cb472011-08-08 16:09:03 +03001795
Anthony Liguori7267c092011-08-20 22:09:37 -05001796 system_io = g_malloc(sizeof(*system_io));
Avi Kivity309cb472011-08-08 16:09:03 +03001797 memory_region_init(system_io, "io", 65536);
Avi Kivity2673a5d2012-10-02 18:49:28 +02001798 address_space_init(&address_space_io, system_io);
1799 address_space_io.name = "I/O";
Avi Kivity93632742012-02-08 16:54:16 +02001800
Avi Kivityf6790af2012-10-02 20:13:51 +02001801 memory_listener_register(&core_memory_listener, &address_space_memory);
1802 memory_listener_register(&io_memory_listener, &address_space_io);
1803 memory_listener_register(&tcg_memory_listener, &address_space_memory);
Peter Maydell9e119082012-10-29 11:34:32 +10001804
1805 dma_context_init(&dma_context_memory, &address_space_memory,
1806 NULL, NULL, NULL);
Avi Kivity62152b82011-07-26 14:26:14 +03001807}
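/* Illustrative sketch: board code retrieves these globals to build its
 * memory map, e.g. mapping a hypothetical device MMIO region:
 *
 *     memory_region_add_subregion(get_system_memory(), 0x10000000,
 *                                 &dev->mmio);
 */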
1808
1809MemoryRegion *get_system_memory(void)
1810{
1811 return system_memory;
1812}
1813
Avi Kivity309cb472011-08-08 16:09:03 +03001814MemoryRegion *get_system_io(void)
1815{
1816 return system_io;
1817}
1818
pbrooke2eef172008-06-08 01:09:01 +00001819#endif /* !defined(CONFIG_USER_ONLY) */
1820
bellard13eb76e2004-01-24 15:23:36 +00001821/* physical memory access (slow version, mainly for debug) */
1822#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +01001823int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00001824 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00001825{
1826 int l, flags;
1827 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00001828 void * p;
bellard13eb76e2004-01-24 15:23:36 +00001829
1830 while (len > 0) {
1831 page = addr & TARGET_PAGE_MASK;
1832 l = (page + TARGET_PAGE_SIZE) - addr;
1833 if (l > len)
1834 l = len;
1835 flags = page_get_flags(page);
1836 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00001837 return -1;
bellard13eb76e2004-01-24 15:23:36 +00001838 if (is_write) {
1839 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00001840 return -1;
bellard579a97f2007-11-11 14:26:47 +00001841 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00001842 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00001843 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00001844 memcpy(p, buf, l);
1845 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00001846 } else {
1847 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00001848 return -1;
bellard579a97f2007-11-11 14:26:47 +00001849 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00001850 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00001851 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00001852 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00001853 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00001854 }
1855 len -= l;
1856 buf += l;
1857 addr += l;
1858 }
Paul Brooka68fe892010-03-01 00:08:59 +00001859 return 0;
bellard13eb76e2004-01-24 15:23:36 +00001860}
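/* Illustrative sketch: the gdbstub and monitor use this helper to peek at
 * guest memory, e.g.:
 *
 *     uint32_t word;
 *     if (cpu_memory_rw_debug(env, addr, (uint8_t *)&word,
 *                             sizeof(word), 0) == 0) {
 *         // word holds 4 bytes read from the guest address space
 *     }
 */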
bellard8df1cd02005-01-28 22:37:22 +00001861
bellard13eb76e2004-01-24 15:23:36 +00001862#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001863
Avi Kivitya8170e52012-10-23 12:30:10 +02001864static void invalidate_and_set_dirty(hwaddr addr,
1865 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001866{
1867 if (!cpu_physical_memory_is_dirty(addr)) {
1868 /* invalidate code */
1869 tb_invalidate_phys_page_range(addr, addr + length, 0);
1870 /* set dirty bit */
1871 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1872 }
Anthony PERARDe2269392012-10-03 13:49:22 +00001873 xen_modified_memory(addr, length);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001874}
1875
Avi Kivitya8170e52012-10-23 12:30:10 +02001876void address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02001877 int len, bool is_write)
bellard13eb76e2004-01-24 15:23:36 +00001878{
Avi Kivityac1970f2012-10-03 16:22:53 +02001879 AddressSpaceDispatch *d = as->dispatch;
Avi Kivity37ec01d2012-03-08 18:08:35 +02001880 int l;
bellard13eb76e2004-01-24 15:23:36 +00001881 uint8_t *ptr;
1882 uint32_t val;
Avi Kivitya8170e52012-10-23 12:30:10 +02001883 hwaddr page;
Avi Kivityf3705d52012-03-08 16:16:34 +02001884 MemoryRegionSection *section;
ths3b46e622007-09-17 08:09:54 +00001885
bellard13eb76e2004-01-24 15:23:36 +00001886 while (len > 0) {
1887 page = addr & TARGET_PAGE_MASK;
1888 l = (page + TARGET_PAGE_SIZE) - addr;
1889 if (l > len)
1890 l = len;
Avi Kivityac1970f2012-10-03 16:22:53 +02001891 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00001892
bellard13eb76e2004-01-24 15:23:36 +00001893 if (is_write) {
Avi Kivityf3705d52012-03-08 16:16:34 +02001894 if (!memory_region_is_ram(section->mr)) {
Avi Kivitya8170e52012-10-23 12:30:10 +02001895 hwaddr addr1;
Blue Swirlcc5bea62012-04-14 14:56:48 +00001896 addr1 = memory_region_section_addr(section, addr);
bellard6a00d602005-11-21 23:25:50 +00001897 /* XXX: could force cpu_single_env to NULL to avoid
1898 potential bugs */
aurel326c2934d2009-02-18 21:37:17 +00001899 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00001900 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001901 val = ldl_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02001902 io_mem_write(section->mr, addr1, val, 4);
bellard13eb76e2004-01-24 15:23:36 +00001903 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00001904 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00001905 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001906 val = lduw_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02001907 io_mem_write(section->mr, addr1, val, 2);
bellard13eb76e2004-01-24 15:23:36 +00001908 l = 2;
1909 } else {
bellard1c213d12005-09-03 10:49:04 +00001910 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001911 val = ldub_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02001912 io_mem_write(section->mr, addr1, val, 1);
bellard13eb76e2004-01-24 15:23:36 +00001913 l = 1;
1914 }
Avi Kivityf3705d52012-03-08 16:16:34 +02001915 } else if (!section->readonly) {
Anthony PERARD8ca56922011-07-15 04:32:53 +00001916 ram_addr_t addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02001917 addr1 = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00001918 + memory_region_section_addr(section, addr);
bellard13eb76e2004-01-24 15:23:36 +00001919 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00001920 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00001921 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001922 invalidate_and_set_dirty(addr1, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01001923 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00001924 }
1925 } else {
Blue Swirlcc5bea62012-04-14 14:56:48 +00001926 if (!(memory_region_is_ram(section->mr) ||
1927 memory_region_is_romd(section->mr))) {
Avi Kivitya8170e52012-10-23 12:30:10 +02001928 hwaddr addr1;
bellard13eb76e2004-01-24 15:23:36 +00001929 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00001930 addr1 = memory_region_section_addr(section, addr);
aurel326c2934d2009-02-18 21:37:17 +00001931 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00001932 /* 32 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02001933 val = io_mem_read(section->mr, addr1, 4);
bellardc27004e2005-01-03 23:35:10 +00001934 stl_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00001935 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00001936 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00001937 /* 16 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02001938 val = io_mem_read(section->mr, addr1, 2);
bellardc27004e2005-01-03 23:35:10 +00001939 stw_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00001940 l = 2;
1941 } else {
bellard1c213d12005-09-03 10:49:04 +00001942 /* 8 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02001943 val = io_mem_read(section->mr, addr1, 1);
bellardc27004e2005-01-03 23:35:10 +00001944 stb_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00001945 l = 1;
1946 }
1947 } else {
1948 /* RAM case */
Anthony PERARD0a1b3572012-03-19 15:54:34 +00001949 ptr = qemu_get_ram_ptr(section->mr->ram_addr
Blue Swirlcc5bea62012-04-14 14:56:48 +00001950 + memory_region_section_addr(section,
1951 addr));
Avi Kivityf3705d52012-03-08 16:16:34 +02001952 memcpy(buf, ptr, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01001953 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00001954 }
1955 }
1956 len -= l;
1957 buf += l;
1958 addr += l;
1959 }
1960}
bellard8df1cd02005-01-28 22:37:22 +00001961
Avi Kivitya8170e52012-10-23 12:30:10 +02001962void address_space_write(AddressSpace *as, hwaddr addr,
Avi Kivityac1970f2012-10-03 16:22:53 +02001963 const uint8_t *buf, int len)
1964{
1965 address_space_rw(as, addr, (uint8_t *)buf, len, true);
1966}
1967
1968/**
1969 * address_space_read: read from an address space.
1970 *
1971 * @as: #AddressSpace to be accessed
1972 * @addr: address within that address space
 1973 * @buf: buffer with the data transferred
 * @len: length of the data to be read
 1974 */
Avi Kivitya8170e52012-10-23 12:30:10 +02001975void address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02001976{
1977 address_space_rw(as, addr, buf, len, false);
1978}
1979
1980
Avi Kivitya8170e52012-10-23 12:30:10 +02001981void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02001982 int len, int is_write)
1983{
1984 return address_space_rw(&address_space_memory, addr, buf, len, is_write);
1985}
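/* Illustrative sketch: most device and loader code uses this wrapper
 * (the address 0x1000 is an arbitrary example):
 *
 *     uint8_t buf[16];
 *     cpu_physical_memory_rw(0x1000, buf, sizeof(buf), 0);  // read
 *     cpu_physical_memory_rw(0x1000, buf, sizeof(buf), 1);  // write
 */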
1986
bellardd0ecd2a2006-04-23 17:14:48 +00001987/* used for ROM loading : can write in RAM and ROM */
Avi Kivitya8170e52012-10-23 12:30:10 +02001988void cpu_physical_memory_write_rom(hwaddr addr,
bellardd0ecd2a2006-04-23 17:14:48 +00001989 const uint8_t *buf, int len)
1990{
Avi Kivityac1970f2012-10-03 16:22:53 +02001991 AddressSpaceDispatch *d = address_space_memory.dispatch;
bellardd0ecd2a2006-04-23 17:14:48 +00001992 int l;
1993 uint8_t *ptr;
Avi Kivitya8170e52012-10-23 12:30:10 +02001994 hwaddr page;
Avi Kivityf3705d52012-03-08 16:16:34 +02001995 MemoryRegionSection *section;
ths3b46e622007-09-17 08:09:54 +00001996
bellardd0ecd2a2006-04-23 17:14:48 +00001997 while (len > 0) {
1998 page = addr & TARGET_PAGE_MASK;
1999 l = (page + TARGET_PAGE_SIZE) - addr;
2000 if (l > len)
2001 l = len;
Avi Kivityac1970f2012-10-03 16:22:53 +02002002 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00002003
Blue Swirlcc5bea62012-04-14 14:56:48 +00002004 if (!(memory_region_is_ram(section->mr) ||
2005 memory_region_is_romd(section->mr))) {
bellardd0ecd2a2006-04-23 17:14:48 +00002006 /* do nothing */
2007 } else {
2008 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02002009 addr1 = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002010 + memory_region_section_addr(section, addr);
bellardd0ecd2a2006-04-23 17:14:48 +00002011 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002012 ptr = qemu_get_ram_ptr(addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00002013 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002014 invalidate_and_set_dirty(addr1, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002015 qemu_put_ram_ptr(ptr);
bellardd0ecd2a2006-04-23 17:14:48 +00002016 }
2017 len -= l;
2018 buf += l;
2019 addr += l;
2020 }
2021}
2022
aliguori6d16c2f2009-01-22 16:59:11 +00002023typedef struct {
2024 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002025 hwaddr addr;
2026 hwaddr len;
aliguori6d16c2f2009-01-22 16:59:11 +00002027} BounceBuffer;
2028
2029static BounceBuffer bounce;
2030
aliguoriba223c22009-01-22 16:59:16 +00002031typedef struct MapClient {
2032 void *opaque;
2033 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00002034 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002035} MapClient;
2036
Blue Swirl72cf2d42009-09-12 07:36:22 +00002037static QLIST_HEAD(map_client_list, MapClient) map_client_list
2038 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002039
2040void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2041{
Anthony Liguori7267c092011-08-20 22:09:37 -05002042 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00002043
2044 client->opaque = opaque;
2045 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002046 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00002047 return client;
2048}
2049
Blue Swirl8b9c99d2012-10-28 11:04:51 +00002050static void cpu_unregister_map_client(void *_client)
aliguoriba223c22009-01-22 16:59:16 +00002051{
2052 MapClient *client = (MapClient *)_client;
2053
Blue Swirl72cf2d42009-09-12 07:36:22 +00002054 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002055 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002056}
2057
2058static void cpu_notify_map_clients(void)
2059{
2060 MapClient *client;
2061
Blue Swirl72cf2d42009-09-12 07:36:22 +00002062 while (!QLIST_EMPTY(&map_client_list)) {
2063 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002064 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09002065 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00002066 }
2067}
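/* Illustrative sketch, assuming a caller-defined retry callback: a DMA
 * user that sees address_space_map() fail can ask to be notified once the
 * bounce buffer is free again:
 *
 *     static void retry_dma(void *opaque);   // hypothetical callback
 *     void *handle = cpu_register_map_client(dev, retry_dma);
 */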
2068
aliguori6d16c2f2009-01-22 16:59:11 +00002069/* Map a physical memory region into a host virtual address.
2070 * May map a subset of the requested range, given by and returned in *plen.
2071 * May return NULL if resources needed to perform the mapping are exhausted.
2072 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002073 * Use cpu_register_map_client() to know when retrying the map operation is
2074 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002075 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002076void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002077 hwaddr addr,
2078 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002079 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002080{
Avi Kivityac1970f2012-10-03 16:22:53 +02002081 AddressSpaceDispatch *d = as->dispatch;
Avi Kivitya8170e52012-10-23 12:30:10 +02002082 hwaddr len = *plen;
2083 hwaddr todo = 0;
aliguori6d16c2f2009-01-22 16:59:11 +00002084 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02002085 hwaddr page;
Avi Kivityf3705d52012-03-08 16:16:34 +02002086 MemoryRegionSection *section;
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002087 ram_addr_t raddr = RAM_ADDR_MAX;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002088 ram_addr_t rlen;
2089 void *ret;
aliguori6d16c2f2009-01-22 16:59:11 +00002090
2091 while (len > 0) {
2092 page = addr & TARGET_PAGE_MASK;
2093 l = (page + TARGET_PAGE_SIZE) - addr;
2094 if (l > len)
2095 l = len;
Avi Kivityac1970f2012-10-03 16:22:53 +02002096 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
aliguori6d16c2f2009-01-22 16:59:11 +00002097
Avi Kivityf3705d52012-03-08 16:16:34 +02002098 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002099 if (todo || bounce.buffer) {
aliguori6d16c2f2009-01-22 16:59:11 +00002100 break;
2101 }
2102 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
2103 bounce.addr = addr;
2104 bounce.len = l;
2105 if (!is_write) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002106 address_space_read(as, addr, bounce.buffer, l);
aliguori6d16c2f2009-01-22 16:59:11 +00002107 }
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002108
2109 *plen = l;
2110 return bounce.buffer;
aliguori6d16c2f2009-01-22 16:59:11 +00002111 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002112 if (!todo) {
Avi Kivityf3705d52012-03-08 16:16:34 +02002113 raddr = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002114 + memory_region_section_addr(section, addr);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002115 }
aliguori6d16c2f2009-01-22 16:59:11 +00002116
2117 len -= l;
2118 addr += l;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002119 todo += l;
aliguori6d16c2f2009-01-22 16:59:11 +00002120 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002121 rlen = todo;
2122 ret = qemu_ram_ptr_length(raddr, &rlen);
2123 *plen = rlen;
2124 return ret;
aliguori6d16c2f2009-01-22 16:59:11 +00002125}
2126
Avi Kivityac1970f2012-10-03 16:22:53 +02002127/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002128 * Will also mark the memory as dirty if is_write == 1. access_len gives
2129 * the amount of memory that was actually read or written by the caller.
2130 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002131void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2132 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002133{
2134 if (buffer != bounce.buffer) {
2135 if (is_write) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03002136 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002137 while (access_len) {
2138 unsigned l;
2139 l = TARGET_PAGE_SIZE;
2140 if (l > access_len)
2141 l = access_len;
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002142 invalidate_and_set_dirty(addr1, l);
aliguori6d16c2f2009-01-22 16:59:11 +00002143 addr1 += l;
2144 access_len -= l;
2145 }
2146 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002147 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002148 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002149 }
aliguori6d16c2f2009-01-22 16:59:11 +00002150 return;
2151 }
2152 if (is_write) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002153 address_space_write(as, bounce.addr, bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002154 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00002155 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002156 bounce.buffer = NULL;
aliguoriba223c22009-01-22 16:59:16 +00002157 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00002158}
bellardd0ecd2a2006-04-23 17:14:48 +00002159
Avi Kivitya8170e52012-10-23 12:30:10 +02002160void *cpu_physical_memory_map(hwaddr addr,
2161 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002162 int is_write)
2163{
2164 return address_space_map(&address_space_memory, addr, plen, is_write);
2165}
2166
Avi Kivitya8170e52012-10-23 12:30:10 +02002167void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2168 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002169{
2170 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2171}
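/* Illustrative sketch: the zero-copy DMA pattern built on these wrappers:
 *
 *     hwaddr len = size;
 *     void *p = cpu_physical_memory_map(gpa, &len, is_write);
 *     if (p) {
 *         // ... transfer at most len bytes through p ...
 *         cpu_physical_memory_unmap(p, len, is_write, len);
 *     }
 *
 * If p is NULL, retry later via cpu_register_map_client().
 */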
2172
bellard8df1cd02005-01-28 22:37:22 +00002173/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002174static inline uint32_t ldl_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002175 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002176{
bellard8df1cd02005-01-28 22:37:22 +00002177 uint8_t *ptr;
2178 uint32_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02002179 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00002180
Avi Kivityac1970f2012-10-03 16:22:53 +02002181 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00002182
Blue Swirlcc5bea62012-04-14 14:56:48 +00002183 if (!(memory_region_is_ram(section->mr) ||
2184 memory_region_is_romd(section->mr))) {
bellard8df1cd02005-01-28 22:37:22 +00002185 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00002186 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02002187 val = io_mem_read(section->mr, addr, 4);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002188#if defined(TARGET_WORDS_BIGENDIAN)
2189 if (endian == DEVICE_LITTLE_ENDIAN) {
2190 val = bswap32(val);
2191 }
2192#else
2193 if (endian == DEVICE_BIG_ENDIAN) {
2194 val = bswap32(val);
2195 }
2196#endif
bellard8df1cd02005-01-28 22:37:22 +00002197 } else {
2198 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02002199 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002200 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002201 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002202 switch (endian) {
2203 case DEVICE_LITTLE_ENDIAN:
2204 val = ldl_le_p(ptr);
2205 break;
2206 case DEVICE_BIG_ENDIAN:
2207 val = ldl_be_p(ptr);
2208 break;
2209 default:
2210 val = ldl_p(ptr);
2211 break;
2212 }
bellard8df1cd02005-01-28 22:37:22 +00002213 }
2214 return val;
2215}
2216
Avi Kivitya8170e52012-10-23 12:30:10 +02002217uint32_t ldl_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002218{
2219 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2220}
2221
Avi Kivitya8170e52012-10-23 12:30:10 +02002222uint32_t ldl_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002223{
2224 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2225}
2226
Avi Kivitya8170e52012-10-23 12:30:10 +02002227uint32_t ldl_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002228{
2229 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
2230}
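/* Illustrative sketch: device code picks the variant matching the bus
 * endianness, e.g. reading a field of a little-endian descriptor
 * (desc_addr is hypothetical):
 *
 *     uint32_t flags = ldl_le_phys(desc_addr + 8);
 */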
2231
bellard84b7b8e2005-11-28 21:19:04 +00002232/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002233static inline uint64_t ldq_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002234 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00002235{
bellard84b7b8e2005-11-28 21:19:04 +00002236 uint8_t *ptr;
2237 uint64_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02002238 MemoryRegionSection *section;
bellard84b7b8e2005-11-28 21:19:04 +00002239
Avi Kivityac1970f2012-10-03 16:22:53 +02002240 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00002241
Blue Swirlcc5bea62012-04-14 14:56:48 +00002242 if (!(memory_region_is_ram(section->mr) ||
2243 memory_region_is_romd(section->mr))) {
bellard84b7b8e2005-11-28 21:19:04 +00002244 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00002245 addr = memory_region_section_addr(section, addr);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002246
2247 /* XXX This is broken when device endian != cpu endian.
2248 Fix and add "endian" variable check */
bellard84b7b8e2005-11-28 21:19:04 +00002249#ifdef TARGET_WORDS_BIGENDIAN
Avi Kivity37ec01d2012-03-08 18:08:35 +02002250 val = io_mem_read(section->mr, addr, 4) << 32;
2251 val |= io_mem_read(section->mr, addr + 4, 4);
bellard84b7b8e2005-11-28 21:19:04 +00002252#else
Avi Kivity37ec01d2012-03-08 18:08:35 +02002253 val = io_mem_read(section->mr, addr, 4);
2254 val |= io_mem_read(section->mr, addr + 4, 4) << 32;
bellard84b7b8e2005-11-28 21:19:04 +00002255#endif
2256 } else {
2257 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02002258 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002259 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002260 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002261 switch (endian) {
2262 case DEVICE_LITTLE_ENDIAN:
2263 val = ldq_le_p(ptr);
2264 break;
2265 case DEVICE_BIG_ENDIAN:
2266 val = ldq_be_p(ptr);
2267 break;
2268 default:
2269 val = ldq_p(ptr);
2270 break;
2271 }
bellard84b7b8e2005-11-28 21:19:04 +00002272 }
2273 return val;
2274}
2275
Avi Kivitya8170e52012-10-23 12:30:10 +02002276uint64_t ldq_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002277{
2278 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2279}
2280
Avi Kivitya8170e52012-10-23 12:30:10 +02002281uint64_t ldq_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002282{
2283 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2284}
2285
Avi Kivitya8170e52012-10-23 12:30:10 +02002286uint64_t ldq_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002287{
2288 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2289}
2290
bellardaab33092005-10-30 20:48:42 +00002291/* XXX: optimize */
Avi Kivitya8170e52012-10-23 12:30:10 +02002292uint32_t ldub_phys(hwaddr addr)
bellardaab33092005-10-30 20:48:42 +00002293{
2294 uint8_t val;
2295 cpu_physical_memory_read(addr, &val, 1);
2296 return val;
2297}
2298
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002299/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002300static inline uint32_t lduw_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002301 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00002302{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002303 uint8_t *ptr;
2304 uint64_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02002305 MemoryRegionSection *section;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002306
Avi Kivityac1970f2012-10-03 16:22:53 +02002307 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002308
Blue Swirlcc5bea62012-04-14 14:56:48 +00002309 if (!(memory_region_is_ram(section->mr) ||
2310 memory_region_is_romd(section->mr))) {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002311 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00002312 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02002313 val = io_mem_read(section->mr, addr, 2);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002314#if defined(TARGET_WORDS_BIGENDIAN)
2315 if (endian == DEVICE_LITTLE_ENDIAN) {
2316 val = bswap16(val);
2317 }
2318#else
2319 if (endian == DEVICE_BIG_ENDIAN) {
2320 val = bswap16(val);
2321 }
2322#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002323 } else {
2324 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02002325 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002326 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002327 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002328 switch (endian) {
2329 case DEVICE_LITTLE_ENDIAN:
2330 val = lduw_le_p(ptr);
2331 break;
2332 case DEVICE_BIG_ENDIAN:
2333 val = lduw_be_p(ptr);
2334 break;
2335 default:
2336 val = lduw_p(ptr);
2337 break;
2338 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002339 }
2340 return val;
bellardaab33092005-10-30 20:48:42 +00002341}

uint32_t lduw_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned. The RAM page is not marked dirty and
   translated code within it is not invalidated; this is useful when the
   dirty bits are used to track modified PTEs */
void stl_phys_notdirty(hwaddr addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
                               & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
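
/*
 * Usage sketch for stl_phys_notdirty() (illustrative only: pte_addr and
 * PTE_DIRTY are hypothetical stand-ins for a target MMU's page-table
 * layout). A softmmu page-table walker can set accessed/dirty bits in a
 * guest PTE without flagging the RAM page dirty, so the dirty bitmap keeps
 * tracking only real guest writes:
 *
 *     uint32_t pte = ldl_phys(pte_addr);
 *     stl_phys_notdirty(pte_addr, pte | PTE_DIRTY);
 */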

void stq_phys_notdirty(hwaddr addr, uint64_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write(section->mr, addr, val >> 32, 4);
        io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
#else
        io_mem_write(section->mr, addr, (uint32_t)val, 4);
        io_mem_write(section->mr, addr + 4, val >> 32, 4);
#endif
    } else {
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        stq_p(ptr, val);
    }
}
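
/*
 * Note on the I/O branch above: the 64-bit value is emitted as two 32-bit
 * io_mem_write() calls, most-significant half first on big-endian targets
 * and last otherwise, since this path issues at most 4 bytes per access.
 */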

/* warning: addr must be aligned */
static inline void stl_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
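
/*
 * Usage sketch for the 32-bit stores above (illustrative only: ring_base
 * and COMPLETION_OFFSET are hypothetical). A device model whose
 * guest-visible structures are defined as little-endian can use the _le_
 * variant and stay correct on any target or host endianness:
 *
 *     stl_le_phys(ring_base + COMPLETION_OFFSET, 1);
 */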

/* XXX: optimize */
void stb_phys(hwaddr addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 2);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}
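
/*
 * Byte-order note for the three stores above: stq_phys() writes the value
 * in the target's native order (tswap64() converts from host order), while
 * stq_le_phys()/stq_be_phys() write a fixed layout regardless of the
 * target. On a little-endian target, stq_phys(a, v) and stq_le_phys(a, v)
 * leave identical bytes in guest memory.
 */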

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page is mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        } else {
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
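
/*
 * Usage sketch for cpu_memory_rw_debug() (illustrative only; this is the
 * style of call the gdb stub makes). Guest memory is accessed by virtual
 * address, one page-sized chunk at a time, and unmapped pages report an
 * error:
 *
 *     uint8_t buf[16];
 *     if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0) {
 *         ...address not mapped by the guest MMU...
 *     }
 */
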
#endif

#if !defined(CONFIG_USER_ONLY)

/*
 * A helper function for the _utterly broken_ virtio device model to find out
 * if it's running on a big-endian machine. Don't do this at home, kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#endif

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             phys_addr >> TARGET_PAGE_BITS);

    return !(memory_region_is_ram(section->mr) ||
             memory_region_is_romd(section->mr));
}
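
/*
 * Usage sketch for cpu_physical_memory_is_io() (illustrative only; the loop
 * bounds are hypothetical). A guest-memory dumper can skip device regions,
 * where reads may have side effects, and copy only RAM/ROM-device pages:
 *
 *     for (a = start; a < end; a += TARGET_PAGE_SIZE) {
 *         if (cpu_physical_memory_is_io(a)) {
 *             continue;
 *         }
 *         ...dump the page at a...
 *     }
 */
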
#endif