/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "hw/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"

//#define DEBUG_UNASSIGNED
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;
DMAContext dma_context_memory;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *, cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

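/* Reserve room for at least 'nodes' more PhysPageEntry nodes in the
 * node pool, growing it geometrically.  Nodes are recycled in bulk:
 * phys_map_nodes_reset() below simply rewinds the allocation counter.
 */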
static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}

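/* Fill [*index, *index + *nb) of the physical page map with 'leaf',
 * descending the radix tree one level per recursion and allocating
 * intermediate nodes on demand.  Aligned runs that cover a whole
 * subtree are marked as leaves directly instead of being expanded.
 */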
static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

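/* Look up the section covering physical page 'index'.  Pages that were
 * never mapped resolve to the phys_section_unassigned sentinel, so the
 * caller always gets a valid MemoryRegionSection back.
 */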
MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
{
    PhysPageEntry lp = d->phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#else
#define vmstate_cpu_common vmstate_dummy
#endif

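/* Return the CPUState with the given index, or NULL if it does not
 * exist.  CPUs are kept on a singly linked list rooted at first_cpu.
 */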
CPUState *qemu_get_cpu(int index)
{
    CPUArchState *env = first_cpu;
    CPUState *cpu = NULL;

    while (env) {
        cpu = ENV_GET_CPU(env);
        if (cpu->cpu_index == index) {
            break;
        }
        env = env->next_cpu;
    }

    return env ? cpu : NULL;
}

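/* Link a freshly created CPU into the global list, assign it the next
 * free cpu_index, and register its state with the migration/savevm
 * machinery.
 */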
void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
    }

    tlb_flush_page(env, addr);

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(env, wp);
        }
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
    }

    breakpoint_invalidate(env, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(env, bp);
        }
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(env, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

void cpu_exit(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->exit_request = 1;
    cpu->tcg_exit_req = 1;
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining. */
    new_env->next_cpu = next_cpu;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&new_env->breakpoints);
    QTAILQ_INIT(&new_env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)
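/* Re-arm dirty tracking for [start, end) by flushing the corresponding
 * TLB entries, so that the next guest write faults and sets the dirty
 * bit again.  The range must lie within a single RAM block.
 */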
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0) {
        return;
    }
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

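/* Compute the TLB "iotlb" entry for a page: for RAM this is a
 * ram_addr_t-style value, possibly tagged with the notdirty or rom
 * section; for MMIO it is the section index plus the offset within the
 * section.  Pages carrying a watchpoint are redirected to the
 * watchpoint handler and marked TLB_MMIO.
 */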
hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, paddr);
        if (!section->readonly) {
            iotlb |= phys_section_notdirty;
        } else {
            iotlb |= phys_section_rom;
        }
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = section - phys_sections;
        iotlb += memory_region_section_addr(section, paddr);
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

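/* Subpages handle mappings at a granularity finer than
 * TARGET_PAGE_SIZE: each byte of the page is dispatched to its own
 * backing MemoryRegionSection via the sub_section table.
 */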
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(hwaddr base);

static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
        if (!p[i].is_leaf) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].ptr);
        }
    }
    lp->is_leaf = 0;
    lp->ptr = PHYS_MAP_NODE_NIL;
}

static void destroy_all_mappings(AddressSpaceDispatch *d)
{
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}

static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}

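/* Map a section that does not cover a whole target page.  The page is
 * backed by a subpage container, created on first use, and the new
 * section is registered into the relevant slice of that container.
 */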
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + section->size - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}

static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    hwaddr addr;
    uint16_t section_index = phys_section_add(section);

    assert(size);

    addr = start_addr;
    phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
                  section_index);
}

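/* MemoryListener 'region_add' hook: split an incoming section into a
 * page-aligned middle, registered as multipage entries, and unaligned
 * head/tail fragments, registered as subpages.
 */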
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
    MemoryRegionSection now = *section, remain = *section;

    if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
        || (now.size < TARGET_PAGE_SIZE)) {
        now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space,
                       now.size);
        register_subpage(d, &now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    while (remain.size >= TARGET_PAGE_SIZE) {
        now = remain;
        if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = TARGET_PAGE_SIZE;
            register_subpage(d, &now);
        } else {
            now.size &= TARGET_PAGE_MASK;
            register_multipage(d, &now);
        }
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    now = remain;
    if (now.size) {
        register_subpage(d, &now);
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC 0x958458f6

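/* Return the page size (f_bsize) of the hugetlbfs mount at 'path', or
 * 0 on error.  Warns if the path is not actually on hugetlbfs.
 */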
static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC) {
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
    }

    return fs.f_bsize;
}

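/* Back a RAM block with an unlinked temporary file on a hugetlbfs
 * mount and mmap it.  Returns the mapped area, or NULL if the size is
 * smaller than one huge page or the mapping cannot be set up.
 */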
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    filename = g_strdup_printf("%s/qemu_back_mem.XXXXXX", path);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory)) {
        perror("ftruncate");
    }

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    block->fd = fd;
    return area;
}
#endif

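/* Find a free ram_addr_t range big enough for 'size', preferring the
 * smallest sufficient gap between existing blocks to limit
 * fragmentation of the RAM address space.
 */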
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    if (QTAILQ_EMPTY(&ram_list.blocks)) {
        return 0;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        last = MAX(last, block->offset + block->length);
    }

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;
    QemuOpts *machine_opts;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (machine_opts &&
        !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                    "but dump_guest_core=off specified\n");
        }
    }
}

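/* Attach a unique identifier string ("<device path>/<name>") to the
 * RAM block at 'addr'.  The idstr is what RAM migration uses to match
 * blocks between source and destination, so duplicates abort.
 */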
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too. */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    QemuOpts *opts;

    opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

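/* Allocate a new RAM block of 'size' bytes and register it with the
 * dirty-memory machinery.  If 'host' is non-NULL the caller provides
 * the backing memory; otherwise it is allocated here (from -mem-path,
 * Xen, KVM, or plain qemu_vmalloc).  Returns the block's ram_addr_t
 * offset.
 */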
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *block, *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    /* This assumes the iothread lock is taken here too. */
    qemu_mutex_lock_ramlist();
    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                memory_try_enable_merging(new_block->host, size);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_vmalloc(size);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    /* Keep the list sorted from biggest to smallest block.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);

    if (kvm_enabled()) {
        kvm_setup_guest_memory(new_block->host, size);
    }

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too. */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too. */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

#ifndef _WIN32
1161void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1162{
1163 RAMBlock *block;
1164 ram_addr_t offset;
1165 int flags;
1166 void *area, *vaddr;
1167
Paolo Bonzinia3161032012-11-14 15:54:48 +01001168 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001169 offset = addr - block->offset;
1170 if (offset < block->length) {
1171 vaddr = block->host + offset;
1172 if (block->flags & RAM_PREALLOC_MASK) {
1173 ;
1174 } else {
1175 flags = MAP_FIXED;
1176 munmap(vaddr, length);
1177 if (mem_path) {
1178#if defined(__linux__) && !defined(TARGET_S390X)
1179 if (block->fd) {
1180#ifdef MAP_POPULATE
1181 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1182 MAP_PRIVATE;
1183#else
1184 flags |= MAP_PRIVATE;
1185#endif
1186 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1187 flags, block->fd, offset);
1188 } else {
1189 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1190 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1191 flags, -1, 0);
1192 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01001193#else
1194 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001195#endif
1196 } else {
1197#if defined(TARGET_S390X) && defined(CONFIG_KVM)
1198 flags |= MAP_SHARED | MAP_ANONYMOUS;
1199 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
1200 flags, -1, 0);
1201#else
1202 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1203 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1204 flags, -1, 0);
1205#endif
1206 }
1207 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001208 fprintf(stderr, "Could not remap addr: "
1209 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001210 length, addr);
1211 exit(1);
1212 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001213 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001214 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001215 }
1216 return;
1217 }
1218 }
1219}
1220#endif /* !_WIN32 */
1221
pbrookdc828ca2009-04-09 22:21:07 +00001222/* Return a host pointer to ram allocated with qemu_ram_alloc.
pbrook5579c7f2009-04-11 14:47:08 +00001223 With the exception of the softmmu code in this file, this should
1224 only be used for local memory (e.g. video ram) that the device owns,
1225 and knows it isn't going to access beyond the end of the block.
1226
1227 It should not be used for general purpose DMA.
1228 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1229 */
Anthony Liguoric227f092009-10-01 16:12:16 -05001230void *qemu_get_ram_ptr(ram_addr_t addr)
pbrookdc828ca2009-04-09 22:21:07 +00001231{
pbrook94a6b542009-04-11 17:15:54 +00001232 RAMBlock *block;
1233
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001234 /* The list is protected by the iothread lock here. */
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001235 block = ram_list.mru_block;
1236 if (block && addr - block->offset < block->length) {
1237 goto found;
1238 }
Paolo Bonzinia3161032012-11-14 15:54:48 +01001239 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Alex Williamsonf471a172010-06-11 11:11:42 -06001240 if (addr - block->offset < block->length) {
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001241 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001242 }
pbrook94a6b542009-04-11 17:15:54 +00001243 }
Alex Williamsonf471a172010-06-11 11:11:42 -06001244
1245 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1246 abort();
1247
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001248found:
1249 ram_list.mru_block = block;
1250 if (xen_enabled()) {
1251 /* We need to check if the requested address is in the RAM
1252 * because we don't want to map the entire memory in QEMU.
1253 * In that case just map until the end of the page.
1254 */
1255 if (block->offset == 0) {
1256 return xen_map_cache(addr, 0, 0);
1257 } else if (block->host == NULL) {
1258 block->host =
1259 xen_map_cache(block->offset, block->length, 1);
1260 }
1261 }
1262 return block->host + (addr - block->offset);
pbrookdc828ca2009-04-09 22:21:07 +00001263}
1264
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001265/* Return a host pointer to ram allocated with qemu_ram_alloc. Same as
1266 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
1267 *
1268 * ??? Is this still necessary?
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001269 */
Blue Swirl8b9c99d2012-10-28 11:04:51 +00001270static void *qemu_safe_ram_ptr(ram_addr_t addr)
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001271{
1272 RAMBlock *block;
1273
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001274 /* The list is protected by the iothread lock here. */
Paolo Bonzinia3161032012-11-14 15:54:48 +01001275 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001276 if (addr - block->offset < block->length) {
Jan Kiszka868bb332011-06-21 22:59:09 +02001277 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001278 /* We need to check if the requested address is in the RAM
1279 * because we don't want to map the entire memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001280 * In that case just map until the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01001281 */
1282 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001283 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01001284 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001285 block->host =
1286 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01001287 }
1288 }
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001289 return block->host + (addr - block->offset);
1290 }
1291 }
1292
1293 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1294 abort();
1295
1296 return NULL;
1297}
1298
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001299/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1300 * but takes a size argument. */
Blue Swirl8b9c99d2012-10-28 11:04:51 +00001301static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001302{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001303 if (*size == 0) {
1304 return NULL;
1305 }
Jan Kiszka868bb332011-06-21 22:59:09 +02001306 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001307 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02001308 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001309 RAMBlock *block;
1310
Paolo Bonzinia3161032012-11-14 15:54:48 +01001311 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001312 if (addr - block->offset < block->length) {
1313 if (addr - block->offset + *size > block->length)
1314 *size = block->length - addr + block->offset;
1315 return block->host + (addr - block->offset);
1316 }
1317 }
1318
1319 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1320 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001321 }
1322}
1323
Anthony PERARD050a0dd2010-09-16 13:57:49 +01001324void qemu_put_ram_ptr(void *addr)
1325{
1326 trace_qemu_put_ram_ptr(addr);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01001327}
1328
Marcelo Tosattie8902612010-10-11 15:31:19 -03001329int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00001330{
pbrook94a6b542009-04-11 17:15:54 +00001331 RAMBlock *block;
1332 uint8_t *host = ptr;
1333
Jan Kiszka868bb332011-06-21 22:59:09 +02001334 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001335 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001336 return 0;
1337 }
1338
Paolo Bonzinia3161032012-11-14 15:54:48 +01001339 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001340 /* This case can occur when the block is not mapped. */
1341 if (block->host == NULL) {
1342 continue;
1343 }
Alex Williamsonf471a172010-06-11 11:11:42 -06001344 if (host - block->host < block->length) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03001345 *ram_addr = block->offset + (host - block->host);
1346 return 0;
Alex Williamsonf471a172010-06-11 11:11:42 -06001347 }
pbrook94a6b542009-04-11 17:15:54 +00001348 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001349
Marcelo Tosattie8902612010-10-11 15:31:19 -03001350 return -1;
1351}
Alex Williamsonf471a172010-06-11 11:11:42 -06001352
Marcelo Tosattie8902612010-10-11 15:31:19 -03001353/* Some of the softmmu routines need to translate from a host pointer
1354 (typically a TLB entry) back to a ram offset. */
1355ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
1356{
1357 ram_addr_t ram_addr;
Alex Williamsonf471a172010-06-11 11:11:42 -06001358
Marcelo Tosattie8902612010-10-11 15:31:19 -03001359 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
1360 fprintf(stderr, "Bad ram pointer %p\n", ptr);
1361 abort();
1362 }
1363 return ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00001364}
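
/* Editor's sketch (compiled out): qemu_get_ram_ptr() and
 * qemu_ram_addr_from_host_nofail() are inverses for any address that lies
 * inside a RAM block, which is what the softmmu fast paths rely on. */
#if 0
static void example_ram_addr_round_trip(ram_addr_t addr)
{
    void *host = qemu_get_ram_ptr(addr);

    assert(qemu_ram_addr_from_host_nofail(host) == addr);
}
#endif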
1365
Avi Kivitya8170e52012-10-23 12:30:10 +02001366static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001367 unsigned size)
bellard33417e72003-08-10 21:47:01 +00001368{
pbrook67d3b952006-12-18 05:03:52 +00001369#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00001370 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
pbrook67d3b952006-12-18 05:03:52 +00001371#endif
Richard Henderson5b450402011-04-18 16:13:12 -07001372#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001373 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00001374#endif
1375 return 0;
1376}
1377
Avi Kivitya8170e52012-10-23 12:30:10 +02001378static void unassigned_mem_write(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001379 uint64_t val, unsigned size)
blueswir1e18231a2008-10-06 18:46:28 +00001380{
1381#ifdef DEBUG_UNASSIGNED
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001382 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
blueswir1e18231a2008-10-06 18:46:28 +00001383#endif
Richard Henderson5b450402011-04-18 16:13:12 -07001384#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001385 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00001386#endif
1387}
1388
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001389static const MemoryRegionOps unassigned_mem_ops = {
1390 .read = unassigned_mem_read,
1391 .write = unassigned_mem_write,
1392 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00001393};
1394
Avi Kivitya8170e52012-10-23 12:30:10 +02001395static uint64_t error_mem_read(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001396 unsigned size)
1397{
1398 abort();
1399}
1400
Avi Kivitya8170e52012-10-23 12:30:10 +02001401static void error_mem_write(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001402 uint64_t value, unsigned size)
1403{
1404 abort();
1405}
1406
1407static const MemoryRegionOps error_mem_ops = {
1408 .read = error_mem_read,
1409 .write = error_mem_write,
1410 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00001411};
1412
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001413static const MemoryRegionOps rom_mem_ops = {
1414 .read = error_mem_read,
1415 .write = unassigned_mem_write,
1416 .endianness = DEVICE_NATIVE_ENDIAN,
1417};
1418
Avi Kivitya8170e52012-10-23 12:30:10 +02001419static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001420 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00001421{
bellard3a7d9292005-08-21 09:26:42 +00001422 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001423 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00001424 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1425#if !defined(CONFIG_USER_ONLY)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001426 tb_invalidate_phys_page_fast(ram_addr, size);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001427 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00001428#endif
1429 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001430 switch (size) {
1431 case 1:
1432 stb_p(qemu_get_ram_ptr(ram_addr), val);
1433 break;
1434 case 2:
1435 stw_p(qemu_get_ram_ptr(ram_addr), val);
1436 break;
1437 case 4:
1438 stl_p(qemu_get_ram_ptr(ram_addr), val);
1439 break;
1440 default:
1441 abort();
1442 }
bellardf23db162005-08-21 19:12:28 +00001443 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001444 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00001445 /* we remove the notdirty callback only if the code has been
1446 flushed */
1447 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00001448 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00001449}
1450
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001451static const MemoryRegionOps notdirty_mem_ops = {
1452 .read = error_mem_read,
1453 .write = notdirty_mem_write,
1454 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00001455};
1456
pbrook0f459d12008-06-09 00:20:13 +00001457/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00001458static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00001459{
Andreas Färber9349b4f2012-03-14 01:38:32 +01001460 CPUArchState *env = cpu_single_env;
aliguori06d55cc2008-11-18 20:24:06 +00001461 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00001462 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00001463 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00001464 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00001465
aliguori06d55cc2008-11-18 20:24:06 +00001466 if (env->watchpoint_hit) {
1467 /* We re-entered the check after replacing the TB. Now raise
1468 * the debug interrupt so that it will trigger after the
1469 * current instruction. */
1470 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
1471 return;
1472 }
pbrook2e70f6e2008-06-29 01:03:05 +00001473 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00001474 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001475 if ((vaddr == (wp->vaddr & len_mask) ||
1476 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00001477 wp->flags |= BP_WATCHPOINT_HIT;
1478 if (!env->watchpoint_hit) {
1479 env->watchpoint_hit = wp;
Blue Swirl5a316522012-12-02 21:28:09 +00001480 tb_check_watchpoint(env);
aliguori6e140f22008-11-18 20:37:55 +00001481 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1482 env->exception_index = EXCP_DEBUG;
Max Filippov488d6572012-01-29 02:24:39 +04001483 cpu_loop_exit(env);
aliguori6e140f22008-11-18 20:37:55 +00001484 } else {
1485 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1486 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
Max Filippov488d6572012-01-29 02:24:39 +04001487 cpu_resume_from_signal(env, NULL);
aliguori6e140f22008-11-18 20:37:55 +00001488 }
aliguori06d55cc2008-11-18 20:24:06 +00001489 }
aliguori6e140f22008-11-18 20:37:55 +00001490 } else {
1491 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00001492 }
1493 }
1494}
1495
pbrook6658ffb2007-03-16 23:58:11 +00001496/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1497 so these check for a hit then pass through to the normal out-of-line
1498 phys routines. */
Avi Kivitya8170e52012-10-23 12:30:10 +02001499static uint64_t watch_mem_read(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001500 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001501{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001502 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1503 switch (size) {
1504 case 1: return ldub_phys(addr);
1505 case 2: return lduw_phys(addr);
1506 case 4: return ldl_phys(addr);
1507 default: abort();
1508 }
pbrook6658ffb2007-03-16 23:58:11 +00001509}
1510
Avi Kivitya8170e52012-10-23 12:30:10 +02001511static void watch_mem_write(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001512 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001513{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001514 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1515 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04001516 case 1:
1517 stb_phys(addr, val);
1518 break;
1519 case 2:
1520 stw_phys(addr, val);
1521 break;
1522 case 4:
1523 stl_phys(addr, val);
1524 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02001525 default: abort();
1526 }
pbrook6658ffb2007-03-16 23:58:11 +00001527}
1528
Avi Kivity1ec9b902012-01-02 12:47:48 +02001529static const MemoryRegionOps watch_mem_ops = {
1530 .read = watch_mem_read,
1531 .write = watch_mem_write,
1532 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00001533};
pbrook6658ffb2007-03-16 23:58:11 +00001534
Avi Kivitya8170e52012-10-23 12:30:10 +02001535static uint64_t subpage_read(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001536 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001537{
Avi Kivity70c68e42012-01-02 12:32:48 +02001538 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07001539 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02001540 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00001541#if defined(DEBUG_SUBPAGE)
1542 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
1543 mmio, len, addr, idx);
1544#endif
blueswir1db7b5422007-05-26 17:36:03 +00001545
Avi Kivity5312bd82012-02-12 18:32:55 +02001546 section = &phys_sections[mmio->sub_section[idx]];
1547 addr += mmio->base;
1548 addr -= section->offset_within_address_space;
1549 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02001550 return io_mem_read(section->mr, addr, len);
blueswir1db7b5422007-05-26 17:36:03 +00001551}
1552
Avi Kivitya8170e52012-10-23 12:30:10 +02001553static void subpage_write(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001554 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001555{
Avi Kivity70c68e42012-01-02 12:32:48 +02001556 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07001557 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02001558 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00001559#if defined(DEBUG_SUBPAGE)
Avi Kivity70c68e42012-01-02 12:32:48 +02001560 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
1561 " idx %d value %"PRIx64"\n",
Richard Hendersonf6405242010-04-22 16:47:31 -07001562 __func__, mmio, len, addr, idx, value);
blueswir1db7b5422007-05-26 17:36:03 +00001563#endif
Richard Hendersonf6405242010-04-22 16:47:31 -07001564
Avi Kivity5312bd82012-02-12 18:32:55 +02001565 section = &phys_sections[mmio->sub_section[idx]];
1566 addr += mmio->base;
1567 addr -= section->offset_within_address_space;
1568 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02001569 io_mem_write(section->mr, addr, value, len);
blueswir1db7b5422007-05-26 17:36:03 +00001570}
1571
Avi Kivity70c68e42012-01-02 12:32:48 +02001572static const MemoryRegionOps subpage_ops = {
1573 .read = subpage_read,
1574 .write = subpage_write,
1575 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00001576};
1577
Avi Kivitya8170e52012-10-23 12:30:10 +02001578static uint64_t subpage_ram_read(void *opaque, hwaddr addr,
Avi Kivityde712f92012-01-02 12:41:07 +02001579 unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01001580{
1581 ram_addr_t raddr = addr;
1582 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02001583 switch (size) {
1584 case 1: return ldub_p(ptr);
1585 case 2: return lduw_p(ptr);
1586 case 4: return ldl_p(ptr);
1587 default: abort();
1588 }
Andreas Färber56384e82011-11-30 16:26:21 +01001589}
1590
Avi Kivitya8170e52012-10-23 12:30:10 +02001591static void subpage_ram_write(void *opaque, hwaddr addr,
Avi Kivityde712f92012-01-02 12:41:07 +02001592 uint64_t value, unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01001593{
1594 ram_addr_t raddr = addr;
1595 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02001596 switch (size) {
1597 case 1: return stb_p(ptr, value);
1598 case 2: return stw_p(ptr, value);
1599 case 4: return stl_p(ptr, value);
1600 default: abort();
1601 }
Andreas Färber56384e82011-11-30 16:26:21 +01001602}
1603
Avi Kivityde712f92012-01-02 12:41:07 +02001604static const MemoryRegionOps subpage_ram_ops = {
1605 .read = subpage_ram_read,
1606 .write = subpage_ram_write,
1607 .endianness = DEVICE_NATIVE_ENDIAN,
Andreas Färber56384e82011-11-30 16:26:21 +01001608};
1609
Anthony Liguoric227f092009-10-01 16:12:16 -05001610static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02001611 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00001612{
1613 int idx, eidx;
1614
1615 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1616 return -1;
1617 idx = SUBPAGE_IDX(start);
1618 eidx = SUBPAGE_IDX(end);
1619#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00001620 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", __func__,
blueswir1db7b5422007-05-26 17:36:03 +00001621 mmio, start, end, idx, eidx, section);
1622#endif
Avi Kivity5312bd82012-02-12 18:32:55 +02001623 if (memory_region_is_ram(phys_sections[section].mr)) {
1624 MemoryRegionSection new_section = phys_sections[section];
1625 new_section.mr = &io_mem_subpage_ram;
1626 section = phys_section_add(&new_section);
Andreas Färber56384e82011-11-30 16:26:21 +01001627 }
blueswir1db7b5422007-05-26 17:36:03 +00001628 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02001629 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00001630 }
1631
1632 return 0;
1633}
1634
Avi Kivitya8170e52012-10-23 12:30:10 +02001635static subpage_t *subpage_init(hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00001636{
Anthony Liguoric227f092009-10-01 16:12:16 -05001637 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00001638
Anthony Liguori7267c092011-08-20 22:09:37 -05001639 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00001640
1641 mmio->base = base;
Avi Kivity70c68e42012-01-02 12:32:48 +02001642 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
1643 "subpage", TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02001644 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00001645#if defined(DEBUG_SUBPAGE)
aliguori1eec6142009-02-05 22:06:18 +00001646 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
 1647 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00001648#endif
Avi Kivity0f0cb162012-02-13 17:14:32 +02001649 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
blueswir1db7b5422007-05-26 17:36:03 +00001650
1651 return mmio;
1652}
1653
Avi Kivity5312bd82012-02-12 18:32:55 +02001654static uint16_t dummy_section(MemoryRegion *mr)
1655{
1656 MemoryRegionSection section = {
1657 .mr = mr,
1658 .offset_within_address_space = 0,
1659 .offset_within_region = 0,
1660 .size = UINT64_MAX,
1661 };
1662
1663 return phys_section_add(&section);
1664}
1665
Avi Kivitya8170e52012-10-23 12:30:10 +02001666MemoryRegion *iotlb_to_region(hwaddr index)
Avi Kivityaa102232012-03-08 17:06:55 +02001667{
Avi Kivity37ec01d2012-03-08 18:08:35 +02001668 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02001669}
1670
Avi Kivitye9179ce2009-06-14 11:38:52 +03001671static void io_mem_init(void)
1672{
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001673 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001674 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
1675 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
1676 "unassigned", UINT64_MAX);
1677 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
1678 "notdirty", UINT64_MAX);
Avi Kivityde712f92012-01-02 12:41:07 +02001679 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
1680 "subpage-ram", UINT64_MAX);
Avi Kivity1ec9b902012-01-02 12:47:48 +02001681 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
1682 "watch", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03001683}
1684
Avi Kivityac1970f2012-10-03 16:22:53 +02001685static void mem_begin(MemoryListener *listener)
1686{
1687 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
1688
1689 destroy_all_mappings(d);
1690 d->phys_map.ptr = PHYS_MAP_NODE_NIL;
1691}
1692
Avi Kivity50c1e142012-02-08 21:36:02 +02001693static void core_begin(MemoryListener *listener)
1694{
Avi Kivity5312bd82012-02-12 18:32:55 +02001695 phys_sections_clear();
1696 phys_section_unassigned = dummy_section(&io_mem_unassigned);
Avi Kivityaa102232012-03-08 17:06:55 +02001697 phys_section_notdirty = dummy_section(&io_mem_notdirty);
1698 phys_section_rom = dummy_section(&io_mem_rom);
1699 phys_section_watch = dummy_section(&io_mem_watch);
Avi Kivity50c1e142012-02-08 21:36:02 +02001700}
1701
Avi Kivity1d711482012-10-02 18:54:45 +02001702static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02001703{
Andreas Färber9349b4f2012-03-14 01:38:32 +01001704 CPUArchState *env;
Avi Kivity117712c2012-02-12 21:23:17 +02001705
1706 /* since each CPU stores ram addresses in its TLB cache, we must
1707 reset the modified entries */
1708 /* XXX: slow ! */
1709 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1710 tlb_flush(env, 1);
1711 }
Avi Kivity50c1e142012-02-08 21:36:02 +02001712}
1713
Avi Kivity93632742012-02-08 16:54:16 +02001714static void core_log_global_start(MemoryListener *listener)
1715{
1716 cpu_physical_memory_set_dirty_tracking(1);
1717}
1718
1719static void core_log_global_stop(MemoryListener *listener)
1720{
1721 cpu_physical_memory_set_dirty_tracking(0);
1722}
1723
Avi Kivity4855d412012-02-08 21:16:05 +02001724static void io_region_add(MemoryListener *listener,
1725 MemoryRegionSection *section)
1726{
Avi Kivitya2d33522012-03-05 17:40:12 +02001727 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
1728
1729 mrio->mr = section->mr;
1730 mrio->offset = section->offset_within_region;
1731 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
Avi Kivity4855d412012-02-08 21:16:05 +02001732 section->offset_within_address_space, section->size);
Avi Kivitya2d33522012-03-05 17:40:12 +02001733 ioport_register(&mrio->iorange);
Avi Kivity4855d412012-02-08 21:16:05 +02001734}
1735
1736static void io_region_del(MemoryListener *listener,
1737 MemoryRegionSection *section)
1738{
1739 isa_unassign_ioport(section->offset_within_address_space, section->size);
1740}
1741
Avi Kivity93632742012-02-08 16:54:16 +02001742static MemoryListener core_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02001743 .begin = core_begin,
Avi Kivity93632742012-02-08 16:54:16 +02001744 .log_global_start = core_log_global_start,
1745 .log_global_stop = core_log_global_stop,
Avi Kivityac1970f2012-10-03 16:22:53 +02001746 .priority = 1,
Avi Kivity93632742012-02-08 16:54:16 +02001747};
1748
Avi Kivity4855d412012-02-08 21:16:05 +02001749static MemoryListener io_memory_listener = {
1750 .region_add = io_region_add,
1751 .region_del = io_region_del,
Avi Kivity4855d412012-02-08 21:16:05 +02001752 .priority = 0,
1753};
1754
Avi Kivity1d711482012-10-02 18:54:45 +02001755static MemoryListener tcg_memory_listener = {
1756 .commit = tcg_commit,
1757};
1758
Avi Kivityac1970f2012-10-03 16:22:53 +02001759void address_space_init_dispatch(AddressSpace *as)
1760{
1761 AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
1762
1763 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
1764 d->listener = (MemoryListener) {
1765 .begin = mem_begin,
1766 .region_add = mem_add,
1767 .region_nop = mem_add,
1768 .priority = 0,
1769 };
1770 as->dispatch = d;
1771 memory_listener_register(&d->listener, as);
1772}
1773
Avi Kivity83f3c252012-10-07 12:59:55 +02001774void address_space_destroy_dispatch(AddressSpace *as)
1775{
1776 AddressSpaceDispatch *d = as->dispatch;
1777
1778 memory_listener_unregister(&d->listener);
1779 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
1780 g_free(d);
1781 as->dispatch = NULL;
1782}
1783
Avi Kivity62152b82011-07-26 14:26:14 +03001784static void memory_map_init(void)
1785{
Anthony Liguori7267c092011-08-20 22:09:37 -05001786 system_memory = g_malloc(sizeof(*system_memory));
Avi Kivity8417ceb2011-08-03 11:56:14 +03001787 memory_region_init(system_memory, "system", INT64_MAX);
Avi Kivity2673a5d2012-10-02 18:49:28 +02001788 address_space_init(&address_space_memory, system_memory);
1789 address_space_memory.name = "memory";
Avi Kivity309cb472011-08-08 16:09:03 +03001790
Anthony Liguori7267c092011-08-20 22:09:37 -05001791 system_io = g_malloc(sizeof(*system_io));
Avi Kivity309cb472011-08-08 16:09:03 +03001792 memory_region_init(system_io, "io", 65536);
Avi Kivity2673a5d2012-10-02 18:49:28 +02001793 address_space_init(&address_space_io, system_io);
1794 address_space_io.name = "I/O";
Avi Kivity93632742012-02-08 16:54:16 +02001795
Avi Kivityf6790af2012-10-02 20:13:51 +02001796 memory_listener_register(&core_memory_listener, &address_space_memory);
1797 memory_listener_register(&io_memory_listener, &address_space_io);
1798 memory_listener_register(&tcg_memory_listener, &address_space_memory);
Peter Maydell9e119082012-10-29 11:34:32 +10001799
1800 dma_context_init(&dma_context_memory, &address_space_memory,
1801 NULL, NULL, NULL);
Avi Kivity62152b82011-07-26 14:26:14 +03001802}
1803
1804MemoryRegion *get_system_memory(void)
1805{
1806 return system_memory;
1807}
1808
Avi Kivity309cb472011-08-08 16:09:03 +03001809MemoryRegion *get_system_io(void)
1810{
1811 return system_io;
1812}
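
/* Editor's sketch (hypothetical device, compiled out): how board or device
 * code plugs an MMIO region into the hierarchy rooted at get_system_memory().
 * The ops/function names and the base address are made up for illustration. */
#if 0
static uint64_t example_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
    return 0;   /* a real device would decode addr here */
}

static void example_mmio_write(void *opaque, hwaddr addr,
                               uint64_t val, unsigned size)
{
}

static const MemoryRegionOps example_mmio_ops = {
    .read = example_mmio_read,
    .write = example_mmio_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void example_device_init(void)
{
    static MemoryRegion mr;

    memory_region_init_io(&mr, &example_mmio_ops, NULL,
                          "example-mmio", 0x1000);
    memory_region_add_subregion(get_system_memory(), 0xfe000000, &mr);
}
#endif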
1813
pbrooke2eef172008-06-08 01:09:01 +00001814#endif /* !defined(CONFIG_USER_ONLY) */
1815
bellard13eb76e2004-01-24 15:23:36 +00001816/* physical memory access (slow version, mainly for debug) */
1817#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +01001818int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00001819 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00001820{
1821 int l, flags;
1822 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00001823 void * p;
bellard13eb76e2004-01-24 15:23:36 +00001824
1825 while (len > 0) {
1826 page = addr & TARGET_PAGE_MASK;
1827 l = (page + TARGET_PAGE_SIZE) - addr;
1828 if (l > len)
1829 l = len;
1830 flags = page_get_flags(page);
1831 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00001832 return -1;
bellard13eb76e2004-01-24 15:23:36 +00001833 if (is_write) {
1834 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00001835 return -1;
bellard579a97f2007-11-11 14:26:47 +00001836 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00001837 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00001838 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00001839 memcpy(p, buf, l);
1840 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00001841 } else {
1842 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00001843 return -1;
bellard579a97f2007-11-11 14:26:47 +00001844 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00001845 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00001846 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00001847 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00001848 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00001849 }
1850 len -= l;
1851 buf += l;
1852 addr += l;
1853 }
Paul Brooka68fe892010-03-01 00:08:59 +00001854 return 0;
bellard13eb76e2004-01-24 15:23:36 +00001855}
bellard8df1cd02005-01-28 22:37:22 +00001856
bellard13eb76e2004-01-24 15:23:36 +00001857#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001858
Avi Kivitya8170e52012-10-23 12:30:10 +02001859static void invalidate_and_set_dirty(hwaddr addr,
1860 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001861{
1862 if (!cpu_physical_memory_is_dirty(addr)) {
1863 /* invalidate code */
1864 tb_invalidate_phys_page_range(addr, addr + length, 0);
1865 /* set dirty bit */
1866 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1867 }
Anthony PERARDe2269392012-10-03 13:49:22 +00001868 xen_modified_memory(addr, length);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001869}
1870
Avi Kivitya8170e52012-10-23 12:30:10 +02001871void address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02001872 int len, bool is_write)
bellard13eb76e2004-01-24 15:23:36 +00001873{
Avi Kivityac1970f2012-10-03 16:22:53 +02001874 AddressSpaceDispatch *d = as->dispatch;
Avi Kivity37ec01d2012-03-08 18:08:35 +02001875 int l;
bellard13eb76e2004-01-24 15:23:36 +00001876 uint8_t *ptr;
1877 uint32_t val;
Avi Kivitya8170e52012-10-23 12:30:10 +02001878 hwaddr page;
Avi Kivityf3705d52012-03-08 16:16:34 +02001879 MemoryRegionSection *section;
ths3b46e622007-09-17 08:09:54 +00001880
bellard13eb76e2004-01-24 15:23:36 +00001881 while (len > 0) {
1882 page = addr & TARGET_PAGE_MASK;
1883 l = (page + TARGET_PAGE_SIZE) - addr;
1884 if (l > len)
1885 l = len;
Avi Kivityac1970f2012-10-03 16:22:53 +02001886 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00001887
bellard13eb76e2004-01-24 15:23:36 +00001888 if (is_write) {
Avi Kivityf3705d52012-03-08 16:16:34 +02001889 if (!memory_region_is_ram(section->mr)) {
Avi Kivitya8170e52012-10-23 12:30:10 +02001890 hwaddr addr1;
Blue Swirlcc5bea62012-04-14 14:56:48 +00001891 addr1 = memory_region_section_addr(section, addr);
bellard6a00d602005-11-21 23:25:50 +00001892 /* XXX: could force cpu_single_env to NULL to avoid
1893 potential bugs */
aurel326c2934d2009-02-18 21:37:17 +00001894 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00001895 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001896 val = ldl_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02001897 io_mem_write(section->mr, addr1, val, 4);
bellard13eb76e2004-01-24 15:23:36 +00001898 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00001899 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00001900 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001901 val = lduw_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02001902 io_mem_write(section->mr, addr1, val, 2);
bellard13eb76e2004-01-24 15:23:36 +00001903 l = 2;
1904 } else {
bellard1c213d12005-09-03 10:49:04 +00001905 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001906 val = ldub_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02001907 io_mem_write(section->mr, addr1, val, 1);
bellard13eb76e2004-01-24 15:23:36 +00001908 l = 1;
1909 }
Avi Kivityf3705d52012-03-08 16:16:34 +02001910 } else if (!section->readonly) {
Anthony PERARD8ca56922011-07-15 04:32:53 +00001911 ram_addr_t addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02001912 addr1 = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00001913 + memory_region_section_addr(section, addr);
bellard13eb76e2004-01-24 15:23:36 +00001914 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00001915 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00001916 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001917 invalidate_and_set_dirty(addr1, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01001918 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00001919 }
1920 } else {
Blue Swirlcc5bea62012-04-14 14:56:48 +00001921 if (!(memory_region_is_ram(section->mr) ||
1922 memory_region_is_romd(section->mr))) {
Avi Kivitya8170e52012-10-23 12:30:10 +02001923 hwaddr addr1;
bellard13eb76e2004-01-24 15:23:36 +00001924 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00001925 addr1 = memory_region_section_addr(section, addr);
aurel326c2934d2009-02-18 21:37:17 +00001926 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00001927 /* 32 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02001928 val = io_mem_read(section->mr, addr1, 4);
bellardc27004e2005-01-03 23:35:10 +00001929 stl_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00001930 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00001931 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00001932 /* 16 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02001933 val = io_mem_read(section->mr, addr1, 2);
bellardc27004e2005-01-03 23:35:10 +00001934 stw_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00001935 l = 2;
1936 } else {
bellard1c213d12005-09-03 10:49:04 +00001937 /* 8 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02001938 val = io_mem_read(section->mr, addr1, 1);
bellardc27004e2005-01-03 23:35:10 +00001939 stb_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00001940 l = 1;
1941 }
1942 } else {
1943 /* RAM case */
Anthony PERARD0a1b3572012-03-19 15:54:34 +00001944 ptr = qemu_get_ram_ptr(section->mr->ram_addr
Blue Swirlcc5bea62012-04-14 14:56:48 +00001945 + memory_region_section_addr(section,
1946 addr));
Avi Kivityf3705d52012-03-08 16:16:34 +02001947 memcpy(buf, ptr, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01001948 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00001949 }
1950 }
1951 len -= l;
1952 buf += l;
1953 addr += l;
1954 }
1955}
bellard8df1cd02005-01-28 22:37:22 +00001956
Avi Kivitya8170e52012-10-23 12:30:10 +02001957void address_space_write(AddressSpace *as, hwaddr addr,
Avi Kivityac1970f2012-10-03 16:22:53 +02001958 const uint8_t *buf, int len)
1959{
1960 address_space_rw(as, addr, (uint8_t *)buf, len, true);
1961}
1962
1963/**
1964 * address_space_read: read from an address space.
1965 *
1966 * @as: #AddressSpace to be accessed
1967 * @addr: address within that address space
1968 * @buf: buffer with the data transferred
 * @len: length of the data transferred, in bytes
1969 */
Avi Kivitya8170e52012-10-23 12:30:10 +02001970void address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02001971{
1972 address_space_rw(as, addr, buf, len, false);
1973}
1974
1975
Avi Kivitya8170e52012-10-23 12:30:10 +02001976void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02001977 int len, int is_write)
1978{
1979 return address_space_rw(&address_space_memory, addr, buf, len, is_write);
1980}
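
/* Editor's sketch (compiled out): cpu_physical_memory_rw() and the
 * address_space_read/write wrappers above are the length-checked way to
 * touch guest memory; they split the access at page boundaries and route
 * each fragment to RAM or MMIO as appropriate. */
#if 0
static void example_copy_to_guest(hwaddr dst, const uint8_t *src, int len)
{
    cpu_physical_memory_rw(dst, (uint8_t *)src, len, 1 /* is_write */);
}
#endif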
1981
bellardd0ecd2a2006-04-23 17:14:48 +00001982/* used for ROM loading : can write in RAM and ROM */
Avi Kivitya8170e52012-10-23 12:30:10 +02001983void cpu_physical_memory_write_rom(hwaddr addr,
bellardd0ecd2a2006-04-23 17:14:48 +00001984 const uint8_t *buf, int len)
1985{
Avi Kivityac1970f2012-10-03 16:22:53 +02001986 AddressSpaceDispatch *d = address_space_memory.dispatch;
bellardd0ecd2a2006-04-23 17:14:48 +00001987 int l;
1988 uint8_t *ptr;
Avi Kivitya8170e52012-10-23 12:30:10 +02001989 hwaddr page;
Avi Kivityf3705d52012-03-08 16:16:34 +02001990 MemoryRegionSection *section;
ths3b46e622007-09-17 08:09:54 +00001991
bellardd0ecd2a2006-04-23 17:14:48 +00001992 while (len > 0) {
1993 page = addr & TARGET_PAGE_MASK;
1994 l = (page + TARGET_PAGE_SIZE) - addr;
1995 if (l > len)
1996 l = len;
Avi Kivityac1970f2012-10-03 16:22:53 +02001997 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00001998
Blue Swirlcc5bea62012-04-14 14:56:48 +00001999 if (!(memory_region_is_ram(section->mr) ||
2000 memory_region_is_romd(section->mr))) {
bellardd0ecd2a2006-04-23 17:14:48 +00002001 /* do nothing */
2002 } else {
2003 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02002004 addr1 = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002005 + memory_region_section_addr(section, addr);
bellardd0ecd2a2006-04-23 17:14:48 +00002006 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002007 ptr = qemu_get_ram_ptr(addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00002008 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002009 invalidate_and_set_dirty(addr1, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002010 qemu_put_ram_ptr(ptr);
bellardd0ecd2a2006-04-23 17:14:48 +00002011 }
2012 len -= l;
2013 buf += l;
2014 addr += l;
2015 }
2016}
2017
aliguori6d16c2f2009-01-22 16:59:11 +00002018typedef struct {
2019 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002020 hwaddr addr;
2021 hwaddr len;
aliguori6d16c2f2009-01-22 16:59:11 +00002022} BounceBuffer;
2023
2024static BounceBuffer bounce;
2025
aliguoriba223c22009-01-22 16:59:16 +00002026typedef struct MapClient {
2027 void *opaque;
2028 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00002029 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002030} MapClient;
2031
Blue Swirl72cf2d42009-09-12 07:36:22 +00002032static QLIST_HEAD(map_client_list, MapClient) map_client_list
2033 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002034
2035void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2036{
Anthony Liguori7267c092011-08-20 22:09:37 -05002037 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00002038
2039 client->opaque = opaque;
2040 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002041 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00002042 return client;
2043}
2044
Blue Swirl8b9c99d2012-10-28 11:04:51 +00002045static void cpu_unregister_map_client(void *_client)
aliguoriba223c22009-01-22 16:59:16 +00002046{
2047 MapClient *client = (MapClient *)_client;
2048
Blue Swirl72cf2d42009-09-12 07:36:22 +00002049 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002050 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002051}
2052
2053static void cpu_notify_map_clients(void)
2054{
2055 MapClient *client;
2056
Blue Swirl72cf2d42009-09-12 07:36:22 +00002057 while (!QLIST_EMPTY(&map_client_list)) {
2058 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002059 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09002060 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00002061 }
2062}
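
/* Editor's sketch (hypothetical caller, compiled out): the retry pattern the
 * map-client list enables.  When address_space_map() fails because the single
 * bounce buffer is busy, the caller parks a callback and retries once
 * cpu_notify_map_clients() runs. */
#if 0
typedef struct ExampleReq {
    hwaddr addr;
    hwaddr len;
} ExampleReq;

static void example_try_map(void *opaque)
{
    ExampleReq *req = opaque;
    hwaddr plen = req->len;
    void *host = cpu_physical_memory_map(req->addr, &plen, 1);

    if (!host) {
        /* Still exhausted: wait for the next notification. */
        cpu_register_map_client(req, example_try_map);
        return;
    }
    /* ... perform the transfer, then unmap ... */
    cpu_physical_memory_unmap(host, plen, 1, plen);
}
#endif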
2063
aliguori6d16c2f2009-01-22 16:59:11 +00002064/* Map a physical memory region into a host virtual address.
2065 * May map a subset of the requested range, given by and returned in *plen.
2066 * May return NULL if resources needed to perform the mapping are exhausted.
2067 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002068 * Use cpu_register_map_client() to know when retrying the map operation is
2069 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002070 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002071void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002072 hwaddr addr,
2073 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002074 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002075{
Avi Kivityac1970f2012-10-03 16:22:53 +02002076 AddressSpaceDispatch *d = as->dispatch;
Avi Kivitya8170e52012-10-23 12:30:10 +02002077 hwaddr len = *plen;
2078 hwaddr todo = 0;
aliguori6d16c2f2009-01-22 16:59:11 +00002079 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02002080 hwaddr page;
Avi Kivityf3705d52012-03-08 16:16:34 +02002081 MemoryRegionSection *section;
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002082 ram_addr_t raddr = RAM_ADDR_MAX;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002083 ram_addr_t rlen;
2084 void *ret;
aliguori6d16c2f2009-01-22 16:59:11 +00002085
2086 while (len > 0) {
2087 page = addr & TARGET_PAGE_MASK;
2088 l = (page + TARGET_PAGE_SIZE) - addr;
2089 if (l > len)
2090 l = len;
Avi Kivityac1970f2012-10-03 16:22:53 +02002091 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
aliguori6d16c2f2009-01-22 16:59:11 +00002092
Avi Kivityf3705d52012-03-08 16:16:34 +02002093 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002094 if (todo || bounce.buffer) {
aliguori6d16c2f2009-01-22 16:59:11 +00002095 break;
2096 }
2097 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
2098 bounce.addr = addr;
2099 bounce.len = l;
2100 if (!is_write) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002101 address_space_read(as, addr, bounce.buffer, l);
aliguori6d16c2f2009-01-22 16:59:11 +00002102 }
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002103
2104 *plen = l;
2105 return bounce.buffer;
aliguori6d16c2f2009-01-22 16:59:11 +00002106 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002107 if (!todo) {
Avi Kivityf3705d52012-03-08 16:16:34 +02002108 raddr = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002109 + memory_region_section_addr(section, addr);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002110 }
aliguori6d16c2f2009-01-22 16:59:11 +00002111
2112 len -= l;
2113 addr += l;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002114 todo += l;
aliguori6d16c2f2009-01-22 16:59:11 +00002115 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002116 rlen = todo;
2117 ret = qemu_ram_ptr_length(raddr, &rlen);
2118 *plen = rlen;
2119 return ret;
aliguori6d16c2f2009-01-22 16:59:11 +00002120}
2121
Avi Kivityac1970f2012-10-03 16:22:53 +02002122/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002123 * Will also mark the memory as dirty if is_write == 1. access_len gives
2124 * the amount of memory that was actually read or written by the caller.
2125 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002126void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2127 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002128{
2129 if (buffer != bounce.buffer) {
2130 if (is_write) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03002131 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002132 while (access_len) {
2133 unsigned l;
2134 l = TARGET_PAGE_SIZE;
2135 if (l > access_len)
2136 l = access_len;
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002137 invalidate_and_set_dirty(addr1, l);
aliguori6d16c2f2009-01-22 16:59:11 +00002138 addr1 += l;
2139 access_len -= l;
2140 }
2141 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002142 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002143 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002144 }
aliguori6d16c2f2009-01-22 16:59:11 +00002145 return;
2146 }
2147 if (is_write) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002148 address_space_write(as, bounce.addr, bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002149 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00002150 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002151 bounce.buffer = NULL;
aliguoriba223c22009-01-22 16:59:16 +00002152 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00002153}
bellardd0ecd2a2006-04-23 17:14:48 +00002154
Avi Kivitya8170e52012-10-23 12:30:10 +02002155void *cpu_physical_memory_map(hwaddr addr,
2156 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002157 int is_write)
2158{
2159 return address_space_map(&address_space_memory, addr, plen, is_write);
2160}
2161
Avi Kivitya8170e52012-10-23 12:30:10 +02002162void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2163 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002164{
2165 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2166}
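
/* Editor's sketch (compiled out): a zero-copy guest read built on the
 * map/unmap pair above, with a fallback to the copying path when mapping
 * resources are exhausted or the region is not directly mappable. */
#if 0
static void example_dma_read(hwaddr addr, uint8_t *dest, int len)
{
    while (len > 0) {
        hwaddr plen = len;
        void *host = cpu_physical_memory_map(addr, &plen, 0 /* read */);

        if (!host) {
            cpu_physical_memory_read(addr, dest, len);
            return;
        }
        memcpy(dest, host, plen);       /* plen may be shorter than len */
        cpu_physical_memory_unmap(host, plen, 0, plen);
        addr += plen;
        dest += plen;
        len -= plen;
    }
}
#endif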
2167
bellard8df1cd02005-01-28 22:37:22 +00002168/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002169static inline uint32_t ldl_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002170 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002171{
bellard8df1cd02005-01-28 22:37:22 +00002172 uint8_t *ptr;
2173 uint32_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02002174 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00002175
Avi Kivityac1970f2012-10-03 16:22:53 +02002176 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00002177
Blue Swirlcc5bea62012-04-14 14:56:48 +00002178 if (!(memory_region_is_ram(section->mr) ||
2179 memory_region_is_romd(section->mr))) {
bellard8df1cd02005-01-28 22:37:22 +00002180 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00002181 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02002182 val = io_mem_read(section->mr, addr, 4);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002183#if defined(TARGET_WORDS_BIGENDIAN)
2184 if (endian == DEVICE_LITTLE_ENDIAN) {
2185 val = bswap32(val);
2186 }
2187#else
2188 if (endian == DEVICE_BIG_ENDIAN) {
2189 val = bswap32(val);
2190 }
2191#endif
bellard8df1cd02005-01-28 22:37:22 +00002192 } else {
2193 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02002194 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002195 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002196 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002197 switch (endian) {
2198 case DEVICE_LITTLE_ENDIAN:
2199 val = ldl_le_p(ptr);
2200 break;
2201 case DEVICE_BIG_ENDIAN:
2202 val = ldl_be_p(ptr);
2203 break;
2204 default:
2205 val = ldl_p(ptr);
2206 break;
2207 }
bellard8df1cd02005-01-28 22:37:22 +00002208 }
2209 return val;
2210}
2211
Avi Kivitya8170e52012-10-23 12:30:10 +02002212uint32_t ldl_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002213{
2214 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2215}
2216
Avi Kivitya8170e52012-10-23 12:30:10 +02002217uint32_t ldl_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002218{
2219 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2220}
2221
Avi Kivitya8170e52012-10-23 12:30:10 +02002222uint32_t ldl_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002223{
2224 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
2225}
2226
bellard84b7b8e2005-11-28 21:19:04 +00002227/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002228static inline uint64_t ldq_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002229 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00002230{
bellard84b7b8e2005-11-28 21:19:04 +00002231 uint8_t *ptr;
2232 uint64_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02002233 MemoryRegionSection *section;
bellard84b7b8e2005-11-28 21:19:04 +00002234
Avi Kivityac1970f2012-10-03 16:22:53 +02002235 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00002236
Blue Swirlcc5bea62012-04-14 14:56:48 +00002237 if (!(memory_region_is_ram(section->mr) ||
2238 memory_region_is_romd(section->mr))) {
bellard84b7b8e2005-11-28 21:19:04 +00002239 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00002240 addr = memory_region_section_addr(section, addr);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002241
2242 /* XXX This is broken when device endian != cpu endian.
2243 Fix and add "endian" variable check */
bellard84b7b8e2005-11-28 21:19:04 +00002244#ifdef TARGET_WORDS_BIGENDIAN
Avi Kivity37ec01d2012-03-08 18:08:35 +02002245 val = io_mem_read(section->mr, addr, 4) << 32;
2246 val |= io_mem_read(section->mr, addr + 4, 4);
bellard84b7b8e2005-11-28 21:19:04 +00002247#else
Avi Kivity37ec01d2012-03-08 18:08:35 +02002248 val = io_mem_read(section->mr, addr, 4);
2249 val |= io_mem_read(section->mr, addr + 4, 4) << 32;
bellard84b7b8e2005-11-28 21:19:04 +00002250#endif
2251 } else {
2252 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02002253 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002254 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002255 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002256 switch (endian) {
2257 case DEVICE_LITTLE_ENDIAN:
2258 val = ldq_le_p(ptr);
2259 break;
2260 case DEVICE_BIG_ENDIAN:
2261 val = ldq_be_p(ptr);
2262 break;
2263 default:
2264 val = ldq_p(ptr);
2265 break;
2266 }
bellard84b7b8e2005-11-28 21:19:04 +00002267 }
2268 return val;
2269}
2270
Avi Kivitya8170e52012-10-23 12:30:10 +02002271uint64_t ldq_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002272{
2273 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2274}
2275
Avi Kivitya8170e52012-10-23 12:30:10 +02002276uint64_t ldq_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002277{
2278 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2279}
2280
Avi Kivitya8170e52012-10-23 12:30:10 +02002281uint64_t ldq_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002282{
2283 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2284}
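
/* Editor's sketch (compiled out): choosing the accessor that matches the
 * guest-visible layout.  A table stored little-endian in guest memory is
 * read with the _le_ variants, independent of host and target endianness. */
#if 0
static uint64_t example_read_le_table_entry(hwaddr table_base, int index)
{
    return ldq_le_phys(table_base + index * 8);
}
#endif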
2285
bellardaab33092005-10-30 20:48:42 +00002286/* XXX: optimize */
Avi Kivitya8170e52012-10-23 12:30:10 +02002287uint32_t ldub_phys(hwaddr addr)
bellardaab33092005-10-30 20:48:42 +00002288{
2289 uint8_t val;
2290 cpu_physical_memory_read(addr, &val, 1);
2291 return val;
2292}
2293
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002294/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002295static inline uint32_t lduw_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002296 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00002297{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002298 uint8_t *ptr;
2299 uint64_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02002300 MemoryRegionSection *section;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002301
Avi Kivityac1970f2012-10-03 16:22:53 +02002302 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002303
Blue Swirlcc5bea62012-04-14 14:56:48 +00002304 if (!(memory_region_is_ram(section->mr) ||
2305 memory_region_is_romd(section->mr))) {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002306 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00002307 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02002308 val = io_mem_read(section->mr, addr, 2);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002309#if defined(TARGET_WORDS_BIGENDIAN)
2310 if (endian == DEVICE_LITTLE_ENDIAN) {
2311 val = bswap16(val);
2312 }
2313#else
2314 if (endian == DEVICE_BIG_ENDIAN) {
2315 val = bswap16(val);
2316 }
2317#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002318 } else {
2319 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02002320 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002321 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002322 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002323 switch (endian) {
2324 case DEVICE_LITTLE_ENDIAN:
2325 val = lduw_le_p(ptr);
2326 break;
2327 case DEVICE_BIG_ENDIAN:
2328 val = lduw_be_p(ptr);
2329 break;
2330 default:
2331 val = lduw_p(ptr);
2332 break;
2333 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002334 }
2335 return val;
bellardaab33092005-10-30 20:48:42 +00002336}
2337
uint32_t lduw_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

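/*
 * Illustrative sketch, not part of the original file: the three
 * lduw_*_phys() accessors above read the same two bytes and differ only
 * in how those bytes are assembled.  The fixed-endian views are always
 * 16-bit byte swaps of each other, and the native view matches the one
 * selected by TARGET_WORDS_BIGENDIAN.  "probe_addr" is a hypothetical,
 * 2-byte-aligned guest physical address.
 */
static void example_lduw_variants(hwaddr probe_addr)
{
    uint32_t native = lduw_phys(probe_addr);   /* target byte order */
    uint32_t le = lduw_le_phys(probe_addr);    /* little-endian view */
    uint32_t be = lduw_be_phys(probe_addr);    /* big-endian view */

    assert(be == bswap16(le));
    assert(native == le || native == be);
}
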
/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(hwaddr addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
                               & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}

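/*
 * Illustrative sketch, not part of the original file: the use case the
 * comment above describes.  A target MMU emulation that sets status
 * bits in a guest page table entry wants the store to land in guest
 * RAM *without* the usual dirty marking, so that the dirty bitmap can
 * keep tracking PTEs the guest itself modified.  The PTE layout
 * (bit 5 = accessed) is hypothetical.
 */
static void example_set_pte_accessed(hwaddr pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & (1u << 5))) {
        /* Update the PTE without touching the CODE_DIRTY_FLAG bookkeeping. */
        stl_phys_notdirty(pte_addr, pte | (1u << 5));
    }
}
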
void stq_phys_notdirty(hwaddr addr, uint64_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
        /* The 64-bit MMIO write is split into two 32-bit accesses,
           issued in guest byte order: most significant word first on
           big-endian targets, last on little-endian ones. */
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write(section->mr, addr, val >> 32, 4);
        io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
#else
        io_mem_write(section->mr, addr, (uint32_t)val, 4);
        io_mem_write(section->mr, addr + 4, val >> 32, 4);
#endif
    } else {
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
static inline void stl_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

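/*
 * Illustrative sketch, not part of the original file: a device model
 * ringing a hypothetical little-endian 32-bit doorbell register.
 * stl_le_phys() resolves the address itself, so the same call works
 * whether it lands on MMIO (routed through io_mem_write()) or on RAM
 * (written directly, with dirty tracking).  EXAMPLE_DOORBELL_BASE is a
 * made-up constant.
 */
#define EXAMPLE_DOORBELL_BASE 0xfe000000ULL

static void example_ring_doorbell(uint32_t queue_index)
{
    stl_le_phys(EXAMPLE_DOORBELL_BASE, queue_index);
}
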
/* XXX: optimize */
void stb_phys(hwaddr addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 2);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

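/*
 * Illustrative sketch, not part of the original file: the difference
 * between the three 64-bit stores above.  stq_phys() swaps with
 * tswap64() into the *target's* native order, while the _le/_be
 * variants always produce one fixed byte order, which is what a bus or
 * device structure with an architecture-defined layout needs.
 * "ring_addr" is hypothetical.
 */
static void example_publish_ring_entry(hwaddr ring_addr, uint64_t gpa)
{
    /* A little-endian ring layout stays little-endian on every target. */
    stq_le_phys(ring_addr, gpa);
}
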
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        } else {
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
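
/*
 * Illustrative sketch, not part of the original file: how a debugger
 * stub might use cpu_memory_rw_debug().  The accesses go through the
 * CPU's debug page-table walk, so they take guest *virtual* addresses,
 * and writes may patch ROM - which is how software breakpoints can be
 * planted in read-only code.  The breakpoint opcode and "saved_insn"
 * buffer (at least 1 byte) are hypothetical.
 */
static int example_insert_sw_breakpoint(CPUArchState *env, target_ulong pc,
                                        uint8_t *saved_insn)
{
    static const uint8_t bp_insn[] = { 0xcc };

    /* Save the original instruction byte, then overwrite it. */
    if (cpu_memory_rw_debug(env, pc, saved_insn, sizeof(bp_insn), 0) < 0) {
        return -1;
    }
    return cpu_memory_rw_debug(env, pc, (uint8_t *)bp_insn,
                               sizeof(bp_insn), 1);
}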
#endif

#if !defined(CONFIG_USER_ONLY)

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}
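
/*
 * Illustrative sketch, not part of the original file: the kind of
 * caller the helper above exists for.  Legacy virtio fields are in
 * guest-native byte order, so a device model has to pick the matching
 * fixed-endian accessor at run time.  "vq_addr" is hypothetical.
 */
static uint32_t example_read_guest_native_u16(hwaddr vq_addr)
{
    return virtio_is_big_endian() ? lduw_be_phys(vq_addr)
                                  : lduw_le_phys(vq_addr);
}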

#endif

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             phys_addr >> TARGET_PAGE_BITS);

    return !(memory_region_is_ram(section->mr) ||
             memory_region_is_romd(section->mr));
}
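
/*
 * Illustrative sketch, not part of the original file: a memory-dump
 * style caller skipping guest physical pages that are not backed by
 * RAM or ROM, since reading device registers as a side effect of a
 * dump could perturb the device.
 */
static bool example_should_dump(hwaddr phys_addr)
{
    return !cpu_physical_memory_is_io(phys_addr);
}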
#endif