/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "hw/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"

//#define DEBUG_UNASSIGNED
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;
DMAContext dma_context_memory;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *, cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
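
/* Nodes live in one growable array and are referenced by a 16-bit index
   rather than by pointer, which keeps each PhysPageEntry compact.
   Judging by the right shift above, the sentinel is the maximum value of
   the 15-bit ptr field (the remaining bit holds the is_leaf flag), so it
   can never collide with a real node index.  */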

static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}

static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}
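
/* Each level of the table resolves L2_BITS bits of the page index, so
   "step" is the number of pages one entry covers at this level.  A run
   that is aligned to "step" and at least "step" pages long is recorded
   as a single leaf right here instead of being expanded a level down;
   anything else recurses until level 0, where every entry covers exactly
   one page.  Large aligned mappings therefore never allocate the lower
   levels at all.  */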

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
{
    PhysPageEntry lp = d->phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}
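
/* Lookup mirrors phys_page_set_level(): walk down from the top level,
   consuming L2_BITS of the index per step, and stop early on a leaf.
   Hitting a NIL pointer means nothing was ever mapped there, so the
   caller gets the "unassigned" section instead of a NULL it would have
   to check for.  */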

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#else
#define vmstate_cpu_common vmstate_dummy
#endif

CPUState *qemu_get_cpu(int index)
{
    CPUArchState *env = first_cpu;
    CPUState *cpu = NULL;

    while (env) {
        cpu = ENV_GET_CPU(env);
        if (cpu->cpu_index == index) {
            break;
        }
        env = env->next_cpu;
    }

    return env ? cpu : NULL;
}

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */
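
/* The two variants above differ because user-mode emulation works on
   guest virtual addresses directly, while under softmmu translated code
   is tracked by physical address, so the breakpoint's page must first be
   resolved via cpu_get_phys_page_debug().  */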

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
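
/* Worked example of the len_mask encoding: len == 4 gives
   len_mask == ~3, so an access at address A falls inside the watched
   range exactly when (A & len_mask) == wp->vaddr.  That identity only
   holds for power-of-2 lengths with matching alignment, which is what
   the sanity check above enforces.  */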

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

void cpu_exit(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->exit_request = 1;
    cpu->tcg_exit_req = 1;
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
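
/* The SIGABRT handler is reset to SIG_DFL before abort() above,
   presumably so that a handler the emulated program installed through
   the guest signal layer cannot intercept the fatal abort, and QEMU
   dies with the default action instead.  */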

CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining. */
    new_env->next_cpu = next_cpu;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, paddr);
        if (!section->readonly) {
            iotlb |= phys_section_notdirty;
        } else {
            iotlb |= phys_section_rom;
        }
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = section - phys_sections;
        iotlb += memory_region_section_addr(section, paddr);
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
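
/* The returned iotlb value is overloaded: for RAM it is the ram_addr of
   the page OR'd with a special section index (notdirty or rom) so that
   writes can still be trapped for dirty tracking; for MMIO it is the
   section's index into phys_sections plus the offset within the region;
   and pages with an applicable watchpoint are redirected to the watch
   section, with TLB_MMIO forcing every access down the slow path.  */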
#endif /* !defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;
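
/* A subpage describes a single target page whose contents are split
   among several memory regions.  sub_section holds one phys_sections
   index per byte offset within the page, so a sub-page lookup is just
   an array access at SUBPAGE_IDX(addr).  */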

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(hwaddr base);

static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
        if (!p[i].is_leaf) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].ptr);
        }
    }
    lp->is_leaf = 0;
    lp->ptr = PHYS_MAP_NODE_NIL;
}

static void destroy_all_mappings(AddressSpaceDispatch *d)
{
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}

static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + section->size - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}

static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    hwaddr addr;
    uint16_t section_index = phys_section_add(section);

    assert(size);

    addr = start_addr;
    phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
                  section_index);
}

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
    MemoryRegionSection now = *section, remain = *section;

    if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
        || (now.size < TARGET_PAGE_SIZE)) {
        now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space,
                       now.size);
        register_subpage(d, &now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    while (remain.size >= TARGET_PAGE_SIZE) {
        now = remain;
        if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = TARGET_PAGE_SIZE;
            register_subpage(d, &now);
        } else {
            now.size &= TARGET_PAGE_MASK;
            register_multipage(d, &now);
        }
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    now = remain;
    if (now.size) {
        register_subpage(d, &now);
    }
}
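
/* mem_add() carves a section into pieces the dispatch table can express:
   a head that does not start on a page boundary goes through
   register_subpage(), whole pages whose offset within the region is
   page-aligned are handed to register_multipage() in one run, whole
   pages misaligned within their region become one subpage each, and a
   partial tail is again a subpage.  */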

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC 0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}
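
/* On hugetlbfs, statfs() reports the huge page size as f_bsize, which
   is what this relies on; on any other filesystem f_bsize is merely the
   block size, hence the magic-number check and the warning above.  */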

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    block->fd = fd;
    return area;
}
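
/* Note the mkstemp()/unlink() pair above: the backing file is deleted
   right after creation, so the mapping lives only as long as the fd is
   open and nothing is left behind on the hugetlbfs mount when QEMU
   exits, cleanly or not.  */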
#endif

static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}
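
/* This is a best-fit search over ram_addr_t space: for each block it
   computes the gap between the block's end and the closest following
   block, then picks the smallest gap that still fits the request, which
   keeps fragmentation low as blocks are created and destroyed.  */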

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;
    QemuOpts *machine_opts;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (machine_opts &&
        !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                    "but dump_guest_core=off specified\n");
        }
    }
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    QemuOpts *opts;

    opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}
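
/* QEMU_MADV_MERGEABLE maps to madvise(MADV_MERGEABLE) on hosts that have
   it, letting KSM (kernel samepage merging) deduplicate identical guest
   pages; "-machine mem-merge=off" opts out, which is the QemuOpts check
   above.  */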

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *block, *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                memory_try_enable_merging(new_block->host, size);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_vmalloc(size);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    /* Keep the list sorted from biggest to smallest block.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}
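
/* The dirty bitmap in ram_list.phys_dirty appears to be one byte per
   target page (the g_realloc size above carries no element-size
   multiplier), so it is grown to cover last_ram_offset() and the new
   block's range is marked fully dirty (0xff) so that a later migration
   will transfer it.  */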
1106
Avi Kivityc5705a72011-12-20 15:59:12 +02001107ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
pbrook94a6b542009-04-11 17:15:54 +00001108{
Avi Kivityc5705a72011-12-20 15:59:12 +02001109 return qemu_ram_alloc_from_ptr(size, NULL, mr);
pbrook94a6b542009-04-11 17:15:54 +00001110}
bellarde9a1ab12007-02-08 23:08:38 +00001111
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001112void qemu_ram_free_from_ptr(ram_addr_t addr)
1113{
1114 RAMBlock *block;
1115
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001116 /* This assumes the iothread lock is taken here too. */
1117 qemu_mutex_lock_ramlist();
Paolo Bonzinia3161032012-11-14 15:54:48 +01001118 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001119 if (addr == block->offset) {
Paolo Bonzinia3161032012-11-14 15:54:48 +01001120 QTAILQ_REMOVE(&ram_list.blocks, block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001121 ram_list.mru_block = NULL;
Umesh Deshpandef798b072011-08-18 11:41:17 -07001122 ram_list.version++;
Anthony Liguori7267c092011-08-20 22:09:37 -05001123 g_free(block);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001124 break;
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001125 }
1126 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001127 qemu_mutex_unlock_ramlist();
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001128}
1129
Anthony Liguoric227f092009-10-01 16:12:16 -05001130void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00001131{
Alex Williamson04b16652010-07-02 11:13:17 -06001132 RAMBlock *block;
1133
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001134 /* This assumes the iothread lock is taken here too. */
1135 qemu_mutex_lock_ramlist();
Paolo Bonzinia3161032012-11-14 15:54:48 +01001136 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001137 if (addr == block->offset) {
Paolo Bonzinia3161032012-11-14 15:54:48 +01001138 QTAILQ_REMOVE(&ram_list.blocks, block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001139 ram_list.mru_block = NULL;
Umesh Deshpandef798b072011-08-18 11:41:17 -07001140 ram_list.version++;
Huang Yingcd19cfa2011-03-02 08:56:19 +01001141 if (block->flags & RAM_PREALLOC_MASK) {
1142 ;
1143 } else if (mem_path) {
Alex Williamson04b16652010-07-02 11:13:17 -06001144#if defined (__linux__) && !defined(TARGET_S390X)
1145 if (block->fd) {
1146 munmap(block->host, block->length);
1147 close(block->fd);
1148 } else {
1149 qemu_vfree(block->host);
1150 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01001151#else
1152 abort();
Alex Williamson04b16652010-07-02 11:13:17 -06001153#endif
1154 } else {
1155#if defined(TARGET_S390X) && defined(CONFIG_KVM)
1156 munmap(block->host, block->length);
1157#else
Jan Kiszka868bb332011-06-21 22:59:09 +02001158 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001159 xen_invalidate_map_cache_entry(block->host);
Jun Nakajima432d2682010-08-31 16:41:25 +01001160 } else {
1161 qemu_vfree(block->host);
1162 }
Alex Williamson04b16652010-07-02 11:13:17 -06001163#endif
1164 }
Anthony Liguori7267c092011-08-20 22:09:37 -05001165 g_free(block);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001166 break;
Alex Williamson04b16652010-07-02 11:13:17 -06001167 }
1168 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001169 qemu_mutex_unlock_ramlist();
Alex Williamson04b16652010-07-02 11:13:17 -06001170
bellarde9a1ab12007-02-08 23:08:38 +00001171}
1172
Huang Yingcd19cfa2011-03-02 08:56:19 +01001173#ifndef _WIN32
1174void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1175{
1176 RAMBlock *block;
1177 ram_addr_t offset;
1178 int flags;
1179 void *area, *vaddr;
1180
Paolo Bonzinia3161032012-11-14 15:54:48 +01001181 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001182 offset = addr - block->offset;
1183 if (offset < block->length) {
1184 vaddr = block->host + offset;
1185 if (block->flags & RAM_PREALLOC_MASK) {
1186 ;
1187 } else {
1188 flags = MAP_FIXED;
1189 munmap(vaddr, length);
1190 if (mem_path) {
1191#if defined(__linux__) && !defined(TARGET_S390X)
1192 if (block->fd) {
1193#ifdef MAP_POPULATE
1194 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1195 MAP_PRIVATE;
1196#else
1197 flags |= MAP_PRIVATE;
1198#endif
1199 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1200 flags, block->fd, offset);
1201 } else {
1202 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1203 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1204 flags, -1, 0);
1205 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01001206#else
1207 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001208#endif
1209 } else {
1210#if defined(TARGET_S390X) && defined(CONFIG_KVM)
1211 flags |= MAP_SHARED | MAP_ANONYMOUS;
1212 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
1213 flags, -1, 0);
1214#else
1215 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1216 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1217 flags, -1, 0);
1218#endif
1219 }
1220 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001221 fprintf(stderr, "Could not remap addr: "
1222 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001223 length, addr);
1224 exit(1);
1225 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001226 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001227 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001228 }
1229 return;
1230 }
1231 }
1232}
1233#endif /* !_WIN32 */
1234
pbrookdc828ca2009-04-09 22:21:07 +00001235/* Return a host pointer to ram allocated with qemu_ram_alloc.
pbrook5579c7f2009-04-11 14:47:08 +00001236 With the exception of the softmmu code in this file, this should
 1237 only be used for local memory (e.g. video RAM) that the device owns
 1238 and knows it will not access beyond the end of the block.
1239
1240 It should not be used for general purpose DMA.
1241 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1242 */
Anthony Liguoric227f092009-10-01 16:12:16 -05001243void *qemu_get_ram_ptr(ram_addr_t addr)
pbrookdc828ca2009-04-09 22:21:07 +00001244{
pbrook94a6b542009-04-11 17:15:54 +00001245 RAMBlock *block;
1246
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001247 /* The list is protected by the iothread lock here. */
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001248 block = ram_list.mru_block;
1249 if (block && addr - block->offset < block->length) {
1250 goto found;
1251 }
Paolo Bonzinia3161032012-11-14 15:54:48 +01001252 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Alex Williamsonf471a172010-06-11 11:11:42 -06001253 if (addr - block->offset < block->length) {
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001254 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001255 }
pbrook94a6b542009-04-11 17:15:54 +00001256 }
Alex Williamsonf471a172010-06-11 11:11:42 -06001257
1258 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1259 abort();
1260
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001261found:
1262 ram_list.mru_block = block;
1263 if (xen_enabled()) {
1264 /* We need to check if the requested address is in the RAM
1265 * because we don't want to map the entire memory in QEMU.
1266 * In that case just map until the end of the page.
1267 */
1268 if (block->offset == 0) {
1269 return xen_map_cache(addr, 0, 0);
1270 } else if (block->host == NULL) {
1271 block->host =
1272 xen_map_cache(block->offset, block->length, 1);
1273 }
1274 }
1275 return block->host + (addr - block->offset);
pbrookdc828ca2009-04-09 22:21:07 +00001276}
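/*
 * Illustrative sketch of the intended use described above: touching
 * device-local RAM that the caller owns, never general-purpose DMA.
 * "vram_base" stands for a ram_addr_t previously returned by
 * qemu_ram_alloc() and is an assumption of the example.
 *
 *     uint8_t *vram = qemu_get_ram_ptr(vram_base);
 *     memset(vram, 0, TARGET_PAGE_SIZE);   // clear one page of video RAM
 *     qemu_put_ram_ptr(vram);              // see below; a Xen map-cache hint
 */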
1277
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001278/* Return a host pointer to ram allocated with qemu_ram_alloc. Same as
1279 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
1280 *
1281 * ??? Is this still necessary?
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001282 */
Blue Swirl8b9c99d2012-10-28 11:04:51 +00001283static void *qemu_safe_ram_ptr(ram_addr_t addr)
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001284{
1285 RAMBlock *block;
1286
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001287 /* The list is protected by the iothread lock here. */
Paolo Bonzinia3161032012-11-14 15:54:48 +01001288 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001289 if (addr - block->offset < block->length) {
Jan Kiszka868bb332011-06-21 22:59:09 +02001290 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001291 /* We need to check if the requested address is in the RAM
1292 * because we don't want to map the entire memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001293 * In that case just map until the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01001294 */
1295 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001296 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01001297 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001298 block->host =
1299 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01001300 }
1301 }
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001302 return block->host + (addr - block->offset);
1303 }
1304 }
1305
1306 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1307 abort();
1308
1309 return NULL;
1310}
1311
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001312/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1313 * but takes a size argument */
Blue Swirl8b9c99d2012-10-28 11:04:51 +00001314static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001315{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001316 if (*size == 0) {
1317 return NULL;
1318 }
Jan Kiszka868bb332011-06-21 22:59:09 +02001319 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001320 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02001321 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001322 RAMBlock *block;
1323
Paolo Bonzinia3161032012-11-14 15:54:48 +01001324 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001325 if (addr - block->offset < block->length) {
1326 if (addr - block->offset + *size > block->length)
1327 *size = block->length - addr + block->offset;
1328 return block->host + (addr - block->offset);
1329 }
1330 }
1331
1332 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1333 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001334 }
1335}
1336
Anthony PERARD050a0dd2010-09-16 13:57:49 +01001337void qemu_put_ram_ptr(void *addr)
1338{
1339 trace_qemu_put_ram_ptr(addr);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01001340}
1341
Marcelo Tosattie8902612010-10-11 15:31:19 -03001342int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00001343{
pbrook94a6b542009-04-11 17:15:54 +00001344 RAMBlock *block;
1345 uint8_t *host = ptr;
1346
Jan Kiszka868bb332011-06-21 22:59:09 +02001347 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001348 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001349 return 0;
1350 }
1351
Paolo Bonzinia3161032012-11-14 15:54:48 +01001352 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001353 /* This case appears when the block is not mapped. */
1354 if (block->host == NULL) {
1355 continue;
1356 }
Alex Williamsonf471a172010-06-11 11:11:42 -06001357 if (host - block->host < block->length) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03001358 *ram_addr = block->offset + (host - block->host);
1359 return 0;
Alex Williamsonf471a172010-06-11 11:11:42 -06001360 }
pbrook94a6b542009-04-11 17:15:54 +00001361 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001362
Marcelo Tosattie8902612010-10-11 15:31:19 -03001363 return -1;
1364}
Alex Williamsonf471a172010-06-11 11:11:42 -06001365
Marcelo Tosattie8902612010-10-11 15:31:19 -03001366/* Some of the softmmu routines need to translate from a host pointer
1367 (typically a TLB entry) back to a ram offset. */
1368ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
1369{
1370 ram_addr_t ram_addr;
Alex Williamsonf471a172010-06-11 11:11:42 -06001371
Marcelo Tosattie8902612010-10-11 15:31:19 -03001372 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
1373 fprintf(stderr, "Bad ram pointer %p\n", ptr);
1374 abort();
1375 }
1376 return ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00001377}
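/*
 * Illustrative sketch: round-tripping between a ram_addr_t and a host
 * pointer ("some_addr" is hypothetical).  The checking variant above is
 * preferred whenever the pointer might lie outside guest RAM.
 *
 *     void *host = qemu_get_ram_ptr(some_addr);
 *     ram_addr_t back;
 *     if (qemu_ram_addr_from_host(host, &back) == 0) {
 *         assert(back == some_addr);
 *     }
 */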
1378
Avi Kivitya8170e52012-10-23 12:30:10 +02001379static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001380 unsigned size)
bellard33417e72003-08-10 21:47:01 +00001381{
pbrook67d3b952006-12-18 05:03:52 +00001382#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00001383 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
pbrook67d3b952006-12-18 05:03:52 +00001384#endif
Richard Henderson5b450402011-04-18 16:13:12 -07001385#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001386 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00001387#endif
1388 return 0;
1389}
1390
Avi Kivitya8170e52012-10-23 12:30:10 +02001391static void unassigned_mem_write(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001392 uint64_t val, unsigned size)
blueswir1e18231a2008-10-06 18:46:28 +00001393{
1394#ifdef DEBUG_UNASSIGNED
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001395 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
blueswir1e18231a2008-10-06 18:46:28 +00001396#endif
Richard Henderson5b450402011-04-18 16:13:12 -07001397#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001398 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00001399#endif
1400}
1401
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001402static const MemoryRegionOps unassigned_mem_ops = {
1403 .read = unassigned_mem_read,
1404 .write = unassigned_mem_write,
1405 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00001406};
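/*
 * Illustrative sketch: a device-side MemoryRegionOps following the same
 * pattern as the built-in regions here.  All names are hypothetical.
 *
 *     static uint64_t mydev_read(void *opaque, hwaddr addr, unsigned size)
 *     {
 *         return 0;   // device-specific register decode would go here
 *     }
 *
 *     static void mydev_write(void *opaque, hwaddr addr,
 *                             uint64_t val, unsigned size)
 *     {
 *     }
 *
 *     static const MemoryRegionOps mydev_ops = {
 *         .read = mydev_read,
 *         .write = mydev_write,
 *         .endianness = DEVICE_NATIVE_ENDIAN,
 *     };
 *
 * The region is then created with memory_region_init_io(&mr, &mydev_ops,
 * opaque, "mydev", size), exactly as io_mem_init() does below.
 */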
1407
Avi Kivitya8170e52012-10-23 12:30:10 +02001408static uint64_t error_mem_read(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001409 unsigned size)
1410{
1411 abort();
1412}
1413
Avi Kivitya8170e52012-10-23 12:30:10 +02001414static void error_mem_write(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001415 uint64_t value, unsigned size)
1416{
1417 abort();
1418}
1419
1420static const MemoryRegionOps error_mem_ops = {
1421 .read = error_mem_read,
1422 .write = error_mem_write,
1423 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00001424};
1425
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001426static const MemoryRegionOps rom_mem_ops = {
1427 .read = error_mem_read,
1428 .write = unassigned_mem_write,
1429 .endianness = DEVICE_NATIVE_ENDIAN,
1430};
1431
Avi Kivitya8170e52012-10-23 12:30:10 +02001432static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001433 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00001434{
bellard3a7d9292005-08-21 09:26:42 +00001435 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001436 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00001437 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1438#if !defined(CONFIG_USER_ONLY)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001439 tb_invalidate_phys_page_fast(ram_addr, size);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001440 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00001441#endif
1442 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001443 switch (size) {
1444 case 1:
1445 stb_p(qemu_get_ram_ptr(ram_addr), val);
1446 break;
1447 case 2:
1448 stw_p(qemu_get_ram_ptr(ram_addr), val);
1449 break;
1450 case 4:
1451 stl_p(qemu_get_ram_ptr(ram_addr), val);
1452 break;
1453 default:
1454 abort();
1455 }
bellardf23db162005-08-21 19:12:28 +00001456 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001457 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00001458 /* we remove the notdirty callback only if the code has been
1459 flushed */
1460 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00001461 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00001462}
1463
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001464static const MemoryRegionOps notdirty_mem_ops = {
1465 .read = error_mem_read,
1466 .write = notdirty_mem_write,
1467 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00001468};
1469
pbrook0f459d12008-06-09 00:20:13 +00001470/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00001471static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00001472{
Andreas Färber9349b4f2012-03-14 01:38:32 +01001473 CPUArchState *env = cpu_single_env;
aliguori06d55cc2008-11-18 20:24:06 +00001474 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00001475 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00001476 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00001477 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00001478
aliguori06d55cc2008-11-18 20:24:06 +00001479 if (env->watchpoint_hit) {
1480 /* We re-entered the check after replacing the TB. Now raise
 1481 * the debug interrupt so that it will trigger after the
1482 * current instruction. */
Andreas Färberc3affe52013-01-18 15:03:43 +01001483 cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00001484 return;
1485 }
pbrook2e70f6e2008-06-29 01:03:05 +00001486 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00001487 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001488 if ((vaddr == (wp->vaddr & len_mask) ||
1489 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00001490 wp->flags |= BP_WATCHPOINT_HIT;
1491 if (!env->watchpoint_hit) {
1492 env->watchpoint_hit = wp;
Blue Swirl5a316522012-12-02 21:28:09 +00001493 tb_check_watchpoint(env);
aliguori6e140f22008-11-18 20:37:55 +00001494 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1495 env->exception_index = EXCP_DEBUG;
Max Filippov488d6572012-01-29 02:24:39 +04001496 cpu_loop_exit(env);
aliguori6e140f22008-11-18 20:37:55 +00001497 } else {
1498 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1499 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
Max Filippov488d6572012-01-29 02:24:39 +04001500 cpu_resume_from_signal(env, NULL);
aliguori6e140f22008-11-18 20:37:55 +00001501 }
aliguori06d55cc2008-11-18 20:24:06 +00001502 }
aliguori6e140f22008-11-18 20:37:55 +00001503 } else {
1504 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00001505 }
1506 }
1507}
1508
pbrook6658ffb2007-03-16 23:58:11 +00001509/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1510 so these check for a hit then pass through to the normal out-of-line
1511 phys routines. */
Avi Kivitya8170e52012-10-23 12:30:10 +02001512static uint64_t watch_mem_read(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001513 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001514{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001515 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1516 switch (size) {
1517 case 1: return ldub_phys(addr);
1518 case 2: return lduw_phys(addr);
1519 case 4: return ldl_phys(addr);
1520 default: abort();
1521 }
pbrook6658ffb2007-03-16 23:58:11 +00001522}
1523
Avi Kivitya8170e52012-10-23 12:30:10 +02001524static void watch_mem_write(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001525 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001526{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001527 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1528 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04001529 case 1:
1530 stb_phys(addr, val);
1531 break;
1532 case 2:
1533 stw_phys(addr, val);
1534 break;
1535 case 4:
1536 stl_phys(addr, val);
1537 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02001538 default: abort();
1539 }
pbrook6658ffb2007-03-16 23:58:11 +00001540}
1541
Avi Kivity1ec9b902012-01-02 12:47:48 +02001542static const MemoryRegionOps watch_mem_ops = {
1543 .read = watch_mem_read,
1544 .write = watch_mem_write,
1545 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00001546};
pbrook6658ffb2007-03-16 23:58:11 +00001547
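/*
 * Illustrative sketch (assumed API of this tree, defined elsewhere): how
 * a debug front end arms the watchpoints these handlers service.
 *
 *     CPUWatchpoint *wp;
 *     cpu_watchpoint_insert(env, vaddr, 4, BP_MEM_WRITE | BP_GDB, &wp);
 *
 * A hit is routed through check_watchpoint() above, which either raises
 * EXCP_DEBUG or regenerates the TB, depending on BP_STOP_BEFORE_ACCESS.
 */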
Avi Kivitya8170e52012-10-23 12:30:10 +02001548static uint64_t subpage_read(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001549 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001550{
Avi Kivity70c68e42012-01-02 12:32:48 +02001551 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07001552 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02001553 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00001554#if defined(DEBUG_SUBPAGE)
1555 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
1556 mmio, len, addr, idx);
1557#endif
blueswir1db7b5422007-05-26 17:36:03 +00001558
Avi Kivity5312bd82012-02-12 18:32:55 +02001559 section = &phys_sections[mmio->sub_section[idx]];
1560 addr += mmio->base;
1561 addr -= section->offset_within_address_space;
1562 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02001563 return io_mem_read(section->mr, addr, len);
blueswir1db7b5422007-05-26 17:36:03 +00001564}
1565
Avi Kivitya8170e52012-10-23 12:30:10 +02001566static void subpage_write(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001567 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001568{
Avi Kivity70c68e42012-01-02 12:32:48 +02001569 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07001570 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02001571 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00001572#if defined(DEBUG_SUBPAGE)
Avi Kivity70c68e42012-01-02 12:32:48 +02001573 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
1574 " idx %d value %"PRIx64"\n",
Richard Hendersonf6405242010-04-22 16:47:31 -07001575 __func__, mmio, len, addr, idx, value);
blueswir1db7b5422007-05-26 17:36:03 +00001576#endif
Richard Hendersonf6405242010-04-22 16:47:31 -07001577
Avi Kivity5312bd82012-02-12 18:32:55 +02001578 section = &phys_sections[mmio->sub_section[idx]];
1579 addr += mmio->base;
1580 addr -= section->offset_within_address_space;
1581 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02001582 io_mem_write(section->mr, addr, value, len);
blueswir1db7b5422007-05-26 17:36:03 +00001583}
1584
Avi Kivity70c68e42012-01-02 12:32:48 +02001585static const MemoryRegionOps subpage_ops = {
1586 .read = subpage_read,
1587 .write = subpage_write,
1588 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00001589};
1590
Avi Kivitya8170e52012-10-23 12:30:10 +02001591static uint64_t subpage_ram_read(void *opaque, hwaddr addr,
Avi Kivityde712f92012-01-02 12:41:07 +02001592 unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01001593{
1594 ram_addr_t raddr = addr;
1595 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02001596 switch (size) {
1597 case 1: return ldub_p(ptr);
1598 case 2: return lduw_p(ptr);
1599 case 4: return ldl_p(ptr);
1600 default: abort();
1601 }
Andreas Färber56384e82011-11-30 16:26:21 +01001602}
1603
Avi Kivitya8170e52012-10-23 12:30:10 +02001604static void subpage_ram_write(void *opaque, hwaddr addr,
Avi Kivityde712f92012-01-02 12:41:07 +02001605 uint64_t value, unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01001606{
1607 ram_addr_t raddr = addr;
1608 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02001609 switch (size) {
1610 case 1: return stb_p(ptr, value);
1611 case 2: return stw_p(ptr, value);
1612 case 4: return stl_p(ptr, value);
1613 default: abort();
1614 }
Andreas Färber56384e82011-11-30 16:26:21 +01001615}
1616
Avi Kivityde712f92012-01-02 12:41:07 +02001617static const MemoryRegionOps subpage_ram_ops = {
1618 .read = subpage_ram_read,
1619 .write = subpage_ram_write,
1620 .endianness = DEVICE_NATIVE_ENDIAN,
Andreas Färber56384e82011-11-30 16:26:21 +01001621};
1622
Anthony Liguoric227f092009-10-01 16:12:16 -05001623static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02001624 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00001625{
1626 int idx, eidx;
1627
1628 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1629 return -1;
1630 idx = SUBPAGE_IDX(start);
1631 eidx = SUBPAGE_IDX(end);
1632#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00001633 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", __func__,
blueswir1db7b5422007-05-26 17:36:03 +00001634 mmio, start, end, idx, eidx, section);
1635#endif
Avi Kivity5312bd82012-02-12 18:32:55 +02001636 if (memory_region_is_ram(phys_sections[section].mr)) {
1637 MemoryRegionSection new_section = phys_sections[section];
1638 new_section.mr = &io_mem_subpage_ram;
1639 section = phys_section_add(&new_section);
Andreas Färber56384e82011-11-30 16:26:21 +01001640 }
blueswir1db7b5422007-05-26 17:36:03 +00001641 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02001642 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00001643 }
1644
1645 return 0;
1646}
1647
Avi Kivitya8170e52012-10-23 12:30:10 +02001648static subpage_t *subpage_init(hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00001649{
Anthony Liguoric227f092009-10-01 16:12:16 -05001650 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00001651
Anthony Liguori7267c092011-08-20 22:09:37 -05001652 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00001653
1654 mmio->base = base;
Avi Kivity70c68e42012-01-02 12:32:48 +02001655 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
1656 "subpage", TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02001657 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00001658#if defined(DEBUG_SUBPAGE)
aliguori1eec6142009-02-05 22:06:18 +00001659 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
 1660 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00001661#endif
Avi Kivity0f0cb162012-02-13 17:14:32 +02001662 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
blueswir1db7b5422007-05-26 17:36:03 +00001663
1664 return mmio;
1665}
1666
Avi Kivity5312bd82012-02-12 18:32:55 +02001667static uint16_t dummy_section(MemoryRegion *mr)
1668{
1669 MemoryRegionSection section = {
1670 .mr = mr,
1671 .offset_within_address_space = 0,
1672 .offset_within_region = 0,
1673 .size = UINT64_MAX,
1674 };
1675
1676 return phys_section_add(&section);
1677}
1678
Avi Kivitya8170e52012-10-23 12:30:10 +02001679MemoryRegion *iotlb_to_region(hwaddr index)
Avi Kivityaa102232012-03-08 17:06:55 +02001680{
Avi Kivity37ec01d2012-03-08 18:08:35 +02001681 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02001682}
1683
Avi Kivitye9179ce2009-06-14 11:38:52 +03001684static void io_mem_init(void)
1685{
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001686 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001687 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
1688 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
1689 "unassigned", UINT64_MAX);
1690 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
1691 "notdirty", UINT64_MAX);
Avi Kivityde712f92012-01-02 12:41:07 +02001692 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
1693 "subpage-ram", UINT64_MAX);
Avi Kivity1ec9b902012-01-02 12:47:48 +02001694 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
1695 "watch", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03001696}
1697
Avi Kivityac1970f2012-10-03 16:22:53 +02001698static void mem_begin(MemoryListener *listener)
1699{
1700 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
1701
1702 destroy_all_mappings(d);
1703 d->phys_map.ptr = PHYS_MAP_NODE_NIL;
1704}
1705
Avi Kivity50c1e142012-02-08 21:36:02 +02001706static void core_begin(MemoryListener *listener)
1707{
Avi Kivity5312bd82012-02-12 18:32:55 +02001708 phys_sections_clear();
1709 phys_section_unassigned = dummy_section(&io_mem_unassigned);
Avi Kivityaa102232012-03-08 17:06:55 +02001710 phys_section_notdirty = dummy_section(&io_mem_notdirty);
1711 phys_section_rom = dummy_section(&io_mem_rom);
1712 phys_section_watch = dummy_section(&io_mem_watch);
Avi Kivity50c1e142012-02-08 21:36:02 +02001713}
1714
Avi Kivity1d711482012-10-02 18:54:45 +02001715static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02001716{
Andreas Färber9349b4f2012-03-14 01:38:32 +01001717 CPUArchState *env;
Avi Kivity117712c2012-02-12 21:23:17 +02001718
1719 /* since each CPU stores ram addresses in its TLB cache, we must
1720 reset the modified entries */
1721 /* XXX: slow ! */
1722 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1723 tlb_flush(env, 1);
1724 }
Avi Kivity50c1e142012-02-08 21:36:02 +02001725}
1726
Avi Kivity93632742012-02-08 16:54:16 +02001727static void core_log_global_start(MemoryListener *listener)
1728{
1729 cpu_physical_memory_set_dirty_tracking(1);
1730}
1731
1732static void core_log_global_stop(MemoryListener *listener)
1733{
1734 cpu_physical_memory_set_dirty_tracking(0);
1735}
1736
Avi Kivity4855d412012-02-08 21:16:05 +02001737static void io_region_add(MemoryListener *listener,
1738 MemoryRegionSection *section)
1739{
Avi Kivitya2d33522012-03-05 17:40:12 +02001740 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
1741
1742 mrio->mr = section->mr;
1743 mrio->offset = section->offset_within_region;
1744 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
Avi Kivity4855d412012-02-08 21:16:05 +02001745 section->offset_within_address_space, section->size);
Avi Kivitya2d33522012-03-05 17:40:12 +02001746 ioport_register(&mrio->iorange);
Avi Kivity4855d412012-02-08 21:16:05 +02001747}
1748
1749static void io_region_del(MemoryListener *listener,
1750 MemoryRegionSection *section)
1751{
1752 isa_unassign_ioport(section->offset_within_address_space, section->size);
1753}
1754
Avi Kivity93632742012-02-08 16:54:16 +02001755static MemoryListener core_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02001756 .begin = core_begin,
Avi Kivity93632742012-02-08 16:54:16 +02001757 .log_global_start = core_log_global_start,
1758 .log_global_stop = core_log_global_stop,
Avi Kivityac1970f2012-10-03 16:22:53 +02001759 .priority = 1,
Avi Kivity93632742012-02-08 16:54:16 +02001760};
1761
Avi Kivity4855d412012-02-08 21:16:05 +02001762static MemoryListener io_memory_listener = {
1763 .region_add = io_region_add,
1764 .region_del = io_region_del,
Avi Kivity4855d412012-02-08 21:16:05 +02001765 .priority = 0,
1766};
1767
Avi Kivity1d711482012-10-02 18:54:45 +02001768static MemoryListener tcg_memory_listener = {
1769 .commit = tcg_commit,
1770};
1771
Avi Kivityac1970f2012-10-03 16:22:53 +02001772void address_space_init_dispatch(AddressSpace *as)
1773{
1774 AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
1775
1776 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
1777 d->listener = (MemoryListener) {
1778 .begin = mem_begin,
1779 .region_add = mem_add,
1780 .region_nop = mem_add,
1781 .priority = 0,
1782 };
1783 as->dispatch = d;
1784 memory_listener_register(&d->listener, as);
1785}
1786
Avi Kivity83f3c252012-10-07 12:59:55 +02001787void address_space_destroy_dispatch(AddressSpace *as)
1788{
1789 AddressSpaceDispatch *d = as->dispatch;
1790
1791 memory_listener_unregister(&d->listener);
1792 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
1793 g_free(d);
1794 as->dispatch = NULL;
1795}
1796
Avi Kivity62152b82011-07-26 14:26:14 +03001797static void memory_map_init(void)
1798{
Anthony Liguori7267c092011-08-20 22:09:37 -05001799 system_memory = g_malloc(sizeof(*system_memory));
Avi Kivity8417ceb2011-08-03 11:56:14 +03001800 memory_region_init(system_memory, "system", INT64_MAX);
Avi Kivity2673a5d2012-10-02 18:49:28 +02001801 address_space_init(&address_space_memory, system_memory);
1802 address_space_memory.name = "memory";
Avi Kivity309cb472011-08-08 16:09:03 +03001803
Anthony Liguori7267c092011-08-20 22:09:37 -05001804 system_io = g_malloc(sizeof(*system_io));
Avi Kivity309cb472011-08-08 16:09:03 +03001805 memory_region_init(system_io, "io", 65536);
Avi Kivity2673a5d2012-10-02 18:49:28 +02001806 address_space_init(&address_space_io, system_io);
1807 address_space_io.name = "I/O";
Avi Kivity93632742012-02-08 16:54:16 +02001808
Avi Kivityf6790af2012-10-02 20:13:51 +02001809 memory_listener_register(&core_memory_listener, &address_space_memory);
1810 memory_listener_register(&io_memory_listener, &address_space_io);
1811 memory_listener_register(&tcg_memory_listener, &address_space_memory);
Peter Maydell9e119082012-10-29 11:34:32 +10001812
1813 dma_context_init(&dma_context_memory, &address_space_memory,
1814 NULL, NULL, NULL);
Avi Kivity62152b82011-07-26 14:26:14 +03001815}
1816
1817MemoryRegion *get_system_memory(void)
1818{
1819 return system_memory;
1820}
1821
Avi Kivity309cb472011-08-08 16:09:03 +03001822MemoryRegion *get_system_io(void)
1823{
1824 return system_io;
1825}
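/*
 * Illustrative sketch (hypothetical board code) of how the trees built in
 * memory_map_init() are populated.  memory_region_init_ram() and
 * memory_region_add_subregion() live in memory.c; their use here is an
 * assumption of the example, not something this file defines.
 *
 *     MemoryRegion *ram = g_malloc(sizeof(*ram));
 *     memory_region_init_ram(ram, "board.ram", ram_size);
 *     memory_region_add_subregion(get_system_memory(), 0, ram);
 */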
1826
pbrooke2eef172008-06-08 01:09:01 +00001827#endif /* !defined(CONFIG_USER_ONLY) */
1828
bellard13eb76e2004-01-24 15:23:36 +00001829/* physical memory access (slow version, mainly for debug) */
1830#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +01001831int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00001832 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00001833{
1834 int l, flags;
1835 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00001836 void * p;
bellard13eb76e2004-01-24 15:23:36 +00001837
1838 while (len > 0) {
1839 page = addr & TARGET_PAGE_MASK;
1840 l = (page + TARGET_PAGE_SIZE) - addr;
1841 if (l > len)
1842 l = len;
1843 flags = page_get_flags(page);
1844 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00001845 return -1;
bellard13eb76e2004-01-24 15:23:36 +00001846 if (is_write) {
1847 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00001848 return -1;
bellard579a97f2007-11-11 14:26:47 +00001849 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00001850 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00001851 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00001852 memcpy(p, buf, l);
1853 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00001854 } else {
1855 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00001856 return -1;
bellard579a97f2007-11-11 14:26:47 +00001857 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00001858 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00001859 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00001860 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00001861 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00001862 }
1863 len -= l;
1864 buf += l;
1865 addr += l;
1866 }
Paul Brooka68fe892010-03-01 00:08:59 +00001867 return 0;
bellard13eb76e2004-01-24 15:23:36 +00001868}
bellard8df1cd02005-01-28 22:37:22 +00001869
bellard13eb76e2004-01-24 15:23:36 +00001870#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001871
Avi Kivitya8170e52012-10-23 12:30:10 +02001872static void invalidate_and_set_dirty(hwaddr addr,
1873 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001874{
1875 if (!cpu_physical_memory_is_dirty(addr)) {
1876 /* invalidate code */
1877 tb_invalidate_phys_page_range(addr, addr + length, 0);
1878 /* set dirty bit */
1879 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1880 }
Anthony PERARDe2269392012-10-03 13:49:22 +00001881 xen_modified_memory(addr, length);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001882}
1883
Avi Kivitya8170e52012-10-23 12:30:10 +02001884void address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02001885 int len, bool is_write)
bellard13eb76e2004-01-24 15:23:36 +00001886{
Avi Kivityac1970f2012-10-03 16:22:53 +02001887 AddressSpaceDispatch *d = as->dispatch;
Avi Kivity37ec01d2012-03-08 18:08:35 +02001888 int l;
bellard13eb76e2004-01-24 15:23:36 +00001889 uint8_t *ptr;
1890 uint32_t val;
Avi Kivitya8170e52012-10-23 12:30:10 +02001891 hwaddr page;
Avi Kivityf3705d52012-03-08 16:16:34 +02001892 MemoryRegionSection *section;
ths3b46e622007-09-17 08:09:54 +00001893
bellard13eb76e2004-01-24 15:23:36 +00001894 while (len > 0) {
1895 page = addr & TARGET_PAGE_MASK;
1896 l = (page + TARGET_PAGE_SIZE) - addr;
1897 if (l > len)
1898 l = len;
Avi Kivityac1970f2012-10-03 16:22:53 +02001899 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00001900
bellard13eb76e2004-01-24 15:23:36 +00001901 if (is_write) {
Avi Kivityf3705d52012-03-08 16:16:34 +02001902 if (!memory_region_is_ram(section->mr)) {
Avi Kivitya8170e52012-10-23 12:30:10 +02001903 hwaddr addr1;
Blue Swirlcc5bea62012-04-14 14:56:48 +00001904 addr1 = memory_region_section_addr(section, addr);
bellard6a00d602005-11-21 23:25:50 +00001905 /* XXX: could force cpu_single_env to NULL to avoid
1906 potential bugs */
aurel326c2934d2009-02-18 21:37:17 +00001907 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00001908 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001909 val = ldl_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02001910 io_mem_write(section->mr, addr1, val, 4);
bellard13eb76e2004-01-24 15:23:36 +00001911 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00001912 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00001913 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001914 val = lduw_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02001915 io_mem_write(section->mr, addr1, val, 2);
bellard13eb76e2004-01-24 15:23:36 +00001916 l = 2;
1917 } else {
bellard1c213d12005-09-03 10:49:04 +00001918 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001919 val = ldub_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02001920 io_mem_write(section->mr, addr1, val, 1);
bellard13eb76e2004-01-24 15:23:36 +00001921 l = 1;
1922 }
Avi Kivityf3705d52012-03-08 16:16:34 +02001923 } else if (!section->readonly) {
Anthony PERARD8ca56922011-07-15 04:32:53 +00001924 ram_addr_t addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02001925 addr1 = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00001926 + memory_region_section_addr(section, addr);
bellard13eb76e2004-01-24 15:23:36 +00001927 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00001928 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00001929 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001930 invalidate_and_set_dirty(addr1, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01001931 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00001932 }
1933 } else {
Blue Swirlcc5bea62012-04-14 14:56:48 +00001934 if (!(memory_region_is_ram(section->mr) ||
1935 memory_region_is_romd(section->mr))) {
Avi Kivitya8170e52012-10-23 12:30:10 +02001936 hwaddr addr1;
bellard13eb76e2004-01-24 15:23:36 +00001937 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00001938 addr1 = memory_region_section_addr(section, addr);
aurel326c2934d2009-02-18 21:37:17 +00001939 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00001940 /* 32 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02001941 val = io_mem_read(section->mr, addr1, 4);
bellardc27004e2005-01-03 23:35:10 +00001942 stl_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00001943 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00001944 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00001945 /* 16 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02001946 val = io_mem_read(section->mr, addr1, 2);
bellardc27004e2005-01-03 23:35:10 +00001947 stw_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00001948 l = 2;
1949 } else {
bellard1c213d12005-09-03 10:49:04 +00001950 /* 8 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02001951 val = io_mem_read(section->mr, addr1, 1);
bellardc27004e2005-01-03 23:35:10 +00001952 stb_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00001953 l = 1;
1954 }
1955 } else {
1956 /* RAM case */
Anthony PERARD0a1b3572012-03-19 15:54:34 +00001957 ptr = qemu_get_ram_ptr(section->mr->ram_addr
Blue Swirlcc5bea62012-04-14 14:56:48 +00001958 + memory_region_section_addr(section,
1959 addr));
Avi Kivityf3705d52012-03-08 16:16:34 +02001960 memcpy(buf, ptr, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01001961 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00001962 }
1963 }
1964 len -= l;
1965 buf += l;
1966 addr += l;
1967 }
1968}
bellard8df1cd02005-01-28 22:37:22 +00001969
Avi Kivitya8170e52012-10-23 12:30:10 +02001970void address_space_write(AddressSpace *as, hwaddr addr,
Avi Kivityac1970f2012-10-03 16:22:53 +02001971 const uint8_t *buf, int len)
1972{
1973 address_space_rw(as, addr, (uint8_t *)buf, len, true);
1974}
1975
1976/**
1977 * address_space_read: read from an address space.
1978 *
1979 * @as: #AddressSpace to be accessed
1980 * @addr: address within that address space
 1981 * @buf: buffer into which the data is read
 * @len: number of bytes to read
1982 */
Avi Kivitya8170e52012-10-23 12:30:10 +02001983void address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02001984{
1985 address_space_rw(as, addr, buf, len, false);
1986}
1987
1988
Avi Kivitya8170e52012-10-23 12:30:10 +02001989void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02001990 int len, int is_write)
1991{
1992 return address_space_rw(&address_space_memory, addr, buf, len, is_write);
1993}
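/*
 * Illustrative sketch: the common convenience pattern built on this
 * wrapper ("gpa" is a hypothetical guest physical address).
 * cpu_physical_memory_read()/write() are thin helpers over _rw().
 *
 *     uint32_t v;
 *     cpu_physical_memory_read(gpa, &v, sizeof(v));    // is_write == 0
 *     v |= 1;
 *     cpu_physical_memory_write(gpa, &v, sizeof(v));   // is_write == 1
 */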
1994
bellardd0ecd2a2006-04-23 17:14:48 +00001995/* used for ROM loading : can write in RAM and ROM */
Avi Kivitya8170e52012-10-23 12:30:10 +02001996void cpu_physical_memory_write_rom(hwaddr addr,
bellardd0ecd2a2006-04-23 17:14:48 +00001997 const uint8_t *buf, int len)
1998{
Avi Kivityac1970f2012-10-03 16:22:53 +02001999 AddressSpaceDispatch *d = address_space_memory.dispatch;
bellardd0ecd2a2006-04-23 17:14:48 +00002000 int l;
2001 uint8_t *ptr;
Avi Kivitya8170e52012-10-23 12:30:10 +02002002 hwaddr page;
Avi Kivityf3705d52012-03-08 16:16:34 +02002003 MemoryRegionSection *section;
ths3b46e622007-09-17 08:09:54 +00002004
bellardd0ecd2a2006-04-23 17:14:48 +00002005 while (len > 0) {
2006 page = addr & TARGET_PAGE_MASK;
2007 l = (page + TARGET_PAGE_SIZE) - addr;
2008 if (l > len)
2009 l = len;
Avi Kivityac1970f2012-10-03 16:22:53 +02002010 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00002011
Blue Swirlcc5bea62012-04-14 14:56:48 +00002012 if (!(memory_region_is_ram(section->mr) ||
2013 memory_region_is_romd(section->mr))) {
bellardd0ecd2a2006-04-23 17:14:48 +00002014 /* do nothing */
2015 } else {
2016 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02002017 addr1 = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002018 + memory_region_section_addr(section, addr);
bellardd0ecd2a2006-04-23 17:14:48 +00002019 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002020 ptr = qemu_get_ram_ptr(addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00002021 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002022 invalidate_and_set_dirty(addr1, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002023 qemu_put_ram_ptr(ptr);
bellardd0ecd2a2006-04-23 17:14:48 +00002024 }
2025 len -= l;
2026 buf += l;
2027 addr += l;
2028 }
2029}
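/*
 * Illustrative sketch: firmware loaders use this entry point to populate
 * ROM ("blob", "blob_size" and the address are hypothetical).
 *
 *     cpu_physical_memory_write_rom(0xfffc0000, blob, blob_size);
 *
 * An ordinary guest store to the same range goes through rom_mem_ops,
 * whose write handler is unassigned_mem_write(), and so has no effect.
 */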
2030
aliguori6d16c2f2009-01-22 16:59:11 +00002031typedef struct {
2032 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002033 hwaddr addr;
2034 hwaddr len;
aliguori6d16c2f2009-01-22 16:59:11 +00002035} BounceBuffer;
2036
2037static BounceBuffer bounce;
2038
aliguoriba223c22009-01-22 16:59:16 +00002039typedef struct MapClient {
2040 void *opaque;
2041 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00002042 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002043} MapClient;
2044
Blue Swirl72cf2d42009-09-12 07:36:22 +00002045static QLIST_HEAD(map_client_list, MapClient) map_client_list
2046 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002047
2048void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2049{
Anthony Liguori7267c092011-08-20 22:09:37 -05002050 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00002051
2052 client->opaque = opaque;
2053 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002054 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00002055 return client;
2056}
2057
Blue Swirl8b9c99d2012-10-28 11:04:51 +00002058static void cpu_unregister_map_client(void *_client)
aliguoriba223c22009-01-22 16:59:16 +00002059{
2060 MapClient *client = (MapClient *)_client;
2061
Blue Swirl72cf2d42009-09-12 07:36:22 +00002062 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002063 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002064}
2065
2066static void cpu_notify_map_clients(void)
2067{
2068 MapClient *client;
2069
Blue Swirl72cf2d42009-09-12 07:36:22 +00002070 while (!QLIST_EMPTY(&map_client_list)) {
2071 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002072 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09002073 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00002074 }
2075}
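/*
 * Illustrative sketch: a DMA producer retrying once the bounce buffer is
 * free again ("MyDeviceState" and "mydev_kick_dma" are hypothetical).
 *
 *     static void dma_retry_cb(void *opaque)
 *     {
 *         MyDeviceState *s = opaque;
 *         mydev_kick_dma(s);      // calls cpu_physical_memory_map() again
 *     }
 *
 *     if (!cpu_physical_memory_map(addr, &len, 1)) {
 *         cpu_register_map_client(s, dma_retry_cb);
 *     }
 *
 * cpu_notify_map_clients() then runs each callback when the bounce buffer
 * is released in address_space_unmap() below.
 */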
2076
aliguori6d16c2f2009-01-22 16:59:11 +00002077/* Map a physical memory region into a host virtual address.
2078 * May map a subset of the requested range, given by and returned in *plen.
2079 * May return NULL if resources needed to perform the mapping are exhausted.
2080 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002081 * Use cpu_register_map_client() to know when retrying the map operation is
2082 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002083 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002084void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002085 hwaddr addr,
2086 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002087 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002088{
Avi Kivityac1970f2012-10-03 16:22:53 +02002089 AddressSpaceDispatch *d = as->dispatch;
Avi Kivitya8170e52012-10-23 12:30:10 +02002090 hwaddr len = *plen;
2091 hwaddr todo = 0;
aliguori6d16c2f2009-01-22 16:59:11 +00002092 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02002093 hwaddr page;
Avi Kivityf3705d52012-03-08 16:16:34 +02002094 MemoryRegionSection *section;
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002095 ram_addr_t raddr = RAM_ADDR_MAX;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002096 ram_addr_t rlen;
2097 void *ret;
aliguori6d16c2f2009-01-22 16:59:11 +00002098
2099 while (len > 0) {
2100 page = addr & TARGET_PAGE_MASK;
2101 l = (page + TARGET_PAGE_SIZE) - addr;
2102 if (l > len)
2103 l = len;
Avi Kivityac1970f2012-10-03 16:22:53 +02002104 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
aliguori6d16c2f2009-01-22 16:59:11 +00002105
Avi Kivityf3705d52012-03-08 16:16:34 +02002106 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002107 if (todo || bounce.buffer) {
aliguori6d16c2f2009-01-22 16:59:11 +00002108 break;
2109 }
2110 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
2111 bounce.addr = addr;
2112 bounce.len = l;
2113 if (!is_write) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002114 address_space_read(as, addr, bounce.buffer, l);
aliguori6d16c2f2009-01-22 16:59:11 +00002115 }
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002116
2117 *plen = l;
2118 return bounce.buffer;
aliguori6d16c2f2009-01-22 16:59:11 +00002119 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002120 if (!todo) {
Avi Kivityf3705d52012-03-08 16:16:34 +02002121 raddr = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002122 + memory_region_section_addr(section, addr);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002123 }
aliguori6d16c2f2009-01-22 16:59:11 +00002124
2125 len -= l;
2126 addr += l;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002127 todo += l;
aliguori6d16c2f2009-01-22 16:59:11 +00002128 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002129 rlen = todo;
2130 ret = qemu_ram_ptr_length(raddr, &rlen);
2131 *plen = rlen;
2132 return ret;
aliguori6d16c2f2009-01-22 16:59:11 +00002133}
2134
Avi Kivityac1970f2012-10-03 16:22:53 +02002135/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002136 * Will also mark the memory as dirty if is_write == 1. access_len gives
2137 * the amount of memory that was actually read or written by the caller.
2138 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002139void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2140 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002141{
2142 if (buffer != bounce.buffer) {
2143 if (is_write) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03002144 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002145 while (access_len) {
2146 unsigned l;
2147 l = TARGET_PAGE_SIZE;
2148 if (l > access_len)
2149 l = access_len;
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002150 invalidate_and_set_dirty(addr1, l);
aliguori6d16c2f2009-01-22 16:59:11 +00002151 addr1 += l;
2152 access_len -= l;
2153 }
2154 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002155 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002156 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002157 }
aliguori6d16c2f2009-01-22 16:59:11 +00002158 return;
2159 }
2160 if (is_write) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002161 address_space_write(as, bounce.addr, bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002162 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00002163 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002164 bounce.buffer = NULL;
aliguoriba223c22009-01-22 16:59:16 +00002165 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00002166}
bellardd0ecd2a2006-04-23 17:14:48 +00002167
Avi Kivitya8170e52012-10-23 12:30:10 +02002168void *cpu_physical_memory_map(hwaddr addr,
2169 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002170 int is_write)
2171{
2172 return address_space_map(&address_space_memory, addr, plen, is_write);
2173}
2174
Avi Kivitya8170e52012-10-23 12:30:10 +02002175void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2176 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002177{
2178 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2179}
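/*
 * Illustrative sketch: zero-copy DMA via the map/unmap pair, honouring
 * the partial-mapping contract ("dev_addr" and "xfer" are hypothetical).
 *
 *     hwaddr want = xfer;
 *     while (want > 0) {
 *         hwaddr got = want;
 *         void *p = cpu_physical_memory_map(dev_addr, &got, 1);
 *         if (!p) {
 *             break;              // exhausted; see cpu_register_map_client()
 *         }
 *         memset(p, 0, got);      // the "device" fills guest memory
 *         cpu_physical_memory_unmap(p, got, 1, got);
 *         dev_addr += got;
 *         want -= got;
 *     }
 */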
2180
bellard8df1cd02005-01-28 22:37:22 +00002181/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002182static inline uint32_t ldl_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002183 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002184{
bellard8df1cd02005-01-28 22:37:22 +00002185 uint8_t *ptr;
2186 uint32_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02002187 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00002188
Avi Kivityac1970f2012-10-03 16:22:53 +02002189 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00002190
Blue Swirlcc5bea62012-04-14 14:56:48 +00002191 if (!(memory_region_is_ram(section->mr) ||
2192 memory_region_is_romd(section->mr))) {
bellard8df1cd02005-01-28 22:37:22 +00002193 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00002194 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02002195 val = io_mem_read(section->mr, addr, 4);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002196#if defined(TARGET_WORDS_BIGENDIAN)
2197 if (endian == DEVICE_LITTLE_ENDIAN) {
2198 val = bswap32(val);
2199 }
2200#else
2201 if (endian == DEVICE_BIG_ENDIAN) {
2202 val = bswap32(val);
2203 }
2204#endif
bellard8df1cd02005-01-28 22:37:22 +00002205 } else {
2206 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02002207 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002208 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002209 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002210 switch (endian) {
2211 case DEVICE_LITTLE_ENDIAN:
2212 val = ldl_le_p(ptr);
2213 break;
2214 case DEVICE_BIG_ENDIAN:
2215 val = ldl_be_p(ptr);
2216 break;
2217 default:
2218 val = ldl_p(ptr);
2219 break;
2220 }
bellard8df1cd02005-01-28 22:37:22 +00002221 }
2222 return val;
2223}
2224
Avi Kivitya8170e52012-10-23 12:30:10 +02002225uint32_t ldl_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002226{
2227 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2228}
2229
Avi Kivitya8170e52012-10-23 12:30:10 +02002230uint32_t ldl_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002231{
2232 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2233}
2234
Avi Kivitya8170e52012-10-23 12:30:10 +02002235uint32_t ldl_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002236{
2237 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
2238}
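/*
 * Illustrative sketch: device code picks the accessor matching its bus
 * endianness instead of the guest's ("desc_addr" is hypothetical).
 *
 *     uint32_t le  = ldl_le_phys(desc_addr);   // little-endian descriptor
 *     uint32_t be  = ldl_be_phys(desc_addr);   // big-endian descriptor
 *     uint32_t nat = ldl_phys(desc_addr);      // guest-native byte order
 */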
2239
bellard84b7b8e2005-11-28 21:19:04 +00002240/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002241static inline uint64_t ldq_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002242 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00002243{
bellard84b7b8e2005-11-28 21:19:04 +00002244 uint8_t *ptr;
2245 uint64_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02002246 MemoryRegionSection *section;
bellard84b7b8e2005-11-28 21:19:04 +00002247
Avi Kivityac1970f2012-10-03 16:22:53 +02002248 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00002249
Blue Swirlcc5bea62012-04-14 14:56:48 +00002250 if (!(memory_region_is_ram(section->mr) ||
2251 memory_region_is_romd(section->mr))) {
bellard84b7b8e2005-11-28 21:19:04 +00002252 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00002253 addr = memory_region_section_addr(section, addr);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002254
2255 /* XXX This is broken when device endian != cpu endian.
2256 Fix and add "endian" variable check */
bellard84b7b8e2005-11-28 21:19:04 +00002257#ifdef TARGET_WORDS_BIGENDIAN
Avi Kivity37ec01d2012-03-08 18:08:35 +02002258 val = io_mem_read(section->mr, addr, 4) << 32;
2259 val |= io_mem_read(section->mr, addr + 4, 4);
bellard84b7b8e2005-11-28 21:19:04 +00002260#else
Avi Kivity37ec01d2012-03-08 18:08:35 +02002261 val = io_mem_read(section->mr, addr, 4);
2262 val |= io_mem_read(section->mr, addr + 4, 4) << 32;
bellard84b7b8e2005-11-28 21:19:04 +00002263#endif
2264 } else {
2265 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02002266 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002267 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002268 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002269 switch (endian) {
2270 case DEVICE_LITTLE_ENDIAN:
2271 val = ldq_le_p(ptr);
2272 break;
2273 case DEVICE_BIG_ENDIAN:
2274 val = ldq_be_p(ptr);
2275 break;
2276 default:
2277 val = ldq_p(ptr);
2278 break;
2279 }
bellard84b7b8e2005-11-28 21:19:04 +00002280 }
2281 return val;
2282}
2283
Avi Kivitya8170e52012-10-23 12:30:10 +02002284uint64_t ldq_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002285{
2286 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2287}
2288
Avi Kivitya8170e52012-10-23 12:30:10 +02002289uint64_t ldq_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002290{
2291 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2292}
2293
Avi Kivitya8170e52012-10-23 12:30:10 +02002294uint64_t ldq_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002295{
2296 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2297}
2298
bellardaab33092005-10-30 20:48:42 +00002299/* XXX: optimize */
Avi Kivitya8170e52012-10-23 12:30:10 +02002300uint32_t ldub_phys(hwaddr addr)
bellardaab33092005-10-30 20:48:42 +00002301{
2302 uint8_t val;
2303 cpu_physical_memory_read(addr, &val, 1);
2304 return val;
2305}
2306
/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(hwaddr addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned.  The RAM page is not marked as dirty
   and the code inside is not invalidated.  This is useful when the
   dirty bits are used to track modified PTEs. */
void stl_phys_notdirty(hwaddr addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
                               & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}

void stq_phys_notdirty(hwaddr addr, uint64_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write(section->mr, addr, val >> 32, 4);
        io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
#else
        io_mem_write(section->mr, addr, (uint32_t)val, 4);
        io_mem_write(section->mr, addr + 4, val >> 32, 4);
#endif
    } else {
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        stq_p(ptr, val);
    }
}
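
/*
 * Illustrative sketch, not part of the original file: the classic user
 * of the _notdirty stores is a software page-table walker updating
 * accessed/dirty bits in guest PTEs.  stl_phys_notdirty() keeps the
 * dirty bitmap meaningful for PTE tracking and avoids invalidating
 * translated code on every PTE touch.  EXAMPLE_PTE_DIRTY and the
 * 32-bit PTE layout are invented for this example.
 */
#define EXAMPLE_PTE_DIRTY (1u << 6)

static void __attribute__((unused))
example_set_pte_dirty(hwaddr pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & EXAMPLE_PTE_DIRTY)) {
        stl_phys_notdirty(pte_addr, pte | EXAMPLE_PTE_DIRTY);
    }
}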

/* warning: addr must be aligned */
static inline void stl_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
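
/*
 * Illustrative sketch, not part of the original file: a device model
 * publishing a 32-bit completion status into a guest-RAM control block
 * whose specification defines the field as little-endian.  Unlike the
 * _notdirty variants above, this goes through stl_phys_internal() and
 * therefore marks the RAM page dirty and invalidates any translated
 * code in it.  The field offset is invented for this example.
 */
static void __attribute__((unused))
example_complete_request(hwaddr ctrl_block_pa, uint32_t status)
{
    stl_le_phys(ctrl_block_pa + 4, status);
}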

/* XXX: optimize */
void stb_phys(hwaddr addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 2);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        } else {
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
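
/*
 * Illustrative sketch, not part of the original file: how a debugger
 * stub might use cpu_memory_rw_debug() to pluck a 32-bit word out of
 * guest memory by *virtual* address.  Each page is translated with
 * cpu_get_phys_page_debug(), so this bypasses the TLB and, when
 * writing, can also patch ROM (e.g. to plant breakpoints).
 */
static int __attribute__((unused))
example_peek_guest_u32(CPUArchState *env, target_ulong vaddr,
                       uint32_t *out)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0) {
        return -1;              /* some page was unmapped */
    }
    *out = ldl_p(buf);          /* target-endian load from the buffer */
    return 0;
}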
#endif

#if !defined(CONFIG_USER_ONLY)

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#endif

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             phys_addr >> TARGET_PAGE_BITS);

    return !(memory_region_is_ram(section->mr) ||
             memory_region_is_romd(section->mr));
}
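
/*
 * Illustrative sketch, not part of the original file: a RAM dumper can
 * use cpu_physical_memory_is_io() to skip MMIO pages, where reads may
 * have side effects, and copy out only real RAM/ROM contents.  The
 * caller is assumed to supply a buffer covering [start, end).
 */
static void __attribute__((unused))
example_dump_ram(hwaddr start, hwaddr end, uint8_t *out)
{
    hwaddr addr;

    for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        if (cpu_physical_memory_is_io(addr)) {
            continue;           /* leave device-backed pages alone */
        }
        cpu_physical_memory_read(addr, out + (addr - start),
                                 TARGET_PAGE_SIZE);
    }
}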
#endif