/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"

//#define DEBUG_UNASSIGNED
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;
DMAContext dma_context_memory;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}

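/* Populate the span [*index, *index + *nb) of the physical page map with
   section number 'leaf', walking down from 'level': intermediate nodes are
   allocated on demand and partially covered subtrees are handled by
   recursing into the next level.  */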
static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
{
    PhysPageEntry lp = d->phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#else
#define vmstate_cpu_common vmstate_dummy
#endif

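/* Return the CPU whose cpu_index equals 'index', or NULL if there is none. */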
CPUState *qemu_get_cpu(int index)
{
    CPUArchState *env = first_cpu;
    CPUState *cpu = NULL;

    while (env) {
        cpu = ENV_GET_CPU(env);
        if (cpu->cpu_index == index) {
            break;
        }
        env = env->next_cpu;
    }

    return env ? cpu : NULL;
}

void qemu_for_each_cpu(void (*func)(CPUState *cpu, void *data), void *data)
{
    CPUArchState *env = first_cpu;

    while (env) {
        func(ENV_GET_CPU(env), data);
        env = env->next_cpu;
    }
}

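/* Register a freshly initialized CPU: assign the next free cpu_index,
   append it to the global CPU list and hook up its savevm/vmstate
   handlers.  */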
void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

void cpu_exit(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->exit_request = 1;
    cpu->tcg_exit_req = 1;
}

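/* Report a fatal error to stderr (and the log, if enabled), dump the CPU
   state and abort().  In user mode the default SIGABRT handler is restored
   first in case the guest had installed its own.  */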
void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

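/* Duplicate 'env' into a freshly initialized CPU of the same model,
   preserving the list chaining of the new copy.  */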
CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining. */
    new_env->next_cpu = next_cpu;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)
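/* Re-arm dirty tracking for [start, end): reset the corresponding TLB
   entries so that the next guest write to the range faults and sets the
   dirty bit again.  */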
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, paddr);
        if (!section->readonly) {
            iotlb |= phys_section_notdirty;
        } else {
            iotlb |= phys_section_rom;
        }
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = section - phys_sections;
        iotlb += memory_region_section_addr(section, paddr);
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(hwaddr base);
static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
        if (!p[i].is_leaf) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].ptr);
        }
    }
    lp->is_leaf = 0;
    lp->ptr = PHYS_MAP_NODE_NIL;
}

static void destroy_all_mappings(AddressSpaceDispatch *d)
{
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}

static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}

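/* Register a section smaller than a target page: route the page through a
   subpage_t container so that several sections can share one page.  */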
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + section->size - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}


static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    hwaddr addr;
    uint16_t section_index = phys_section_add(section);

    assert(size);

    addr = start_addr;
    phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
                  section_index);
}

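/* Memory listener callback: split a new section into an unaligned head and
   tail (registered as subpages) and a page-aligned middle (registered as a
   multipage range).  */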
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
    MemoryRegionSection now = *section, remain = *section;

    if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
        || (now.size < TARGET_PAGE_SIZE)) {
        now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space,
                       now.size);
        register_subpage(d, &now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    while (remain.size >= TARGET_PAGE_SIZE) {
        now = remain;
        if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = TARGET_PAGE_SIZE;
            register_subpage(d, &now);
        } else {
            now.size &= TARGET_PAGE_MASK;
            register_multipage(d, &now);
        }
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    now = remain;
    if (now.size) {
        register_subpage(d, &now);
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC 0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

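/* Allocate backing for a RAM block from a file under -mem-path (normally
   hugetlbfs): create an unlinked temporary file, grow it to the rounded-up
   size and mmap it.  */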
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    block->fd = fd;
    return area;
}
#endif

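/* Best-fit allocator for ram_addr_t space: scan the gaps between existing
   blocks and return the start of the smallest gap that can hold 'size'
   bytes.  */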
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;
    QemuOpts *machine_opts;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (machine_opts &&
        !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                    "but dump_guest_core=off specified\n");
        }
    }
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    QemuOpts *opts;

    opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

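/* Register a new RAM block of 'size' bytes.  If 'host' is non-NULL the
   caller provides the backing memory; otherwise it is allocated via
   -mem-path, Xen, KVM or plain vmalloc.  The block is inserted into the
   size-sorted block list and its pages are marked dirty.  */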
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *block, *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                memory_try_enable_merging(new_block->host, size);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_vmalloc(size);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    /* Keep the list sorted from biggest to smallest block.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

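/* Unregister a RAM block and release its host backing store according to
   how it was allocated (file mapping, Xen map cache or vmalloc).  */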
Anthony Liguoric227f092009-10-01 16:12:16 -05001140void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00001141{
Alex Williamson04b16652010-07-02 11:13:17 -06001142 RAMBlock *block;
1143
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001144 /* This assumes the iothread lock is taken here too. */
1145 qemu_mutex_lock_ramlist();
Paolo Bonzinia3161032012-11-14 15:54:48 +01001146 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001147 if (addr == block->offset) {
Paolo Bonzinia3161032012-11-14 15:54:48 +01001148 QTAILQ_REMOVE(&ram_list.blocks, block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001149 ram_list.mru_block = NULL;
Umesh Deshpandef798b072011-08-18 11:41:17 -07001150 ram_list.version++;
Huang Yingcd19cfa2011-03-02 08:56:19 +01001151 if (block->flags & RAM_PREALLOC_MASK) {
1152 ;
1153 } else if (mem_path) {
Alex Williamson04b16652010-07-02 11:13:17 -06001154#if defined (__linux__) && !defined(TARGET_S390X)
1155 if (block->fd) {
1156 munmap(block->host, block->length);
1157 close(block->fd);
1158 } else {
1159 qemu_vfree(block->host);
1160 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01001161#else
1162 abort();
Alex Williamson04b16652010-07-02 11:13:17 -06001163#endif
1164 } else {
1165#if defined(TARGET_S390X) && defined(CONFIG_KVM)
1166 munmap(block->host, block->length);
1167#else
Jan Kiszka868bb332011-06-21 22:59:09 +02001168 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001169 xen_invalidate_map_cache_entry(block->host);
Jun Nakajima432d2682010-08-31 16:41:25 +01001170 } else {
1171 qemu_vfree(block->host);
1172 }
Alex Williamson04b16652010-07-02 11:13:17 -06001173#endif
1174 }
Anthony Liguori7267c092011-08-20 22:09:37 -05001175 g_free(block);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001176 break;
Alex Williamson04b16652010-07-02 11:13:17 -06001177 }
1178 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001179 qemu_mutex_unlock_ramlist();
Alex Williamson04b16652010-07-02 11:13:17 -06001180
bellarde9a1ab12007-02-08 23:08:38 +00001181}
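/* Illustrative lifecycle sketch (hedged; not part of this file's API surface):
 * device models normally reach qemu_ram_alloc()/qemu_ram_free() indirectly
 * through the memory API rather than calling them here. Under that
 * assumption, the pairing looks like:
 *
 *     MemoryRegion mr;                              // hypothetical region
 *     memory_region_init_ram(&mr, "demo.ram", 0x10000);
 *                                     // allocates via qemu_ram_alloc()
 *     ...
 *     memory_region_destroy(&mr);     // releases the backing RAMBlock
 *
 * The offset-based entry points above remain for callers such as users of
 * qemu_ram_alloc_from_ptr() that manage the backing storage themselves.
 */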
1182
Huang Yingcd19cfa2011-03-02 08:56:19 +01001183#ifndef _WIN32
1184void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1185{
1186 RAMBlock *block;
1187 ram_addr_t offset;
1188 int flags;
1189 void *area, *vaddr;
1190
Paolo Bonzinia3161032012-11-14 15:54:48 +01001191 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001192 offset = addr - block->offset;
1193 if (offset < block->length) {
1194 vaddr = block->host + offset;
1195 if (block->flags & RAM_PREALLOC_MASK) {
1196 ;
1197 } else {
1198 flags = MAP_FIXED;
1199 munmap(vaddr, length);
1200 if (mem_path) {
1201#if defined(__linux__) && !defined(TARGET_S390X)
1202 if (block->fd) {
1203#ifdef MAP_POPULATE
1204 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1205 MAP_PRIVATE;
1206#else
1207 flags |= MAP_PRIVATE;
1208#endif
1209 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1210 flags, block->fd, offset);
1211 } else {
1212 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1213 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1214 flags, -1, 0);
1215 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01001216#else
1217 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001218#endif
1219 } else {
1220#if defined(TARGET_S390X) && defined(CONFIG_KVM)
1221 flags |= MAP_SHARED | MAP_ANONYMOUS;
1222 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
1223 flags, -1, 0);
1224#else
1225 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1226 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1227 flags, -1, 0);
1228#endif
1229 }
1230 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001231 fprintf(stderr, "Could not remap addr: "
1232 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001233 length, addr);
1234 exit(1);
1235 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001236 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001237 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001238 }
1239 return;
1240 }
1241 }
1242}
1243#endif /* !_WIN32 */
1244
pbrookdc828ca2009-04-09 22:21:07 +00001245/* Return a host pointer to ram allocated with qemu_ram_alloc.
pbrook5579c7f2009-04-11 14:47:08 +00001246 With the exception of the softmmu code in this file, this should
1247 only be used for local memory (e.g. video ram) that the device owns,
1248 and knows it isn't going to access beyond the end of the block.
1249
1250 It should not be used for general purpose DMA.
1251 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1252 */
Anthony Liguoric227f092009-10-01 16:12:16 -05001253void *qemu_get_ram_ptr(ram_addr_t addr)
pbrookdc828ca2009-04-09 22:21:07 +00001254{
pbrook94a6b542009-04-11 17:15:54 +00001255 RAMBlock *block;
1256
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001257 /* The list is protected by the iothread lock here. */
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001258 block = ram_list.mru_block;
1259 if (block && addr - block->offset < block->length) {
1260 goto found;
1261 }
Paolo Bonzinia3161032012-11-14 15:54:48 +01001262 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Alex Williamsonf471a172010-06-11 11:11:42 -06001263 if (addr - block->offset < block->length) {
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001264 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001265 }
pbrook94a6b542009-04-11 17:15:54 +00001266 }
Alex Williamsonf471a172010-06-11 11:11:42 -06001267
1268 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1269 abort();
1270
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001271found:
1272 ram_list.mru_block = block;
1273 if (xen_enabled()) {
1274 /* We need to check whether the requested address is in RAM
1275 * because we don't want to map all of guest memory in QEMU;
1276 * in that case, map only up to the end of the page.
1277 */
1278 if (block->offset == 0) {
1279 return xen_map_cache(addr, 0, 0);
1280 } else if (block->host == NULL) {
1281 block->host =
1282 xen_map_cache(block->offset, block->length, 1);
1283 }
1284 }
1285 return block->host + (addr - block->offset);
pbrookdc828ca2009-04-09 22:21:07 +00001286}
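/* Usage sketch (illustrative names only): a display device that owns a
 * RAM-backed region may keep a direct host pointer, because it knows it will
 * never read past the end of its own block.
 *
 *     void vga_draw_line(VGAState *s, ram_addr_t vram_offset)
 *     {
 *         uint8_t *src = qemu_get_ram_ptr(vram_offset);
 *         // ... render directly from host memory ...
 *     }
 *
 * Anything resembling guest-driven DMA must instead go through
 * cpu_physical_memory_map()/cpu_physical_memory_rw(), as the comment above
 * requires.
 */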
1287
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001288/* Return a host pointer to ram allocated with qemu_ram_alloc. Same as
1289 * qemu_get_ram_ptr but does not touch ram_list.mru_block.
1290 *
1291 * ??? Is this still necessary?
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001292 */
Blue Swirl8b9c99d2012-10-28 11:04:51 +00001293static void *qemu_safe_ram_ptr(ram_addr_t addr)
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001294{
1295 RAMBlock *block;
1296
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001297 /* The list is protected by the iothread lock here. */
Paolo Bonzinia3161032012-11-14 15:54:48 +01001298 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001299 if (addr - block->offset < block->length) {
Jan Kiszka868bb332011-06-21 22:59:09 +02001300 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001301 /* We need to check whether the requested address is in RAM
1302 * because we don't want to map all of guest memory in QEMU;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001303 * in that case, map only up to the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01001304 */
1305 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001306 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01001307 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001308 block->host =
1309 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01001310 }
1311 }
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001312 return block->host + (addr - block->offset);
1313 }
1314 }
1315
1316 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1317 abort();
1318
1319 return NULL;
1320}
1321
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001322/* Return a host pointer to guest RAM. Similar to qemu_get_ram_ptr
1323 * but takes a size argument. */
Blue Swirl8b9c99d2012-10-28 11:04:51 +00001324static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001325{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001326 if (*size == 0) {
1327 return NULL;
1328 }
Jan Kiszka868bb332011-06-21 22:59:09 +02001329 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001330 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02001331 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001332 RAMBlock *block;
1333
Paolo Bonzinia3161032012-11-14 15:54:48 +01001334 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001335 if (addr - block->offset < block->length) {
1336 if (addr - block->offset + *size > block->length)
1337 *size = block->length - addr + block->offset;
1338 return block->host + (addr - block->offset);
1339 }
1340 }
1341
1342 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1343 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001344 }
1345}
1346
Anthony PERARD050a0dd2010-09-16 13:57:49 +01001347void qemu_put_ram_ptr(void *addr)
1348{
1349 trace_qemu_put_ram_ptr(addr);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01001350}
1351
Marcelo Tosattie8902612010-10-11 15:31:19 -03001352int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00001353{
pbrook94a6b542009-04-11 17:15:54 +00001354 RAMBlock *block;
1355 uint8_t *host = ptr;
1356
Jan Kiszka868bb332011-06-21 22:59:09 +02001357 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001358 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001359 return 0;
1360 }
1361
Paolo Bonzinia3161032012-11-14 15:54:48 +01001362 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001363 /* This case appears when the block is not mapped. */
1364 if (block->host == NULL) {
1365 continue;
1366 }
Alex Williamsonf471a172010-06-11 11:11:42 -06001367 if (host - block->host < block->length) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03001368 *ram_addr = block->offset + (host - block->host);
1369 return 0;
Alex Williamsonf471a172010-06-11 11:11:42 -06001370 }
pbrook94a6b542009-04-11 17:15:54 +00001371 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001372
Marcelo Tosattie8902612010-10-11 15:31:19 -03001373 return -1;
1374}
Alex Williamsonf471a172010-06-11 11:11:42 -06001375
Marcelo Tosattie8902612010-10-11 15:31:19 -03001376/* Some of the softmmu routines need to translate from a host pointer
1377 (typically a TLB entry) back to a ram offset. */
1378ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
1379{
1380 ram_addr_t ram_addr;
Alex Williamsonf471a172010-06-11 11:11:42 -06001381
Marcelo Tosattie8902612010-10-11 15:31:19 -03001382 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
1383 fprintf(stderr, "Bad ram pointer %p\n", ptr);
1384 abort();
1385 }
1386 return ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00001387}
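/* Round-trip sketch (illustrative offset): the two directions of this
 * translation compose, which is what the TLB fast path relies on.
 *
 *     ram_addr_t off = 0x1000;                  // assumed valid RAM offset
 *     void *host = qemu_get_ram_ptr(off);
 *     assert(qemu_ram_addr_from_host_nofail(host) == off);
 *
 * The _nofail variant aborts on unknown pointers; callers that may hold
 * pointers outside guest RAM should use qemu_ram_addr_from_host() and check
 * its return value instead.
 */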
1388
Avi Kivitya8170e52012-10-23 12:30:10 +02001389static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001390 unsigned size)
bellard33417e72003-08-10 21:47:01 +00001391{
pbrook67d3b952006-12-18 05:03:52 +00001392#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00001393 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
pbrook67d3b952006-12-18 05:03:52 +00001394#endif
Richard Henderson5b450402011-04-18 16:13:12 -07001395#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001396 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00001397#endif
1398 return 0;
1399}
1400
Avi Kivitya8170e52012-10-23 12:30:10 +02001401static void unassigned_mem_write(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001402 uint64_t val, unsigned size)
blueswir1e18231a2008-10-06 18:46:28 +00001403{
1404#ifdef DEBUG_UNASSIGNED
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001405 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
blueswir1e18231a2008-10-06 18:46:28 +00001406#endif
Richard Henderson5b450402011-04-18 16:13:12 -07001407#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001408 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00001409#endif
1410}
1411
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001412static const MemoryRegionOps unassigned_mem_ops = {
1413 .read = unassigned_mem_read,
1414 .write = unassigned_mem_write,
1415 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00001416};
1417
Avi Kivitya8170e52012-10-23 12:30:10 +02001418static uint64_t error_mem_read(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001419 unsigned size)
1420{
1421 abort();
1422}
1423
Avi Kivitya8170e52012-10-23 12:30:10 +02001424static void error_mem_write(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001425 uint64_t value, unsigned size)
1426{
1427 abort();
1428}
1429
1430static const MemoryRegionOps error_mem_ops = {
1431 .read = error_mem_read,
1432 .write = error_mem_write,
1433 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00001434};
1435
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001436static const MemoryRegionOps rom_mem_ops = {
1437 .read = error_mem_read,
1438 .write = unassigned_mem_write,
1439 .endianness = DEVICE_NATIVE_ENDIAN,
1440};
1441
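/* notdirty_mem_write() is the slow-path store handler installed for RAM pages
 * whose dirty bits are still clear: it first invalidates any TBs translated
 * from the page, performs the store, then sets the dirty bits. Once the page
 * is fully dirty (0xff) the TLB entry is switched back so later stores hit
 * RAM directly, without this detour. */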
Avi Kivitya8170e52012-10-23 12:30:10 +02001442static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001443 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00001444{
bellard3a7d9292005-08-21 09:26:42 +00001445 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001446 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00001447 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1448#if !defined(CONFIG_USER_ONLY)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001449 tb_invalidate_phys_page_fast(ram_addr, size);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001450 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00001451#endif
1452 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001453 switch (size) {
1454 case 1:
1455 stb_p(qemu_get_ram_ptr(ram_addr), val);
1456 break;
1457 case 2:
1458 stw_p(qemu_get_ram_ptr(ram_addr), val);
1459 break;
1460 case 4:
1461 stl_p(qemu_get_ram_ptr(ram_addr), val);
1462 break;
1463 default:
1464 abort();
1465 }
bellardf23db162005-08-21 19:12:28 +00001466 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001467 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00001468 /* we remove the notdirty callback only if the code has been
1469 flushed */
1470 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00001471 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00001472}
1473
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001474static const MemoryRegionOps notdirty_mem_ops = {
1475 .read = error_mem_read,
1476 .write = notdirty_mem_write,
1477 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00001478};
1479
pbrook0f459d12008-06-09 00:20:13 +00001480/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00001481static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00001482{
Andreas Färber9349b4f2012-03-14 01:38:32 +01001483 CPUArchState *env = cpu_single_env;
aliguori06d55cc2008-11-18 20:24:06 +00001484 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00001485 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00001486 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00001487 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00001488
aliguori06d55cc2008-11-18 20:24:06 +00001489 if (env->watchpoint_hit) {
1490 /* We re-entered the check after replacing the TB. Now raise
1491 * the debug interrupt so that it will trigger after the
1492 * current instruction. */
Andreas Färberc3affe52013-01-18 15:03:43 +01001493 cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00001494 return;
1495 }
pbrook2e70f6e2008-06-29 01:03:05 +00001496 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00001497 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001498 if ((vaddr == (wp->vaddr & len_mask) ||
1499 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00001500 wp->flags |= BP_WATCHPOINT_HIT;
1501 if (!env->watchpoint_hit) {
1502 env->watchpoint_hit = wp;
Blue Swirl5a316522012-12-02 21:28:09 +00001503 tb_check_watchpoint(env);
aliguori6e140f22008-11-18 20:37:55 +00001504 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1505 env->exception_index = EXCP_DEBUG;
Max Filippov488d6572012-01-29 02:24:39 +04001506 cpu_loop_exit(env);
aliguori6e140f22008-11-18 20:37:55 +00001507 } else {
1508 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1509 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
Max Filippov488d6572012-01-29 02:24:39 +04001510 cpu_resume_from_signal(env, NULL);
aliguori6e140f22008-11-18 20:37:55 +00001511 }
aliguori06d55cc2008-11-18 20:24:06 +00001512 }
aliguori6e140f22008-11-18 20:37:55 +00001513 } else {
1514 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00001515 }
1516 }
1517}
1518
pbrook6658ffb2007-03-16 23:58:11 +00001519/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1520 so these check for a hit then pass through to the normal out-of-line
1521 phys routines. */
Avi Kivitya8170e52012-10-23 12:30:10 +02001522static uint64_t watch_mem_read(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001523 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001524{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001525 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1526 switch (size) {
1527 case 1: return ldub_phys(addr);
1528 case 2: return lduw_phys(addr);
1529 case 4: return ldl_phys(addr);
1530 default: abort();
1531 }
pbrook6658ffb2007-03-16 23:58:11 +00001532}
1533
Avi Kivitya8170e52012-10-23 12:30:10 +02001534static void watch_mem_write(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001535 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001536{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001537 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1538 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04001539 case 1:
1540 stb_phys(addr, val);
1541 break;
1542 case 2:
1543 stw_phys(addr, val);
1544 break;
1545 case 4:
1546 stl_phys(addr, val);
1547 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02001548 default: abort();
1549 }
pbrook6658ffb2007-03-16 23:58:11 +00001550}
1551
Avi Kivity1ec9b902012-01-02 12:47:48 +02001552static const MemoryRegionOps watch_mem_ops = {
1553 .read = watch_mem_read,
1554 .write = watch_mem_write,
1555 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00001556};
pbrook6658ffb2007-03-16 23:58:11 +00001557
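/* Sketch of the path that arms these handlers (illustrative, not a complete
 * debugger): inserting a watchpoint forces the page's TLB entries through
 * io_mem_watch, so every access lands in check_watchpoint() first.
 *
 *     CPUWatchpoint *wp;                         // hypothetical caller state
 *     cpu_watchpoint_insert(env, vaddr, 4, BP_MEM_WRITE, &wp);
 *     // ... guest stores to vaddr now raise EXCP_DEBUG ...
 *     cpu_watchpoint_remove_by_ref(env, wp);
 */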
Avi Kivitya8170e52012-10-23 12:30:10 +02001558static uint64_t subpage_read(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001559 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001560{
Avi Kivity70c68e42012-01-02 12:32:48 +02001561 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07001562 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02001563 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00001564#if defined(DEBUG_SUBPAGE)
1565 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
1566 mmio, len, addr, idx);
1567#endif
blueswir1db7b5422007-05-26 17:36:03 +00001568
Avi Kivity5312bd82012-02-12 18:32:55 +02001569 section = &phys_sections[mmio->sub_section[idx]];
1570 addr += mmio->base;
1571 addr -= section->offset_within_address_space;
1572 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02001573 return io_mem_read(section->mr, addr, len);
blueswir1db7b5422007-05-26 17:36:03 +00001574}
1575
Avi Kivitya8170e52012-10-23 12:30:10 +02001576static void subpage_write(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001577 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001578{
Avi Kivity70c68e42012-01-02 12:32:48 +02001579 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07001580 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02001581 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00001582#if defined(DEBUG_SUBPAGE)
Avi Kivity70c68e42012-01-02 12:32:48 +02001583 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
1584 " idx %d value %"PRIx64"\n",
Richard Hendersonf6405242010-04-22 16:47:31 -07001585 __func__, mmio, len, addr, idx, value);
blueswir1db7b5422007-05-26 17:36:03 +00001586#endif
Richard Hendersonf6405242010-04-22 16:47:31 -07001587
Avi Kivity5312bd82012-02-12 18:32:55 +02001588 section = &phys_sections[mmio->sub_section[idx]];
1589 addr += mmio->base;
1590 addr -= section->offset_within_address_space;
1591 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02001592 io_mem_write(section->mr, addr, value, len);
blueswir1db7b5422007-05-26 17:36:03 +00001593}
1594
Avi Kivity70c68e42012-01-02 12:32:48 +02001595static const MemoryRegionOps subpage_ops = {
1596 .read = subpage_read,
1597 .write = subpage_write,
1598 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00001599};
1600
Avi Kivitya8170e52012-10-23 12:30:10 +02001601static uint64_t subpage_ram_read(void *opaque, hwaddr addr,
Avi Kivityde712f92012-01-02 12:41:07 +02001602 unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01001603{
1604 ram_addr_t raddr = addr;
1605 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02001606 switch (size) {
1607 case 1: return ldub_p(ptr);
1608 case 2: return lduw_p(ptr);
1609 case 4: return ldl_p(ptr);
1610 default: abort();
1611 }
Andreas Färber56384e82011-11-30 16:26:21 +01001612}
1613
Avi Kivitya8170e52012-10-23 12:30:10 +02001614static void subpage_ram_write(void *opaque, hwaddr addr,
Avi Kivityde712f92012-01-02 12:41:07 +02001615 uint64_t value, unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01001616{
1617 ram_addr_t raddr = addr;
1618 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02001619 switch (size) {
1620 case 1: return stb_p(ptr, value);
1621 case 2: return stw_p(ptr, value);
1622 case 4: return stl_p(ptr, value);
1623 default: abort();
1624 }
Andreas Färber56384e82011-11-30 16:26:21 +01001625}
1626
Avi Kivityde712f92012-01-02 12:41:07 +02001627static const MemoryRegionOps subpage_ram_ops = {
1628 .read = subpage_ram_read,
1629 .write = subpage_ram_write,
1630 .endianness = DEVICE_NATIVE_ENDIAN,
Andreas Färber56384e82011-11-30 16:26:21 +01001631};
1632
Anthony Liguoric227f092009-10-01 16:12:16 -05001633static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02001634 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00001635{
1636 int idx, eidx;
1637
1638 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1639 return -1;
1640 idx = SUBPAGE_IDX(start);
1641 eidx = SUBPAGE_IDX(end);
1642#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00001643 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", __func__,
blueswir1db7b5422007-05-26 17:36:03 +00001644 mmio, start, end, idx, eidx, section);
1645#endif
Avi Kivity5312bd82012-02-12 18:32:55 +02001646 if (memory_region_is_ram(phys_sections[section].mr)) {
1647 MemoryRegionSection new_section = phys_sections[section];
1648 new_section.mr = &io_mem_subpage_ram;
1649 section = phys_section_add(&new_section);
Andreas Färber56384e82011-11-30 16:26:21 +01001650 }
blueswir1db7b5422007-05-26 17:36:03 +00001651 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02001652 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00001653 }
1654
1655 return 0;
1656}
1657
Avi Kivitya8170e52012-10-23 12:30:10 +02001658static subpage_t *subpage_init(hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00001659{
Anthony Liguoric227f092009-10-01 16:12:16 -05001660 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00001661
Anthony Liguori7267c092011-08-20 22:09:37 -05001662 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00001663
1664 mmio->base = base;
Avi Kivity70c68e42012-01-02 12:32:48 +02001665 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
1666 "subpage", TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02001667 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00001668#if defined(DEBUG_SUBPAGE)
aliguori1eec6142009-02-05 22:06:18 +00001669 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
1670 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00001671#endif
Avi Kivity0f0cb162012-02-13 17:14:32 +02001672 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
blueswir1db7b5422007-05-26 17:36:03 +00001673
1674 return mmio;
1675}
1676
Avi Kivity5312bd82012-02-12 18:32:55 +02001677static uint16_t dummy_section(MemoryRegion *mr)
1678{
1679 MemoryRegionSection section = {
1680 .mr = mr,
1681 .offset_within_address_space = 0,
1682 .offset_within_region = 0,
1683 .size = UINT64_MAX,
1684 };
1685
1686 return phys_section_add(&section);
1687}
1688
Avi Kivitya8170e52012-10-23 12:30:10 +02001689MemoryRegion *iotlb_to_region(hwaddr index)
Avi Kivityaa102232012-03-08 17:06:55 +02001690{
Avi Kivity37ec01d2012-03-08 18:08:35 +02001691 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02001692}
1693
Avi Kivitye9179ce2009-06-14 11:38:52 +03001694static void io_mem_init(void)
1695{
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001696 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001697 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
1698 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
1699 "unassigned", UINT64_MAX);
1700 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
1701 "notdirty", UINT64_MAX);
Avi Kivityde712f92012-01-02 12:41:07 +02001702 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
1703 "subpage-ram", UINT64_MAX);
Avi Kivity1ec9b902012-01-02 12:47:48 +02001704 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
1705 "watch", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03001706}
1707
Avi Kivityac1970f2012-10-03 16:22:53 +02001708static void mem_begin(MemoryListener *listener)
1709{
1710 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
1711
1712 destroy_all_mappings(d);
1713 d->phys_map.ptr = PHYS_MAP_NODE_NIL;
1714}
1715
Avi Kivity50c1e142012-02-08 21:36:02 +02001716static void core_begin(MemoryListener *listener)
1717{
Avi Kivity5312bd82012-02-12 18:32:55 +02001718 phys_sections_clear();
1719 phys_section_unassigned = dummy_section(&io_mem_unassigned);
Avi Kivityaa102232012-03-08 17:06:55 +02001720 phys_section_notdirty = dummy_section(&io_mem_notdirty);
1721 phys_section_rom = dummy_section(&io_mem_rom);
1722 phys_section_watch = dummy_section(&io_mem_watch);
Avi Kivity50c1e142012-02-08 21:36:02 +02001723}
1724
Avi Kivity1d711482012-10-02 18:54:45 +02001725static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02001726{
Andreas Färber9349b4f2012-03-14 01:38:32 +01001727 CPUArchState *env;
Avi Kivity117712c2012-02-12 21:23:17 +02001728
1729 /* since each CPU stores ram addresses in its TLB cache, we must
1730 reset the modified entries */
1731 /* XXX: slow ! */
1732 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1733 tlb_flush(env, 1);
1734 }
Avi Kivity50c1e142012-02-08 21:36:02 +02001735}
1736
Avi Kivity93632742012-02-08 16:54:16 +02001737static void core_log_global_start(MemoryListener *listener)
1738{
1739 cpu_physical_memory_set_dirty_tracking(1);
1740}
1741
1742static void core_log_global_stop(MemoryListener *listener)
1743{
1744 cpu_physical_memory_set_dirty_tracking(0);
1745}
1746
Avi Kivity4855d412012-02-08 21:16:05 +02001747static void io_region_add(MemoryListener *listener,
1748 MemoryRegionSection *section)
1749{
Avi Kivitya2d33522012-03-05 17:40:12 +02001750 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
1751
1752 mrio->mr = section->mr;
1753 mrio->offset = section->offset_within_region;
1754 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
Avi Kivity4855d412012-02-08 21:16:05 +02001755 section->offset_within_address_space, section->size);
Avi Kivitya2d33522012-03-05 17:40:12 +02001756 ioport_register(&mrio->iorange);
Avi Kivity4855d412012-02-08 21:16:05 +02001757}
1758
1759static void io_region_del(MemoryListener *listener,
1760 MemoryRegionSection *section)
1761{
1762 isa_unassign_ioport(section->offset_within_address_space, section->size);
1763}
1764
Avi Kivity93632742012-02-08 16:54:16 +02001765static MemoryListener core_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02001766 .begin = core_begin,
Avi Kivity93632742012-02-08 16:54:16 +02001767 .log_global_start = core_log_global_start,
1768 .log_global_stop = core_log_global_stop,
Avi Kivityac1970f2012-10-03 16:22:53 +02001769 .priority = 1,
Avi Kivity93632742012-02-08 16:54:16 +02001770};
1771
Avi Kivity4855d412012-02-08 21:16:05 +02001772static MemoryListener io_memory_listener = {
1773 .region_add = io_region_add,
1774 .region_del = io_region_del,
Avi Kivity4855d412012-02-08 21:16:05 +02001775 .priority = 0,
1776};
1777
Avi Kivity1d711482012-10-02 18:54:45 +02001778static MemoryListener tcg_memory_listener = {
1779 .commit = tcg_commit,
1780};
1781
Avi Kivityac1970f2012-10-03 16:22:53 +02001782void address_space_init_dispatch(AddressSpace *as)
1783{
1784 AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
1785
1786 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
1787 d->listener = (MemoryListener) {
1788 .begin = mem_begin,
1789 .region_add = mem_add,
1790 .region_nop = mem_add,
1791 .priority = 0,
1792 };
1793 as->dispatch = d;
1794 memory_listener_register(&d->listener, as);
1795}
1796
Avi Kivity83f3c252012-10-07 12:59:55 +02001797void address_space_destroy_dispatch(AddressSpace *as)
1798{
1799 AddressSpaceDispatch *d = as->dispatch;
1800
1801 memory_listener_unregister(&d->listener);
1802 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
1803 g_free(d);
1804 as->dispatch = NULL;
1805}
1806
Avi Kivity62152b82011-07-26 14:26:14 +03001807static void memory_map_init(void)
1808{
Anthony Liguori7267c092011-08-20 22:09:37 -05001809 system_memory = g_malloc(sizeof(*system_memory));
Avi Kivity8417ceb2011-08-03 11:56:14 +03001810 memory_region_init(system_memory, "system", INT64_MAX);
Avi Kivity2673a5d2012-10-02 18:49:28 +02001811 address_space_init(&address_space_memory, system_memory);
1812 address_space_memory.name = "memory";
Avi Kivity309cb472011-08-08 16:09:03 +03001813
Anthony Liguori7267c092011-08-20 22:09:37 -05001814 system_io = g_malloc(sizeof(*system_io));
Avi Kivity309cb472011-08-08 16:09:03 +03001815 memory_region_init(system_io, "io", 65536);
Avi Kivity2673a5d2012-10-02 18:49:28 +02001816 address_space_init(&address_space_io, system_io);
1817 address_space_io.name = "I/O";
Avi Kivity93632742012-02-08 16:54:16 +02001818
Avi Kivityf6790af2012-10-02 20:13:51 +02001819 memory_listener_register(&core_memory_listener, &address_space_memory);
1820 memory_listener_register(&io_memory_listener, &address_space_io);
1821 memory_listener_register(&tcg_memory_listener, &address_space_memory);
Peter Maydell9e119082012-10-29 11:34:32 +10001822
1823 dma_context_init(&dma_context_memory, &address_space_memory,
1824 NULL, NULL, NULL);
Avi Kivity62152b82011-07-26 14:26:14 +03001825}
1826
1827MemoryRegion *get_system_memory(void)
1828{
1829 return system_memory;
1830}
1831
Avi Kivity309cb472011-08-08 16:09:03 +03001832MemoryRegion *get_system_io(void)
1833{
1834 return system_io;
1835}
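/* Board-level usage sketch (assumed names; migration setup omitted): once
 * memory_map_init() has run, machine code populates the system address space
 * through the memory API.
 *
 *     static MemoryRegion demo_ram;              // hypothetical board RAM
 *
 *     static void demo_board_init(ram_addr_t ram_size)
 *     {
 *         memory_region_init_ram(&demo_ram, "demo.ram", ram_size);
 *         memory_region_add_subregion(get_system_memory(), 0, &demo_ram);
 *     }
 */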
1836
pbrooke2eef172008-06-08 01:09:01 +00001837#endif /* !defined(CONFIG_USER_ONLY) */
1838
bellard13eb76e2004-01-24 15:23:36 +00001839/* physical memory access (slow version, mainly for debug) */
1840#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +01001841int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00001842 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00001843{
1844 int l, flags;
1845 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00001846 void * p;
bellard13eb76e2004-01-24 15:23:36 +00001847
1848 while (len > 0) {
1849 page = addr & TARGET_PAGE_MASK;
1850 l = (page + TARGET_PAGE_SIZE) - addr;
1851 if (l > len)
1852 l = len;
1853 flags = page_get_flags(page);
1854 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00001855 return -1;
bellard13eb76e2004-01-24 15:23:36 +00001856 if (is_write) {
1857 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00001858 return -1;
bellard579a97f2007-11-11 14:26:47 +00001859 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00001860 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00001861 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00001862 memcpy(p, buf, l);
1863 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00001864 } else {
1865 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00001866 return -1;
bellard579a97f2007-11-11 14:26:47 +00001867 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00001868 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00001869 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00001870 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00001871 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00001872 }
1873 len -= l;
1874 buf += l;
1875 addr += l;
1876 }
Paul Brooka68fe892010-03-01 00:08:59 +00001877 return 0;
bellard13eb76e2004-01-24 15:23:36 +00001878}
bellard8df1cd02005-01-28 22:37:22 +00001879
bellard13eb76e2004-01-24 15:23:36 +00001880#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001881
Avi Kivitya8170e52012-10-23 12:30:10 +02001882static void invalidate_and_set_dirty(hwaddr addr,
1883 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001884{
1885 if (!cpu_physical_memory_is_dirty(addr)) {
1886 /* invalidate code */
1887 tb_invalidate_phys_page_range(addr, addr + length, 0);
1888 /* set dirty bit */
1889 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1890 }
Anthony PERARDe2269392012-10-03 13:49:22 +00001891 xen_modified_memory(addr, length);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001892}
1893
Avi Kivitya8170e52012-10-23 12:30:10 +02001894void address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02001895 int len, bool is_write)
bellard13eb76e2004-01-24 15:23:36 +00001896{
Avi Kivityac1970f2012-10-03 16:22:53 +02001897 AddressSpaceDispatch *d = as->dispatch;
Avi Kivity37ec01d2012-03-08 18:08:35 +02001898 int l;
bellard13eb76e2004-01-24 15:23:36 +00001899 uint8_t *ptr;
1900 uint32_t val;
Avi Kivitya8170e52012-10-23 12:30:10 +02001901 hwaddr page;
Avi Kivityf3705d52012-03-08 16:16:34 +02001902 MemoryRegionSection *section;
ths3b46e622007-09-17 08:09:54 +00001903
bellard13eb76e2004-01-24 15:23:36 +00001904 while (len > 0) {
1905 page = addr & TARGET_PAGE_MASK;
1906 l = (page + TARGET_PAGE_SIZE) - addr;
1907 if (l > len)
1908 l = len;
Avi Kivityac1970f2012-10-03 16:22:53 +02001909 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00001910
bellard13eb76e2004-01-24 15:23:36 +00001911 if (is_write) {
Avi Kivityf3705d52012-03-08 16:16:34 +02001912 if (!memory_region_is_ram(section->mr)) {
Avi Kivitya8170e52012-10-23 12:30:10 +02001913 hwaddr addr1;
Blue Swirlcc5bea62012-04-14 14:56:48 +00001914 addr1 = memory_region_section_addr(section, addr);
bellard6a00d602005-11-21 23:25:50 +00001915 /* XXX: could force cpu_single_env to NULL to avoid
1916 potential bugs */
aurel326c2934d2009-02-18 21:37:17 +00001917 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00001918 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001919 val = ldl_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02001920 io_mem_write(section->mr, addr1, val, 4);
bellard13eb76e2004-01-24 15:23:36 +00001921 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00001922 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00001923 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001924 val = lduw_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02001925 io_mem_write(section->mr, addr1, val, 2);
bellard13eb76e2004-01-24 15:23:36 +00001926 l = 2;
1927 } else {
bellard1c213d12005-09-03 10:49:04 +00001928 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001929 val = ldub_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02001930 io_mem_write(section->mr, addr1, val, 1);
bellard13eb76e2004-01-24 15:23:36 +00001931 l = 1;
1932 }
Avi Kivityf3705d52012-03-08 16:16:34 +02001933 } else if (!section->readonly) {
Anthony PERARD8ca56922011-07-15 04:32:53 +00001934 ram_addr_t addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02001935 addr1 = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00001936 + memory_region_section_addr(section, addr);
bellard13eb76e2004-01-24 15:23:36 +00001937 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00001938 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00001939 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001940 invalidate_and_set_dirty(addr1, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01001941 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00001942 }
1943 } else {
Blue Swirlcc5bea62012-04-14 14:56:48 +00001944 if (!(memory_region_is_ram(section->mr) ||
1945 memory_region_is_romd(section->mr))) {
Avi Kivitya8170e52012-10-23 12:30:10 +02001946 hwaddr addr1;
bellard13eb76e2004-01-24 15:23:36 +00001947 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00001948 addr1 = memory_region_section_addr(section, addr);
aurel326c2934d2009-02-18 21:37:17 +00001949 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00001950 /* 32 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02001951 val = io_mem_read(section->mr, addr1, 4);
bellardc27004e2005-01-03 23:35:10 +00001952 stl_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00001953 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00001954 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00001955 /* 16 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02001956 val = io_mem_read(section->mr, addr1, 2);
bellardc27004e2005-01-03 23:35:10 +00001957 stw_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00001958 l = 2;
1959 } else {
bellard1c213d12005-09-03 10:49:04 +00001960 /* 8 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02001961 val = io_mem_read(section->mr, addr1, 1);
bellardc27004e2005-01-03 23:35:10 +00001962 stb_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00001963 l = 1;
1964 }
1965 } else {
1966 /* RAM case */
Anthony PERARD0a1b3572012-03-19 15:54:34 +00001967 ptr = qemu_get_ram_ptr(section->mr->ram_addr
Blue Swirlcc5bea62012-04-14 14:56:48 +00001968 + memory_region_section_addr(section,
1969 addr));
Avi Kivityf3705d52012-03-08 16:16:34 +02001970 memcpy(buf, ptr, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01001971 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00001972 }
1973 }
1974 len -= l;
1975 buf += l;
1976 addr += l;
1977 }
1978}
bellard8df1cd02005-01-28 22:37:22 +00001979
Avi Kivitya8170e52012-10-23 12:30:10 +02001980void address_space_write(AddressSpace *as, hwaddr addr,
Avi Kivityac1970f2012-10-03 16:22:53 +02001981 const uint8_t *buf, int len)
1982{
1983 address_space_rw(as, addr, (uint8_t *)buf, len, true);
1984}
1985
1986/**
1987 * address_space_read: read from an address space.
1988 *
1989 * @as: #AddressSpace to be accessed
1990 * @addr: address within that address space
1991 * @buf: buffer with the data transferred
1992 */
Avi Kivitya8170e52012-10-23 12:30:10 +02001993void address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02001994{
1995 address_space_rw(as, addr, buf, len, false);
1996}
1997
1998
Avi Kivitya8170e52012-10-23 12:30:10 +02001999void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02002000 int len, int is_write)
2001{
2002 return address_space_rw(&address_space_memory, addr, buf, len, is_write);
2003}
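/* Caller's-eye sketch (illustrative buffers): these helpers copy through any
 * mix of RAM and MMIO that the physical address range touches.
 *
 *     uint8_t buf[16];
 *     cpu_physical_memory_read(0x1000, buf, sizeof(buf));
 *     buf[0] ^= 1;
 *     cpu_physical_memory_write(0x1000, buf, sizeof(buf));
 *
 * cpu_physical_memory_read/write are thin convenience wrappers around
 * cpu_physical_memory_rw(); device code holding an explicit AddressSpace
 * should prefer the address_space_rw() family above.
 */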
2004
bellardd0ecd2a2006-04-23 17:14:48 +00002005/* used for ROM loading : can write in RAM and ROM */
Avi Kivitya8170e52012-10-23 12:30:10 +02002006void cpu_physical_memory_write_rom(hwaddr addr,
bellardd0ecd2a2006-04-23 17:14:48 +00002007 const uint8_t *buf, int len)
2008{
Avi Kivityac1970f2012-10-03 16:22:53 +02002009 AddressSpaceDispatch *d = address_space_memory.dispatch;
bellardd0ecd2a2006-04-23 17:14:48 +00002010 int l;
2011 uint8_t *ptr;
Avi Kivitya8170e52012-10-23 12:30:10 +02002012 hwaddr page;
Avi Kivityf3705d52012-03-08 16:16:34 +02002013 MemoryRegionSection *section;
ths3b46e622007-09-17 08:09:54 +00002014
bellardd0ecd2a2006-04-23 17:14:48 +00002015 while (len > 0) {
2016 page = addr & TARGET_PAGE_MASK;
2017 l = (page + TARGET_PAGE_SIZE) - addr;
2018 if (l > len)
2019 l = len;
Avi Kivityac1970f2012-10-03 16:22:53 +02002020 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00002021
Blue Swirlcc5bea62012-04-14 14:56:48 +00002022 if (!(memory_region_is_ram(section->mr) ||
2023 memory_region_is_romd(section->mr))) {
bellardd0ecd2a2006-04-23 17:14:48 +00002024 /* do nothing */
2025 } else {
2026 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02002027 addr1 = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002028 + memory_region_section_addr(section, addr);
bellardd0ecd2a2006-04-23 17:14:48 +00002029 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002030 ptr = qemu_get_ram_ptr(addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00002031 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002032 invalidate_and_set_dirty(addr1, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002033 qemu_put_ram_ptr(ptr);
bellardd0ecd2a2006-04-23 17:14:48 +00002034 }
2035 len -= l;
2036 buf += l;
2037 addr += l;
2038 }
2039}
2040
aliguori6d16c2f2009-01-22 16:59:11 +00002041typedef struct {
2042 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002043 hwaddr addr;
2044 hwaddr len;
aliguori6d16c2f2009-01-22 16:59:11 +00002045} BounceBuffer;
2046
2047static BounceBuffer bounce;
2048
aliguoriba223c22009-01-22 16:59:16 +00002049typedef struct MapClient {
2050 void *opaque;
2051 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00002052 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002053} MapClient;
2054
Blue Swirl72cf2d42009-09-12 07:36:22 +00002055static QLIST_HEAD(map_client_list, MapClient) map_client_list
2056 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002057
2058void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2059{
Anthony Liguori7267c092011-08-20 22:09:37 -05002060 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00002061
2062 client->opaque = opaque;
2063 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002064 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00002065 return client;
2066}
2067
Blue Swirl8b9c99d2012-10-28 11:04:51 +00002068static void cpu_unregister_map_client(void *_client)
aliguoriba223c22009-01-22 16:59:16 +00002069{
2070 MapClient *client = (MapClient *)_client;
2071
Blue Swirl72cf2d42009-09-12 07:36:22 +00002072 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002073 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002074}
2075
2076static void cpu_notify_map_clients(void)
2077{
2078 MapClient *client;
2079
Blue Swirl72cf2d42009-09-12 07:36:22 +00002080 while (!QLIST_EMPTY(&map_client_list)) {
2081 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002082 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09002083 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00002084 }
2085}
2086
aliguori6d16c2f2009-01-22 16:59:11 +00002087/* Map a physical memory region into a host virtual address.
2088 * May map a subset of the requested range, given by and returned in *plen.
2089 * May return NULL if resources needed to perform the mapping are exhausted.
2090 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002091 * Use cpu_register_map_client() to know when retrying the map operation is
2092 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002093 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002094void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002095 hwaddr addr,
2096 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002097 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002098{
Avi Kivityac1970f2012-10-03 16:22:53 +02002099 AddressSpaceDispatch *d = as->dispatch;
Avi Kivitya8170e52012-10-23 12:30:10 +02002100 hwaddr len = *plen;
2101 hwaddr todo = 0;
aliguori6d16c2f2009-01-22 16:59:11 +00002102 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02002103 hwaddr page;
Avi Kivityf3705d52012-03-08 16:16:34 +02002104 MemoryRegionSection *section;
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002105 ram_addr_t raddr = RAM_ADDR_MAX;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002106 ram_addr_t rlen;
2107 void *ret;
aliguori6d16c2f2009-01-22 16:59:11 +00002108
2109 while (len > 0) {
2110 page = addr & TARGET_PAGE_MASK;
2111 l = (page + TARGET_PAGE_SIZE) - addr;
2112 if (l > len)
2113 l = len;
Avi Kivityac1970f2012-10-03 16:22:53 +02002114 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
aliguori6d16c2f2009-01-22 16:59:11 +00002115
Avi Kivityf3705d52012-03-08 16:16:34 +02002116 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002117 if (todo || bounce.buffer) {
aliguori6d16c2f2009-01-22 16:59:11 +00002118 break;
2119 }
2120 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
2121 bounce.addr = addr;
2122 bounce.len = l;
2123 if (!is_write) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002124 address_space_read(as, addr, bounce.buffer, l);
aliguori6d16c2f2009-01-22 16:59:11 +00002125 }
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002126
2127 *plen = l;
2128 return bounce.buffer;
aliguori6d16c2f2009-01-22 16:59:11 +00002129 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002130 if (!todo) {
Avi Kivityf3705d52012-03-08 16:16:34 +02002131 raddr = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002132 + memory_region_section_addr(section, addr);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002133 }
aliguori6d16c2f2009-01-22 16:59:11 +00002134
2135 len -= l;
2136 addr += l;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002137 todo += l;
aliguori6d16c2f2009-01-22 16:59:11 +00002138 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002139 rlen = todo;
2140 ret = qemu_ram_ptr_length(raddr, &rlen);
2141 *plen = rlen;
2142 return ret;
aliguori6d16c2f2009-01-22 16:59:11 +00002143}
2144
Avi Kivityac1970f2012-10-03 16:22:53 +02002145/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002146 * Will also mark the memory as dirty if is_write == 1. access_len gives
2147 * the amount of memory that was actually read or written by the caller.
2148 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002149void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2150 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002151{
2152 if (buffer != bounce.buffer) {
2153 if (is_write) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03002154 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002155 while (access_len) {
2156 unsigned l;
2157 l = TARGET_PAGE_SIZE;
2158 if (l > access_len)
2159 l = access_len;
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002160 invalidate_and_set_dirty(addr1, l);
aliguori6d16c2f2009-01-22 16:59:11 +00002161 addr1 += l;
2162 access_len -= l;
2163 }
2164 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002165 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002166 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002167 }
aliguori6d16c2f2009-01-22 16:59:11 +00002168 return;
2169 }
2170 if (is_write) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002171 address_space_write(as, bounce.addr, bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002172 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00002173 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002174 bounce.buffer = NULL;
aliguoriba223c22009-01-22 16:59:16 +00002175 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00002176}
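/* Zero-copy DMA sketch (hedged; error handling simplified): map, touch guest
 * memory directly, then unmap with the length actually transferred. When the
 * bounce buffer is busy, address_space_map() returns NULL and the caller can
 * park itself on cpu_register_map_client() until a retry may succeed.
 *
 *     hwaddr len = size;
 *     void *p = address_space_map(&address_space_memory, gpa, &len, true);
 *     if (!p) {
 *         cpu_register_map_client(dev, demo_retry_cb);  // hypothetical cb
 *         return;
 *     }
 *     memcpy(p, data, len);                     // len may be less than size
 *     address_space_unmap(&address_space_memory, p, len, true, len);
 */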
bellardd0ecd2a2006-04-23 17:14:48 +00002177
Avi Kivitya8170e52012-10-23 12:30:10 +02002178void *cpu_physical_memory_map(hwaddr addr,
2179 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002180 int is_write)
2181{
2182 return address_space_map(&address_space_memory, addr, plen, is_write);
2183}
2184
Avi Kivitya8170e52012-10-23 12:30:10 +02002185void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2186 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002187{
2188 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2189}
2190
bellard8df1cd02005-01-28 22:37:22 +00002191/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002192static inline uint32_t ldl_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002193 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002194{
bellard8df1cd02005-01-28 22:37:22 +00002195 uint8_t *ptr;
2196 uint32_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02002197 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00002198
Avi Kivityac1970f2012-10-03 16:22:53 +02002199 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00002200
Blue Swirlcc5bea62012-04-14 14:56:48 +00002201 if (!(memory_region_is_ram(section->mr) ||
2202 memory_region_is_romd(section->mr))) {
bellard8df1cd02005-01-28 22:37:22 +00002203 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00002204 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02002205 val = io_mem_read(section->mr, addr, 4);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002206#if defined(TARGET_WORDS_BIGENDIAN)
2207 if (endian == DEVICE_LITTLE_ENDIAN) {
2208 val = bswap32(val);
2209 }
2210#else
2211 if (endian == DEVICE_BIG_ENDIAN) {
2212 val = bswap32(val);
2213 }
2214#endif
bellard8df1cd02005-01-28 22:37:22 +00002215 } else {
2216 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02002217 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002218 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002219 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002220 switch (endian) {
2221 case DEVICE_LITTLE_ENDIAN:
2222 val = ldl_le_p(ptr);
2223 break;
2224 case DEVICE_BIG_ENDIAN:
2225 val = ldl_be_p(ptr);
2226 break;
2227 default:
2228 val = ldl_p(ptr);
2229 break;
2230 }
bellard8df1cd02005-01-28 22:37:22 +00002231 }
2232 return val;
2233}
2234
Avi Kivitya8170e52012-10-23 12:30:10 +02002235uint32_t ldl_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002236{
2237 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2238}
2239
Avi Kivitya8170e52012-10-23 12:30:10 +02002240uint32_t ldl_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002241{
2242 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2243}
2244
Avi Kivitya8170e52012-10-23 12:30:10 +02002245uint32_t ldl_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002246{
2247 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
2248}
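/* Endianness note with a worked example (values illustrative): for guest RAM
 * containing the bytes 01 02 03 04 at gpa,
 *
 *     ldl_le_phys(gpa) == 0x04030201
 *     ldl_be_phys(gpa) == 0x01020304
 *     ldl_phys(gpa)    == whichever of the two TARGET_WORDS_BIGENDIAN selects
 *
 * so device models should pick the _le/_be variant matching the bus they
 * model rather than relying on the guest CPU's byte order.
 */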
2249
bellard84b7b8e2005-11-28 21:19:04 +00002250/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002251static inline uint64_t ldq_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002252 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00002253{
bellard84b7b8e2005-11-28 21:19:04 +00002254 uint8_t *ptr;
2255 uint64_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02002256 MemoryRegionSection *section;
bellard84b7b8e2005-11-28 21:19:04 +00002257
Avi Kivityac1970f2012-10-03 16:22:53 +02002258 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00002259
Blue Swirlcc5bea62012-04-14 14:56:48 +00002260 if (!(memory_region_is_ram(section->mr) ||
2261 memory_region_is_romd(section->mr))) {
bellard84b7b8e2005-11-28 21:19:04 +00002262 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00002263 addr = memory_region_section_addr(section, addr);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002264
2265 /* XXX This is broken when device endian != cpu endian.
2266 Fix and add "endian" variable check */
bellard84b7b8e2005-11-28 21:19:04 +00002267#ifdef TARGET_WORDS_BIGENDIAN
Avi Kivity37ec01d2012-03-08 18:08:35 +02002268 val = io_mem_read(section->mr, addr, 4) << 32;
2269 val |= io_mem_read(section->mr, addr + 4, 4);
bellard84b7b8e2005-11-28 21:19:04 +00002270#else
Avi Kivity37ec01d2012-03-08 18:08:35 +02002271 val = io_mem_read(section->mr, addr, 4);
2272 val |= io_mem_read(section->mr, addr + 4, 4) << 32;
bellard84b7b8e2005-11-28 21:19:04 +00002273#endif
2274 } else {
2275 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02002276 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002277 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002278 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002279 switch (endian) {
2280 case DEVICE_LITTLE_ENDIAN:
2281 val = ldq_le_p(ptr);
2282 break;
2283 case DEVICE_BIG_ENDIAN:
2284 val = ldq_be_p(ptr);
2285 break;
2286 default:
2287 val = ldq_p(ptr);
2288 break;
2289 }
bellard84b7b8e2005-11-28 21:19:04 +00002290 }
2291 return val;
2292}
2293
Avi Kivitya8170e52012-10-23 12:30:10 +02002294uint64_t ldq_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002295{
2296 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2297}
2298
Avi Kivitya8170e52012-10-23 12:30:10 +02002299uint64_t ldq_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002300{
2301 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2302}
2303
Avi Kivitya8170e52012-10-23 12:30:10 +02002304uint64_t ldq_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002305{
2306 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2307}
2308
bellardaab33092005-10-30 20:48:42 +00002309/* XXX: optimize */
Avi Kivitya8170e52012-10-23 12:30:10 +02002310uint32_t ldub_phys(hwaddr addr)
bellardaab33092005-10-30 20:48:42 +00002311{
2312 uint8_t val;
2313 cpu_physical_memory_read(addr, &val, 1);
2314 return val;
2315}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(hwaddr addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
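
/*
 * Minimal sketch of the alignment contract documented above: neither the
 * I/O nor the RAM path of lduw_phys_internal() splits the access, so the
 * address must be halfword-aligned.  The helper name is hypothetical.
 */
static inline uint32_t example_lduw_aligned(hwaddr addr)
{
    assert((addr & 1) == 0);    /* misaligned halfword reads unsupported */
    return lduw_phys(addr);     /* guest-native byte order */
}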

/* warning: addr must be aligned. The RAM page is not marked dirty
   and the code inside is not invalidated. This is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(hwaddr addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
                               & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
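
/*
 * Sketch of the intended use of stl_phys_notdirty(), per the comment
 * above: when a softmmu target updates accessed/dirty bits inside a
 * guest page table entry, that write should not mark the page dirty for
 * migration or invalidate translated code.  The helper name and the
 * "accessed" bit position below are hypothetical.
 */
static inline void example_set_pte_accessed(hwaddr pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);
    stl_phys_notdirty(pte_addr, pte | (1u << 5));  /* hypothetical A bit */
}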

void stq_phys_notdirty(hwaddr addr, uint64_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write(section->mr, addr, val >> 32, 4);
        io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
#else
        io_mem_write(section->mr, addr, (uint32_t)val, 4);
        io_mem_write(section->mr, addr + 4, val >> 32, 4);
#endif
    } else {
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
static inline void stl_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
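
/*
 * Usage sketch for the stl_phys variants above: code poking a device
 * whose registers are defined as little-endian should use stl_le_phys(),
 * so the guest-visible layout is identical on big- and little-endian
 * targets.  EXAMPLE_DOORBELL_ADDR and the helper are hypothetical.
 */
#define EXAMPLE_DOORBELL_ADDR 0xfe000000ULL
static inline void example_ring_doorbell(uint32_t value)
{
    stl_le_phys(EXAMPLE_DOORBELL_ADDR, value);
}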

/* XXX: optimize */
void stb_phys(hwaddr addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 2);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}
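
/*
 * Sketch contrasting the stq variants above: stq_phys() swaps with
 * tswap64() (guest-native order), while stq_le_phys()/stq_be_phys()
 * store in a fixed order via cpu_to_le64()/cpu_to_be64().  The helper
 * and the addresses are hypothetical.
 */
static inline void example_store_u64_both_orders(hwaddr addr, uint64_t v)
{
    stq_le_phys(addr, v);        /* byte (v & 0xff) lands first */
    stq_be_phys(addr + 8, v);    /* byte (v >> 56) lands first */
}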
2583
aliguori5e2972f2009-03-28 17:51:36 +00002584/* virtual memory access for debug (includes writing to ROM) */
Andreas Färber9349b4f2012-03-14 01:38:32 +01002585int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00002586 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00002587{
2588 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02002589 hwaddr phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00002590 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00002591
2592 while (len > 0) {
2593 page = addr & TARGET_PAGE_MASK;
2594 phys_addr = cpu_get_phys_page_debug(env, page);
2595 /* if no physical page mapped, return an error */
2596 if (phys_addr == -1)
2597 return -1;
2598 l = (page + TARGET_PAGE_SIZE) - addr;
2599 if (l > len)
2600 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00002601 phys_addr += (addr & ~TARGET_PAGE_MASK);
aliguori5e2972f2009-03-28 17:51:36 +00002602 if (is_write)
2603 cpu_physical_memory_write_rom(phys_addr, buf, l);
2604 else
aliguori5e2972f2009-03-28 17:51:36 +00002605 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
bellard13eb76e2004-01-24 15:23:36 +00002606 len -= l;
2607 buf += l;
2608 addr += l;
2609 }
2610 return 0;
2611}
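
/*
 * Usage sketch for cpu_memory_rw_debug(): this is the kind of path a
 * debugger stub takes to touch guest virtual memory, since the function
 * walks the page tables via cpu_get_phys_page_debug() and can patch ROM.
 * The helper name and breakpoint opcode below are hypothetical examples.
 */
static inline int example_insert_breakpoint_byte(CPUArchState *env,
                                                 target_ulong vaddr)
{
    uint8_t bkpt = 0xcc;    /* x86 int3, as a hypothetical example */
    return cpu_memory_rw_debug(env, vaddr, &bkpt, 1, 1);
}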
#endif

#if !defined(CONFIG_USER_ONLY)

/*
 * A helper function for the _utterly broken_ virtio device model to find
 * out if it's running on a big-endian machine. Don't do this at home, kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#endif

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             phys_addr >> TARGET_PAGE_BITS);

    return !(memory_region_is_ram(section->mr) ||
             memory_region_is_romd(section->mr));
}
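
/*
 * Usage sketch for cpu_physical_memory_is_io(): a caller scanning guest
 * memory (e.g. for a dump) can skip MMIO pages, where reads may have
 * side effects.  The helper and range parameters are hypothetical.
 */
static inline bool example_range_is_plain_ram(hwaddr start, hwaddr len)
{
    hwaddr a;
    for (a = start; a < start + len; a += TARGET_PAGE_SIZE) {
        if (cpu_physical_memory_is_io(a)) {
            return false;
        }
    }
    return true;
}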
#endif