/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "dma.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#include "cputlb.h"
#include "translate-all.h"

#include "memory-internal.h"

//#define DEBUG_UNASSIGNED
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;
DMAContext dma_context_memory;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

#if !defined(CONFIG_USER_ONLY)

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

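/* The physical page map is a multi-level table (a radix tree) mapping a
 * physical page index to an index into the phys_sections array.  Interior
 * nodes come from the simple growable pool below (phys_map_nodes); a leaf
 * stores a MemoryRegionSection index in PhysPageEntry.ptr with is_leaf set.
 */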
static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}

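/* Install 'leaf' for the pages covered by *index/*nb at one level of the
 * table.  A run that is aligned to and covers a whole subtree at this level
 * is recorded as a leaf here; partial runs recurse one level down.  Missing
 * interior nodes are allocated on demand and, at the bottom level, start
 * out pointing at the unassigned section.
 */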
static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

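/* Look up the section covering a physical page index.  The lookup cannot
 * fail: pages with no registered section resolve to the distinguished
 * "unassigned" section.
 */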
MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
{
    PhysPageEntry lp = d->phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUArchState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUArchState),
        VMSTATE_UINT32(interrupt_request, CPUArchState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUArchState *qemu_get_cpu(int cpu)
{
    CPUArchState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

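/* Register a freshly created CPU: assign it the next free cpu_index, link
 * it onto the global first_cpu list and, when CPU_SAVE_VERSION is defined,
 * register its vmstate/savevm handlers.
 */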
void cpu_exec_init(CPUArchState *env)
{
#ifndef CONFIG_USER_ONLY
    CPUState *cpu = ENV_GET_CPU(env);
#endif
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

void cpu_reset_interrupt(CPUArchState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUArchState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining and index.  */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

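/* Compute the iotlb entry for a guest page: for RAM-backed sections this is
 * the ram_addr of the page, tagged with the "notdirty" or "rom" section as
 * appropriate; for MMIO it is the index of the section itself.  Pages
 * covered by a watchpoint are redirected to the watch handler via TLB_MMIO.
 */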
hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, paddr);
        if (!section->readonly) {
            iotlb |= phys_section_notdirty;
        } else {
            iotlb |= phys_section_rom;
        }
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = section - phys_sections;
        iotlb += memory_region_section_addr(section, paddr);
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

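/* A subpage covers a single target page whose contents are split between
 * several memory regions (e.g. a small MMIO region that does not fill the
 * page).  Its sub_section table maps each byte offset within the page to
 * the section registered for that offset.
 */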
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(hwaddr base);

static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
        if (!p[i].is_leaf) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].ptr);
        }
    }
    lp->is_leaf = 0;
    lp->ptr = PHYS_MAP_NODE_NIL;
}

static void destroy_all_mappings(AddressSpaceDispatch *d)
{
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}

static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + section->size - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}

static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    hwaddr addr;
    uint16_t section_index = phys_section_add(section);

    assert(size);

    addr = start_addr;
    phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
                  section_index);
}

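/* Called for each section of the flattened memory view.  Page-aligned,
 * page-sized runs are registered wholesale through register_multipage; the
 * unaligned head and tail, and sections smaller than a page, go through
 * register_subpage one page at a time.
 */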
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
    MemoryRegionSection now = *section, remain = *section;

    if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
        || (now.size < TARGET_PAGE_SIZE)) {
        now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space,
                       now.size);
        register_subpage(d, &now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    while (remain.size >= TARGET_PAGE_SIZE) {
        now = remain;
        if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = TARGET_PAGE_SIZE;
            register_subpage(d, &now);
        } else {
            now.size &= TARGET_PAGE_MASK;
            register_multipage(d, &now);
        }
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    now = remain;
    if (now.size) {
        register_subpage(d, &now);
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC 0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

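/* Back a RAM block with a file on a hugetlbfs mount (-mem-path): create an
 * unlinked temporary file there, size it with ftruncate and mmap it.
 * Returns NULL on any failure so the caller can fall back to anonymous
 * memory.
 */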
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    unlink(filename);
    free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    block->fd = fd;
    return area;
}
#endif

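/* Pick an offset in the ram_addr_t space for a new block of the given size:
 * scan the gaps between existing blocks and choose the smallest gap that
 * still fits (best fit), to limit fragmentation of the offset space.
 */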
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    if (QLIST_EMPTY(&ram_list.blocks))
        return 0;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QLIST_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QLIST_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;
    QemuOpts *machine_opts;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (machine_opts &&
        !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                    "but dump_guest_core=off specified\n");
        }
    }
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    QemuOpts *opts;

    opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

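/* Allocate a new RAM block.  If 'host' is non-NULL the caller supplies the
 * backing memory (flagged RAM_PREALLOC); otherwise it comes from -mem-path,
 * Xen, KVM or plain qemu_vmalloc.  The block's dirty bitmap starts out
 * fully set.
 */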
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                memory_try_enable_merging(new_block->host, size);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_vmalloc(size);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            g_free(block);
            return;
        }
    }
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            g_free(block);
            return;
        }
    }
}

#ifndef _WIN32
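/* Remap the host memory backing [addr, addr + length): the range is
 * unmapped and re-mmap'd at the same virtual address, using the same flags
 * it was allocated with.  Used to replace backing pages that have become
 * unusable (e.g. after a hardware memory error).
 */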
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */

pbrookdc828ca2009-04-09 22:21:07 +00001179/* Return a host pointer to ram allocated with qemu_ram_alloc.
pbrook5579c7f2009-04-11 14:47:08 +00001180 With the exception of the softmmu code in this file, this should
1181 only be used for local memory (e.g. video ram) that the device owns,
1182 and knows it isn't going to access beyond the end of the block.
1183
1184 It should not be used for general purpose DMA.
1185 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1186 */
Anthony Liguoric227f092009-10-01 16:12:16 -05001187void *qemu_get_ram_ptr(ram_addr_t addr)
pbrookdc828ca2009-04-09 22:21:07 +00001188{
pbrook94a6b542009-04-11 17:15:54 +00001189 RAMBlock *block;
1190
Alex Williamsonf471a172010-06-11 11:11:42 -06001191 QLIST_FOREACH(block, &ram_list.blocks, next) {
1192 if (addr - block->offset < block->length) {
Vincent Palatin7d82af32011-03-10 15:47:46 -05001193 /* Move this entry to to start of the list. */
1194 if (block != QLIST_FIRST(&ram_list.blocks)) {
1195 QLIST_REMOVE(block, next);
1196 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
1197 }
Jan Kiszka868bb332011-06-21 22:59:09 +02001198 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001199 /* We need to check if the requested address is in the RAM
1200 * because we don't want to map the entire memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001201 * In that case just map until the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01001202 */
1203 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001204 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01001205 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001206 block->host =
1207 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01001208 }
1209 }
Alex Williamsonf471a172010-06-11 11:11:42 -06001210 return block->host + (addr - block->offset);
1211 }
pbrook94a6b542009-04-11 17:15:54 +00001212 }
Alex Williamsonf471a172010-06-11 11:11:42 -06001213
1214 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1215 abort();
1216
1217 return NULL;
pbrookdc828ca2009-04-09 22:21:07 +00001218}
1219
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001220/* Return a host pointer to ram allocated with qemu_ram_alloc.
1221 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
1222 */
Blue Swirl8b9c99d2012-10-28 11:04:51 +00001223static void *qemu_safe_ram_ptr(ram_addr_t addr)
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001224{
1225 RAMBlock *block;
1226
1227 QLIST_FOREACH(block, &ram_list.blocks, next) {
1228 if (addr - block->offset < block->length) {
Jan Kiszka868bb332011-06-21 22:59:09 +02001229 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001230 /* We need to check whether the requested address is in RAM
1231 * because we don't want to map the entire memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001232 * In that case just map until the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01001233 */
1234 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001235 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01001236 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001237 block->host =
1238 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01001239 }
1240 }
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001241 return block->host + (addr - block->offset);
1242 }
1243 }
1244
1245 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1246 abort();
1247
1248 return NULL;
1249}
1250
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001251/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1252 * but takes a size argument */
Blue Swirl8b9c99d2012-10-28 11:04:51 +00001253static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001254{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001255 if (*size == 0) {
1256 return NULL;
1257 }
Jan Kiszka868bb332011-06-21 22:59:09 +02001258 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001259 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02001260 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001261 RAMBlock *block;
1262
1263 QLIST_FOREACH(block, &ram_list.blocks, next) {
1264 if (addr - block->offset < block->length) {
1265 if (addr - block->offset + *size > block->length)
1266 *size = block->length - addr + block->offset;
1267 return block->host + (addr - block->offset);
1268 }
1269 }
1270
1271 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1272 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001273 }
1274}
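
/* Sketch of the *size in/out contract above: the caller requests a length
 * and is told how much of it is actually contiguous within one block.
 * The address and length are hypothetical; guarded with #if 0 as
 * documentation only. */
#if 0
static void example_ptr_length(void)
{
    ram_addr_t len = 2 * TARGET_PAGE_SIZE;
    void *host = qemu_ram_ptr_length(0x1000, &len);

    /* len may now be smaller than requested if the block ended sooner */
    memset(host, 0, len);
}
#endif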
1275
Anthony PERARD050a0dd2010-09-16 13:57:49 +01001276void qemu_put_ram_ptr(void *addr)
1277{
1278 trace_qemu_put_ram_ptr(addr);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01001279}
1280
Marcelo Tosattie8902612010-10-11 15:31:19 -03001281int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00001282{
pbrook94a6b542009-04-11 17:15:54 +00001283 RAMBlock *block;
1284 uint8_t *host = ptr;
1285
Jan Kiszka868bb332011-06-21 22:59:09 +02001286 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001287 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001288 return 0;
1289 }
1290
Alex Williamsonf471a172010-06-11 11:11:42 -06001291 QLIST_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001292 /* This case happens when the block is not mapped. */
1293 if (block->host == NULL) {
1294 continue;
1295 }
Alex Williamsonf471a172010-06-11 11:11:42 -06001296 if (host - block->host < block->length) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03001297 *ram_addr = block->offset + (host - block->host);
1298 return 0;
Alex Williamsonf471a172010-06-11 11:11:42 -06001299 }
pbrook94a6b542009-04-11 17:15:54 +00001300 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001301
Marcelo Tosattie8902612010-10-11 15:31:19 -03001302 return -1;
1303}
Alex Williamsonf471a172010-06-11 11:11:42 -06001304
Marcelo Tosattie8902612010-10-11 15:31:19 -03001305/* Some of the softmmu routines need to translate from a host pointer
1306 (typically a TLB entry) back to a ram offset. */
1307ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
1308{
1309 ram_addr_t ram_addr;
Alex Williamsonf471a172010-06-11 11:11:42 -06001310
Marcelo Tosattie8902612010-10-11 15:31:19 -03001311 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
1312 fprintf(stderr, "Bad ram pointer %p\n", ptr);
1313 abort();
1314 }
1315 return ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00001316}
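
/* The two translations above are inverses of each other for any address
 * inside a mapped RAM block; a hedged sanity-check sketch (#if 0,
 * documentation only): */
#if 0
static void example_round_trip(ram_addr_t offset)
{
    void *host = qemu_get_ram_ptr(offset);

    assert(qemu_ram_addr_from_host_nofail(host) == offset);
}
#endif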
1317
Avi Kivitya8170e52012-10-23 12:30:10 +02001318static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001319 unsigned size)
bellard33417e72003-08-10 21:47:01 +00001320{
pbrook67d3b952006-12-18 05:03:52 +00001321#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00001322 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
pbrook67d3b952006-12-18 05:03:52 +00001323#endif
Richard Henderson5b450402011-04-18 16:13:12 -07001324#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001325 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00001326#endif
1327 return 0;
1328}
1329
Avi Kivitya8170e52012-10-23 12:30:10 +02001330static void unassigned_mem_write(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001331 uint64_t val, unsigned size)
blueswir1e18231a2008-10-06 18:46:28 +00001332{
1333#ifdef DEBUG_UNASSIGNED
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001334 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
blueswir1e18231a2008-10-06 18:46:28 +00001335#endif
Richard Henderson5b450402011-04-18 16:13:12 -07001336#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001337 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00001338#endif
1339}
1340
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001341static const MemoryRegionOps unassigned_mem_ops = {
1342 .read = unassigned_mem_read,
1343 .write = unassigned_mem_write,
1344 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00001345};
1346
Avi Kivitya8170e52012-10-23 12:30:10 +02001347static uint64_t error_mem_read(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001348 unsigned size)
1349{
1350 abort();
1351}
1352
Avi Kivitya8170e52012-10-23 12:30:10 +02001353static void error_mem_write(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001354 uint64_t value, unsigned size)
1355{
1356 abort();
1357}
1358
1359static const MemoryRegionOps error_mem_ops = {
1360 .read = error_mem_read,
1361 .write = error_mem_write,
1362 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00001363};
1364
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001365static const MemoryRegionOps rom_mem_ops = {
1366 .read = error_mem_read,
1367 .write = unassigned_mem_write,
1368 .endianness = DEVICE_NATIVE_ENDIAN,
1369};
1370
Avi Kivitya8170e52012-10-23 12:30:10 +02001371static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001372 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00001373{
bellard3a7d9292005-08-21 09:26:42 +00001374 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001375 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00001376 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1377#if !defined(CONFIG_USER_ONLY)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001378 tb_invalidate_phys_page_fast(ram_addr, size);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001379 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00001380#endif
1381 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001382 switch (size) {
1383 case 1:
1384 stb_p(qemu_get_ram_ptr(ram_addr), val);
1385 break;
1386 case 2:
1387 stw_p(qemu_get_ram_ptr(ram_addr), val);
1388 break;
1389 case 4:
1390 stl_p(qemu_get_ram_ptr(ram_addr), val);
1391 break;
1392 default:
1393 abort();
1394 }
bellardf23db162005-08-21 19:12:28 +00001395 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001396 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00001397 /* we remove the notdirty callback only if the code has been
1398 flushed */
1399 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00001400 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00001401}
1402
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001403static const MemoryRegionOps notdirty_mem_ops = {
1404 .read = error_mem_read,
1405 .write = notdirty_mem_write,
1406 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00001407};
1408
pbrook0f459d12008-06-09 00:20:13 +00001409/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00001410static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00001411{
Andreas Färber9349b4f2012-03-14 01:38:32 +01001412 CPUArchState *env = cpu_single_env;
aliguori06d55cc2008-11-18 20:24:06 +00001413 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00001414 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00001415 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00001416 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00001417
aliguori06d55cc2008-11-18 20:24:06 +00001418 if (env->watchpoint_hit) {
1419 /* We re-entered the check after replacing the TB. Now raise
 1420 * the debug interrupt so that it will trigger after the
1421 * current instruction. */
1422 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
1423 return;
1424 }
pbrook2e70f6e2008-06-29 01:03:05 +00001425 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00001426 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001427 if ((vaddr == (wp->vaddr & len_mask) ||
1428 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00001429 wp->flags |= BP_WATCHPOINT_HIT;
1430 if (!env->watchpoint_hit) {
1431 env->watchpoint_hit = wp;
Blue Swirl5a316522012-12-02 21:28:09 +00001432 tb_check_watchpoint(env);
aliguori6e140f22008-11-18 20:37:55 +00001433 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1434 env->exception_index = EXCP_DEBUG;
Max Filippov488d6572012-01-29 02:24:39 +04001435 cpu_loop_exit(env);
aliguori6e140f22008-11-18 20:37:55 +00001436 } else {
1437 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1438 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
Max Filippov488d6572012-01-29 02:24:39 +04001439 cpu_resume_from_signal(env, NULL);
aliguori6e140f22008-11-18 20:37:55 +00001440 }
aliguori06d55cc2008-11-18 20:24:06 +00001441 }
aliguori6e140f22008-11-18 20:37:55 +00001442 } else {
1443 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00001444 }
1445 }
1446}
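
/* check_watchpoint() is only reached via the TLB tricks once a watchpoint
 * exists.  A sketch of how one would be installed, assuming the usual
 * cpu_watchpoint_insert() helper; the flags and address are hypothetical
 * (#if 0, documentation only): */
#if 0
static void example_insert_watchpoint(CPUArchState *env, target_ulong addr)
{
    CPUWatchpoint *wp;

    /* Fires on stores covering [addr, addr + 3] */
    cpu_watchpoint_insert(env, addr, 4, BP_MEM_WRITE | BP_GDB, &wp);
}
#endif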
1447
pbrook6658ffb2007-03-16 23:58:11 +00001448/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1449 so these check for a hit then pass through to the normal out-of-line
1450 phys routines. */
Avi Kivitya8170e52012-10-23 12:30:10 +02001451static uint64_t watch_mem_read(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001452 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001453{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001454 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1455 switch (size) {
1456 case 1: return ldub_phys(addr);
1457 case 2: return lduw_phys(addr);
1458 case 4: return ldl_phys(addr);
1459 default: abort();
1460 }
pbrook6658ffb2007-03-16 23:58:11 +00001461}
1462
Avi Kivitya8170e52012-10-23 12:30:10 +02001463static void watch_mem_write(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001464 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001465{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001466 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1467 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04001468 case 1:
1469 stb_phys(addr, val);
1470 break;
1471 case 2:
1472 stw_phys(addr, val);
1473 break;
1474 case 4:
1475 stl_phys(addr, val);
1476 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02001477 default: abort();
1478 }
pbrook6658ffb2007-03-16 23:58:11 +00001479}
1480
Avi Kivity1ec9b902012-01-02 12:47:48 +02001481static const MemoryRegionOps watch_mem_ops = {
1482 .read = watch_mem_read,
1483 .write = watch_mem_write,
1484 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00001485};
pbrook6658ffb2007-03-16 23:58:11 +00001486
Avi Kivitya8170e52012-10-23 12:30:10 +02001487static uint64_t subpage_read(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001488 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001489{
Avi Kivity70c68e42012-01-02 12:32:48 +02001490 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07001491 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02001492 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00001493#if defined(DEBUG_SUBPAGE)
1494 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
1495 mmio, len, addr, idx);
1496#endif
blueswir1db7b5422007-05-26 17:36:03 +00001497
Avi Kivity5312bd82012-02-12 18:32:55 +02001498 section = &phys_sections[mmio->sub_section[idx]];
1499 addr += mmio->base;
1500 addr -= section->offset_within_address_space;
1501 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02001502 return io_mem_read(section->mr, addr, len);
blueswir1db7b5422007-05-26 17:36:03 +00001503}
1504
Avi Kivitya8170e52012-10-23 12:30:10 +02001505static void subpage_write(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001506 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001507{
Avi Kivity70c68e42012-01-02 12:32:48 +02001508 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07001509 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02001510 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00001511#if defined(DEBUG_SUBPAGE)
Avi Kivity70c68e42012-01-02 12:32:48 +02001512 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
1513 " idx %d value %"PRIx64"\n",
Richard Hendersonf6405242010-04-22 16:47:31 -07001514 __func__, mmio, len, addr, idx, value);
blueswir1db7b5422007-05-26 17:36:03 +00001515#endif
Richard Hendersonf6405242010-04-22 16:47:31 -07001516
Avi Kivity5312bd82012-02-12 18:32:55 +02001517 section = &phys_sections[mmio->sub_section[idx]];
1518 addr += mmio->base;
1519 addr -= section->offset_within_address_space;
1520 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02001521 io_mem_write(section->mr, addr, value, len);
blueswir1db7b5422007-05-26 17:36:03 +00001522}
1523
Avi Kivity70c68e42012-01-02 12:32:48 +02001524static const MemoryRegionOps subpage_ops = {
1525 .read = subpage_read,
1526 .write = subpage_write,
1527 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00001528};
1529
Avi Kivitya8170e52012-10-23 12:30:10 +02001530static uint64_t subpage_ram_read(void *opaque, hwaddr addr,
Avi Kivityde712f92012-01-02 12:41:07 +02001531 unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01001532{
1533 ram_addr_t raddr = addr;
1534 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02001535 switch (size) {
1536 case 1: return ldub_p(ptr);
1537 case 2: return lduw_p(ptr);
1538 case 4: return ldl_p(ptr);
1539 default: abort();
1540 }
Andreas Färber56384e82011-11-30 16:26:21 +01001541}
1542
Avi Kivitya8170e52012-10-23 12:30:10 +02001543static void subpage_ram_write(void *opaque, hwaddr addr,
Avi Kivityde712f92012-01-02 12:41:07 +02001544 uint64_t value, unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01001545{
1546 ram_addr_t raddr = addr;
1547 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02001548 switch (size) {
1549 case 1: return stb_p(ptr, value);
1550 case 2: return stw_p(ptr, value);
1551 case 4: return stl_p(ptr, value);
1552 default: abort();
1553 }
Andreas Färber56384e82011-11-30 16:26:21 +01001554}
1555
Avi Kivityde712f92012-01-02 12:41:07 +02001556static const MemoryRegionOps subpage_ram_ops = {
1557 .read = subpage_ram_read,
1558 .write = subpage_ram_write,
1559 .endianness = DEVICE_NATIVE_ENDIAN,
Andreas Färber56384e82011-11-30 16:26:21 +01001560};
1561
Anthony Liguoric227f092009-10-01 16:12:16 -05001562static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02001563 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00001564{
1565 int idx, eidx;
1566
1567 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1568 return -1;
1569 idx = SUBPAGE_IDX(start);
1570 eidx = SUBPAGE_IDX(end);
1571#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00001572 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
blueswir1db7b5422007-05-26 17:36:03 +00001573 __func__, mmio, start, end, idx, eidx, section);
1574#endif
Avi Kivity5312bd82012-02-12 18:32:55 +02001575 if (memory_region_is_ram(phys_sections[section].mr)) {
1576 MemoryRegionSection new_section = phys_sections[section];
1577 new_section.mr = &io_mem_subpage_ram;
1578 section = phys_section_add(&new_section);
Andreas Färber56384e82011-11-30 16:26:21 +01001579 }
blueswir1db7b5422007-05-26 17:36:03 +00001580 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02001581 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00001582 }
1583
1584 return 0;
1585}
1586
Avi Kivitya8170e52012-10-23 12:30:10 +02001587static subpage_t *subpage_init(hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00001588{
Anthony Liguoric227f092009-10-01 16:12:16 -05001589 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00001590
Anthony Liguori7267c092011-08-20 22:09:37 -05001591 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00001592
1593 mmio->base = base;
Avi Kivity70c68e42012-01-02 12:32:48 +02001594 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
1595 "subpage", TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02001596 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00001597#if defined(DEBUG_SUBPAGE)
aliguori1eec6142009-02-05 22:06:18 +00001598 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
 1599 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00001600#endif
Avi Kivity0f0cb162012-02-13 17:14:32 +02001601 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
blueswir1db7b5422007-05-26 17:36:03 +00001602
1603 return mmio;
1604}
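
/* Subpages are only created when a section does not cover whole target
 * pages.  A hedged sketch of a layout that triggers this path: a small
 * MMIO region at a non-page-aligned guest address (names and addresses
 * hypothetical, #if 0, documentation only): */
#if 0
static void example_unaligned_mmio(MemoryRegion *sysmem, MemoryRegion *mmio)
{
    /* A small region 0x10 bytes into a page: the dispatch code above
     * allocates a subpage_t for that page and routes the rest of the
     * page to phys_section_unassigned. */
    memory_region_add_subregion(sysmem, 0x10000010, mmio);
}
#endif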
1605
Avi Kivity5312bd82012-02-12 18:32:55 +02001606static uint16_t dummy_section(MemoryRegion *mr)
1607{
1608 MemoryRegionSection section = {
1609 .mr = mr,
1610 .offset_within_address_space = 0,
1611 .offset_within_region = 0,
1612 .size = UINT64_MAX,
1613 };
1614
1615 return phys_section_add(&section);
1616}
1617
Avi Kivitya8170e52012-10-23 12:30:10 +02001618MemoryRegion *iotlb_to_region(hwaddr index)
Avi Kivityaa102232012-03-08 17:06:55 +02001619{
Avi Kivity37ec01d2012-03-08 18:08:35 +02001620 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02001621}
1622
Avi Kivitye9179ce2009-06-14 11:38:52 +03001623static void io_mem_init(void)
1624{
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001625 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001626 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
1627 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
1628 "unassigned", UINT64_MAX);
1629 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
1630 "notdirty", UINT64_MAX);
Avi Kivityde712f92012-01-02 12:41:07 +02001631 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
1632 "subpage-ram", UINT64_MAX);
Avi Kivity1ec9b902012-01-02 12:47:48 +02001633 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
1634 "watch", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03001635}
1636
Avi Kivityac1970f2012-10-03 16:22:53 +02001637static void mem_begin(MemoryListener *listener)
1638{
1639 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
1640
1641 destroy_all_mappings(d);
1642 d->phys_map.ptr = PHYS_MAP_NODE_NIL;
1643}
1644
Avi Kivity50c1e142012-02-08 21:36:02 +02001645static void core_begin(MemoryListener *listener)
1646{
Avi Kivity5312bd82012-02-12 18:32:55 +02001647 phys_sections_clear();
1648 phys_section_unassigned = dummy_section(&io_mem_unassigned);
Avi Kivityaa102232012-03-08 17:06:55 +02001649 phys_section_notdirty = dummy_section(&io_mem_notdirty);
1650 phys_section_rom = dummy_section(&io_mem_rom);
1651 phys_section_watch = dummy_section(&io_mem_watch);
Avi Kivity50c1e142012-02-08 21:36:02 +02001652}
1653
Avi Kivity1d711482012-10-02 18:54:45 +02001654static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02001655{
Andreas Färber9349b4f2012-03-14 01:38:32 +01001656 CPUArchState *env;
Avi Kivity117712c2012-02-12 21:23:17 +02001657
1658 /* since each CPU stores ram addresses in its TLB cache, we must
1659 reset the modified entries */
1660 /* XXX: slow ! */
1661 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1662 tlb_flush(env, 1);
1663 }
Avi Kivity50c1e142012-02-08 21:36:02 +02001664}
1665
Avi Kivity93632742012-02-08 16:54:16 +02001666static void core_log_global_start(MemoryListener *listener)
1667{
1668 cpu_physical_memory_set_dirty_tracking(1);
1669}
1670
1671static void core_log_global_stop(MemoryListener *listener)
1672{
1673 cpu_physical_memory_set_dirty_tracking(0);
1674}
1675
Avi Kivity4855d412012-02-08 21:16:05 +02001676static void io_region_add(MemoryListener *listener,
1677 MemoryRegionSection *section)
1678{
Avi Kivitya2d33522012-03-05 17:40:12 +02001679 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
1680
1681 mrio->mr = section->mr;
1682 mrio->offset = section->offset_within_region;
1683 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
Avi Kivity4855d412012-02-08 21:16:05 +02001684 section->offset_within_address_space, section->size);
Avi Kivitya2d33522012-03-05 17:40:12 +02001685 ioport_register(&mrio->iorange);
Avi Kivity4855d412012-02-08 21:16:05 +02001686}
1687
1688static void io_region_del(MemoryListener *listener,
1689 MemoryRegionSection *section)
1690{
1691 isa_unassign_ioport(section->offset_within_address_space, section->size);
1692}
1693
Avi Kivity93632742012-02-08 16:54:16 +02001694static MemoryListener core_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02001695 .begin = core_begin,
Avi Kivity93632742012-02-08 16:54:16 +02001696 .log_global_start = core_log_global_start,
1697 .log_global_stop = core_log_global_stop,
Avi Kivityac1970f2012-10-03 16:22:53 +02001698 .priority = 1,
Avi Kivity93632742012-02-08 16:54:16 +02001699};
1700
Avi Kivity4855d412012-02-08 21:16:05 +02001701static MemoryListener io_memory_listener = {
1702 .region_add = io_region_add,
1703 .region_del = io_region_del,
Avi Kivity4855d412012-02-08 21:16:05 +02001704 .priority = 0,
1705};
1706
Avi Kivity1d711482012-10-02 18:54:45 +02001707static MemoryListener tcg_memory_listener = {
1708 .commit = tcg_commit,
1709};
1710
Avi Kivityac1970f2012-10-03 16:22:53 +02001711void address_space_init_dispatch(AddressSpace *as)
1712{
1713 AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
1714
1715 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
1716 d->listener = (MemoryListener) {
1717 .begin = mem_begin,
1718 .region_add = mem_add,
1719 .region_nop = mem_add,
1720 .priority = 0,
1721 };
1722 as->dispatch = d;
1723 memory_listener_register(&d->listener, as);
1724}
1725
Avi Kivity83f3c252012-10-07 12:59:55 +02001726void address_space_destroy_dispatch(AddressSpace *as)
1727{
1728 AddressSpaceDispatch *d = as->dispatch;
1729
1730 memory_listener_unregister(&d->listener);
1731 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
1732 g_free(d);
1733 as->dispatch = NULL;
1734}
1735
Avi Kivity62152b82011-07-26 14:26:14 +03001736static void memory_map_init(void)
1737{
Anthony Liguori7267c092011-08-20 22:09:37 -05001738 system_memory = g_malloc(sizeof(*system_memory));
Avi Kivity8417ceb2011-08-03 11:56:14 +03001739 memory_region_init(system_memory, "system", INT64_MAX);
Avi Kivity2673a5d2012-10-02 18:49:28 +02001740 address_space_init(&address_space_memory, system_memory);
1741 address_space_memory.name = "memory";
Avi Kivity309cb472011-08-08 16:09:03 +03001742
Anthony Liguori7267c092011-08-20 22:09:37 -05001743 system_io = g_malloc(sizeof(*system_io));
Avi Kivity309cb472011-08-08 16:09:03 +03001744 memory_region_init(system_io, "io", 65536);
Avi Kivity2673a5d2012-10-02 18:49:28 +02001745 address_space_init(&address_space_io, system_io);
1746 address_space_io.name = "I/O";
Avi Kivity93632742012-02-08 16:54:16 +02001747
Avi Kivityf6790af2012-10-02 20:13:51 +02001748 memory_listener_register(&core_memory_listener, &address_space_memory);
1749 memory_listener_register(&io_memory_listener, &address_space_io);
1750 memory_listener_register(&tcg_memory_listener, &address_space_memory);
Peter Maydell9e119082012-10-29 11:34:32 +10001751
1752 dma_context_init(&dma_context_memory, &address_space_memory,
1753 NULL, NULL, NULL);
Avi Kivity62152b82011-07-26 14:26:14 +03001754}
1755
1756MemoryRegion *get_system_memory(void)
1757{
1758 return system_memory;
1759}
1760
Avi Kivity309cb472011-08-08 16:09:03 +03001761MemoryRegion *get_system_io(void)
1762{
1763 return system_io;
1764}
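
/* Sketch of how board code typically builds on the two accessors above:
 * allocate a RAM region and map it into the system address space.  The
 * "example.ram" name, base address and size are hypothetical (#if 0,
 * documentation only). */
#if 0
static void example_map_board_ram(void)
{
    MemoryRegion *sysmem = get_system_memory();
    MemoryRegion *ram = g_malloc(sizeof(*ram));

    memory_region_init_ram(ram, "example.ram", 0x100000);
    memory_region_add_subregion(sysmem, 0x10000000, ram);
}
#endif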
1765
pbrooke2eef172008-06-08 01:09:01 +00001766#endif /* !defined(CONFIG_USER_ONLY) */
1767
bellard13eb76e2004-01-24 15:23:36 +00001768/* physical memory access (slow version, mainly for debug) */
1769#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +01001770int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00001771 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00001772{
1773 int l, flags;
1774 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00001775 void * p;
bellard13eb76e2004-01-24 15:23:36 +00001776
1777 while (len > 0) {
1778 page = addr & TARGET_PAGE_MASK;
1779 l = (page + TARGET_PAGE_SIZE) - addr;
1780 if (l > len)
1781 l = len;
1782 flags = page_get_flags(page);
1783 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00001784 return -1;
bellard13eb76e2004-01-24 15:23:36 +00001785 if (is_write) {
1786 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00001787 return -1;
bellard579a97f2007-11-11 14:26:47 +00001788 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00001789 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00001790 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00001791 memcpy(p, buf, l);
1792 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00001793 } else {
1794 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00001795 return -1;
bellard579a97f2007-11-11 14:26:47 +00001796 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00001797 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00001798 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00001799 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00001800 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00001801 }
1802 len -= l;
1803 buf += l;
1804 addr += l;
1805 }
Paul Brooka68fe892010-03-01 00:08:59 +00001806 return 0;
bellard13eb76e2004-01-24 15:23:36 +00001807}
bellard8df1cd02005-01-28 22:37:22 +00001808
bellard13eb76e2004-01-24 15:23:36 +00001809#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001810
Avi Kivitya8170e52012-10-23 12:30:10 +02001811static void invalidate_and_set_dirty(hwaddr addr,
1812 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001813{
1814 if (!cpu_physical_memory_is_dirty(addr)) {
1815 /* invalidate code */
1816 tb_invalidate_phys_page_range(addr, addr + length, 0);
1817 /* set dirty bit */
1818 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1819 }
Anthony PERARDe2269392012-10-03 13:49:22 +00001820 xen_modified_memory(addr, length);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001821}
1822
Avi Kivitya8170e52012-10-23 12:30:10 +02001823void address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02001824 int len, bool is_write)
bellard13eb76e2004-01-24 15:23:36 +00001825{
Avi Kivityac1970f2012-10-03 16:22:53 +02001826 AddressSpaceDispatch *d = as->dispatch;
Avi Kivity37ec01d2012-03-08 18:08:35 +02001827 int l;
bellard13eb76e2004-01-24 15:23:36 +00001828 uint8_t *ptr;
1829 uint32_t val;
Avi Kivitya8170e52012-10-23 12:30:10 +02001830 hwaddr page;
Avi Kivityf3705d52012-03-08 16:16:34 +02001831 MemoryRegionSection *section;
ths3b46e622007-09-17 08:09:54 +00001832
bellard13eb76e2004-01-24 15:23:36 +00001833 while (len > 0) {
1834 page = addr & TARGET_PAGE_MASK;
1835 l = (page + TARGET_PAGE_SIZE) - addr;
1836 if (l > len)
1837 l = len;
Avi Kivityac1970f2012-10-03 16:22:53 +02001838 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00001839
bellard13eb76e2004-01-24 15:23:36 +00001840 if (is_write) {
Avi Kivityf3705d52012-03-08 16:16:34 +02001841 if (!memory_region_is_ram(section->mr)) {
Avi Kivitya8170e52012-10-23 12:30:10 +02001842 hwaddr addr1;
Blue Swirlcc5bea62012-04-14 14:56:48 +00001843 addr1 = memory_region_section_addr(section, addr);
bellard6a00d602005-11-21 23:25:50 +00001844 /* XXX: could force cpu_single_env to NULL to avoid
1845 potential bugs */
aurel326c2934d2009-02-18 21:37:17 +00001846 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00001847 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001848 val = ldl_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02001849 io_mem_write(section->mr, addr1, val, 4);
bellard13eb76e2004-01-24 15:23:36 +00001850 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00001851 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00001852 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001853 val = lduw_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02001854 io_mem_write(section->mr, addr1, val, 2);
bellard13eb76e2004-01-24 15:23:36 +00001855 l = 2;
1856 } else {
bellard1c213d12005-09-03 10:49:04 +00001857 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001858 val = ldub_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02001859 io_mem_write(section->mr, addr1, val, 1);
bellard13eb76e2004-01-24 15:23:36 +00001860 l = 1;
1861 }
Avi Kivityf3705d52012-03-08 16:16:34 +02001862 } else if (!section->readonly) {
Anthony PERARD8ca56922011-07-15 04:32:53 +00001863 ram_addr_t addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02001864 addr1 = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00001865 + memory_region_section_addr(section, addr);
bellard13eb76e2004-01-24 15:23:36 +00001866 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00001867 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00001868 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001869 invalidate_and_set_dirty(addr1, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01001870 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00001871 }
1872 } else {
Blue Swirlcc5bea62012-04-14 14:56:48 +00001873 if (!(memory_region_is_ram(section->mr) ||
1874 memory_region_is_romd(section->mr))) {
Avi Kivitya8170e52012-10-23 12:30:10 +02001875 hwaddr addr1;
bellard13eb76e2004-01-24 15:23:36 +00001876 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00001877 addr1 = memory_region_section_addr(section, addr);
aurel326c2934d2009-02-18 21:37:17 +00001878 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00001879 /* 32 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02001880 val = io_mem_read(section->mr, addr1, 4);
bellardc27004e2005-01-03 23:35:10 +00001881 stl_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00001882 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00001883 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00001884 /* 16 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02001885 val = io_mem_read(section->mr, addr1, 2);
bellardc27004e2005-01-03 23:35:10 +00001886 stw_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00001887 l = 2;
1888 } else {
bellard1c213d12005-09-03 10:49:04 +00001889 /* 8 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02001890 val = io_mem_read(section->mr, addr1, 1);
bellardc27004e2005-01-03 23:35:10 +00001891 stb_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00001892 l = 1;
1893 }
1894 } else {
1895 /* RAM case */
Anthony PERARD0a1b3572012-03-19 15:54:34 +00001896 ptr = qemu_get_ram_ptr(section->mr->ram_addr
Blue Swirlcc5bea62012-04-14 14:56:48 +00001897 + memory_region_section_addr(section,
1898 addr));
Avi Kivityf3705d52012-03-08 16:16:34 +02001899 memcpy(buf, ptr, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01001900 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00001901 }
1902 }
1903 len -= l;
1904 buf += l;
1905 addr += l;
1906 }
1907}
bellard8df1cd02005-01-28 22:37:22 +00001908
Avi Kivitya8170e52012-10-23 12:30:10 +02001909void address_space_write(AddressSpace *as, hwaddr addr,
Avi Kivityac1970f2012-10-03 16:22:53 +02001910 const uint8_t *buf, int len)
1911{
1912 address_space_rw(as, addr, (uint8_t *)buf, len, true);
1913}
1914
1915/**
1916 * address_space_read: read from an address space.
1917 *
1918 * @as: #AddressSpace to be accessed
1919 * @addr: address within that address space
 1920 * @buf: buffer with the data transferred
 * @len: length of the transfer in bytes
1921 */
Avi Kivitya8170e52012-10-23 12:30:10 +02001922void address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02001923{
1924 address_space_rw(as, addr, buf, len, false);
1925}
1926
1927
Avi Kivitya8170e52012-10-23 12:30:10 +02001928void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02001929 int len, int is_write)
1930{
1931 return address_space_rw(&address_space_memory, addr, buf, len, is_write);
1932}
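
/* A hedged sketch of the convenience wrappers built on the above; the
 * address and payload are hypothetical (#if 0, documentation only): */
#if 0
static void example_guest_copy(void)
{
    uint8_t buf[4] = { 0xde, 0xad, 0xbe, 0xef };

    cpu_physical_memory_write(0x1000, buf, sizeof(buf)); /* host -> guest */
    cpu_physical_memory_read(0x1000, buf, sizeof(buf));  /* guest -> host */
}
#endif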
1933
bellardd0ecd2a2006-04-23 17:14:48 +00001934/* used for ROM loading : can write in RAM and ROM */
Avi Kivitya8170e52012-10-23 12:30:10 +02001935void cpu_physical_memory_write_rom(hwaddr addr,
bellardd0ecd2a2006-04-23 17:14:48 +00001936 const uint8_t *buf, int len)
1937{
Avi Kivityac1970f2012-10-03 16:22:53 +02001938 AddressSpaceDispatch *d = address_space_memory.dispatch;
bellardd0ecd2a2006-04-23 17:14:48 +00001939 int l;
1940 uint8_t *ptr;
Avi Kivitya8170e52012-10-23 12:30:10 +02001941 hwaddr page;
Avi Kivityf3705d52012-03-08 16:16:34 +02001942 MemoryRegionSection *section;
ths3b46e622007-09-17 08:09:54 +00001943
bellardd0ecd2a2006-04-23 17:14:48 +00001944 while (len > 0) {
1945 page = addr & TARGET_PAGE_MASK;
1946 l = (page + TARGET_PAGE_SIZE) - addr;
1947 if (l > len)
1948 l = len;
Avi Kivityac1970f2012-10-03 16:22:53 +02001949 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00001950
Blue Swirlcc5bea62012-04-14 14:56:48 +00001951 if (!(memory_region_is_ram(section->mr) ||
1952 memory_region_is_romd(section->mr))) {
bellardd0ecd2a2006-04-23 17:14:48 +00001953 /* do nothing */
1954 } else {
1955 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02001956 addr1 = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00001957 + memory_region_section_addr(section, addr);
bellardd0ecd2a2006-04-23 17:14:48 +00001958 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00001959 ptr = qemu_get_ram_ptr(addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00001960 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001961 invalidate_and_set_dirty(addr1, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01001962 qemu_put_ram_ptr(ptr);
bellardd0ecd2a2006-04-23 17:14:48 +00001963 }
1964 len -= l;
1965 buf += l;
1966 addr += l;
1967 }
1968}
1969
aliguori6d16c2f2009-01-22 16:59:11 +00001970typedef struct {
1971 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02001972 hwaddr addr;
1973 hwaddr len;
aliguori6d16c2f2009-01-22 16:59:11 +00001974} BounceBuffer;
1975
1976static BounceBuffer bounce;
1977
aliguoriba223c22009-01-22 16:59:16 +00001978typedef struct MapClient {
1979 void *opaque;
1980 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00001981 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00001982} MapClient;
1983
Blue Swirl72cf2d42009-09-12 07:36:22 +00001984static QLIST_HEAD(map_client_list, MapClient) map_client_list
1985 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00001986
1987void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
1988{
Anthony Liguori7267c092011-08-20 22:09:37 -05001989 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00001990
1991 client->opaque = opaque;
1992 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00001993 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00001994 return client;
1995}
1996
Blue Swirl8b9c99d2012-10-28 11:04:51 +00001997static void cpu_unregister_map_client(void *_client)
aliguoriba223c22009-01-22 16:59:16 +00001998{
1999 MapClient *client = (MapClient *)_client;
2000
Blue Swirl72cf2d42009-09-12 07:36:22 +00002001 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002002 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002003}
2004
2005static void cpu_notify_map_clients(void)
2006{
2007 MapClient *client;
2008
Blue Swirl72cf2d42009-09-12 07:36:22 +00002009 while (!QLIST_EMPTY(&map_client_list)) {
2010 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002011 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09002012 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00002013 }
2014}
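
/* When address_space_map() cannot map (the single bounce buffer is busy),
 * callers can ask to be called back once it is released.  A hedged
 * sketch; the example_* names are hypothetical (#if 0, documentation
 * only): */
#if 0
static void example_map_retry_cb(void *opaque)
{
    /* Called from cpu_notify_map_clients(); retry the deferred
     * cpu_physical_memory_map() here. */
}

static void example_defer_mapping(void *dma_state)
{
    cpu_register_map_client(dma_state, example_map_retry_cb);
}
#endif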
2015
aliguori6d16c2f2009-01-22 16:59:11 +00002016/* Map a physical memory region into a host virtual address.
2017 * May map a subset of the requested range, given by and returned in *plen.
2018 * May return NULL if resources needed to perform the mapping are exhausted.
2019 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002020 * Use cpu_register_map_client() to know when retrying the map operation is
2021 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002022 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002023void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002024 hwaddr addr,
2025 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002026 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002027{
Avi Kivityac1970f2012-10-03 16:22:53 +02002028 AddressSpaceDispatch *d = as->dispatch;
Avi Kivitya8170e52012-10-23 12:30:10 +02002029 hwaddr len = *plen;
2030 hwaddr todo = 0;
aliguori6d16c2f2009-01-22 16:59:11 +00002031 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02002032 hwaddr page;
Avi Kivityf3705d52012-03-08 16:16:34 +02002033 MemoryRegionSection *section;
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002034 ram_addr_t raddr = RAM_ADDR_MAX;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002035 ram_addr_t rlen;
2036 void *ret;
aliguori6d16c2f2009-01-22 16:59:11 +00002037
2038 while (len > 0) {
2039 page = addr & TARGET_PAGE_MASK;
2040 l = (page + TARGET_PAGE_SIZE) - addr;
2041 if (l > len)
2042 l = len;
Avi Kivityac1970f2012-10-03 16:22:53 +02002043 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
aliguori6d16c2f2009-01-22 16:59:11 +00002044
Avi Kivityf3705d52012-03-08 16:16:34 +02002045 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002046 if (todo || bounce.buffer) {
aliguori6d16c2f2009-01-22 16:59:11 +00002047 break;
2048 }
2049 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
2050 bounce.addr = addr;
2051 bounce.len = l;
2052 if (!is_write) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002053 address_space_read(as, addr, bounce.buffer, l);
aliguori6d16c2f2009-01-22 16:59:11 +00002054 }
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002055
2056 *plen = l;
2057 return bounce.buffer;
aliguori6d16c2f2009-01-22 16:59:11 +00002058 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002059 if (!todo) {
Avi Kivityf3705d52012-03-08 16:16:34 +02002060 raddr = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002061 + memory_region_section_addr(section, addr);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002062 }
aliguori6d16c2f2009-01-22 16:59:11 +00002063
2064 len -= l;
2065 addr += l;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002066 todo += l;
aliguori6d16c2f2009-01-22 16:59:11 +00002067 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002068 rlen = todo;
2069 ret = qemu_ram_ptr_length(raddr, &rlen);
2070 *plen = rlen;
2071 return ret;
aliguori6d16c2f2009-01-22 16:59:11 +00002072}
2073
Avi Kivityac1970f2012-10-03 16:22:53 +02002074/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002075 * Will also mark the memory as dirty if is_write == 1. access_len gives
2076 * the amount of memory that was actually read or written by the caller.
2077 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002078void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2079 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002080{
2081 if (buffer != bounce.buffer) {
2082 if (is_write) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03002083 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002084 while (access_len) {
2085 unsigned l;
2086 l = TARGET_PAGE_SIZE;
2087 if (l > access_len)
2088 l = access_len;
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002089 invalidate_and_set_dirty(addr1, l);
aliguori6d16c2f2009-01-22 16:59:11 +00002090 addr1 += l;
2091 access_len -= l;
2092 }
2093 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002094 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002095 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002096 }
aliguori6d16c2f2009-01-22 16:59:11 +00002097 return;
2098 }
2099 if (is_write) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002100 address_space_write(as, bounce.addr, bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002101 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00002102 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002103 bounce.buffer = NULL;
aliguoriba223c22009-01-22 16:59:16 +00002104 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00002105}
bellardd0ecd2a2006-04-23 17:14:48 +00002106
Avi Kivitya8170e52012-10-23 12:30:10 +02002107void *cpu_physical_memory_map(hwaddr addr,
2108 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002109 int is_write)
2110{
2111 return address_space_map(&address_space_memory, addr, plen, is_write);
2112}
2113
Avi Kivitya8170e52012-10-23 12:30:10 +02002114void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2115 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002116{
2117 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2118}
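
/* Canonical map/access/unmap sequence over the wrappers above; it falls
 * back transparently to the bounce buffer for MMIO.  The address is
 * hypothetical (#if 0, documentation only): */
#if 0
static void example_dma_write(void)
{
    hwaddr len = 4096;
    void *host = cpu_physical_memory_map(0x2000, &len, 1 /* is_write */);

    if (host) {
        memset(host, 0, len);   /* len may have been clamped by the map */
        cpu_physical_memory_unmap(host, len, 1, len);
    }
    /* else: register a map client and retry later */
}
#endif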
2119
bellard8df1cd02005-01-28 22:37:22 +00002120/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002121static inline uint32_t ldl_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002122 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002123{
bellard8df1cd02005-01-28 22:37:22 +00002124 uint8_t *ptr;
2125 uint32_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02002126 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00002127
Avi Kivityac1970f2012-10-03 16:22:53 +02002128 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00002129
Blue Swirlcc5bea62012-04-14 14:56:48 +00002130 if (!(memory_region_is_ram(section->mr) ||
2131 memory_region_is_romd(section->mr))) {
bellard8df1cd02005-01-28 22:37:22 +00002132 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00002133 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02002134 val = io_mem_read(section->mr, addr, 4);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002135#if defined(TARGET_WORDS_BIGENDIAN)
2136 if (endian == DEVICE_LITTLE_ENDIAN) {
2137 val = bswap32(val);
2138 }
2139#else
2140 if (endian == DEVICE_BIG_ENDIAN) {
2141 val = bswap32(val);
2142 }
2143#endif
bellard8df1cd02005-01-28 22:37:22 +00002144 } else {
2145 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02002146 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002147 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002148 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002149 switch (endian) {
2150 case DEVICE_LITTLE_ENDIAN:
2151 val = ldl_le_p(ptr);
2152 break;
2153 case DEVICE_BIG_ENDIAN:
2154 val = ldl_be_p(ptr);
2155 break;
2156 default:
2157 val = ldl_p(ptr);
2158 break;
2159 }
bellard8df1cd02005-01-28 22:37:22 +00002160 }
2161 return val;
2162}
2163
Avi Kivitya8170e52012-10-23 12:30:10 +02002164uint32_t ldl_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002165{
2166 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2167}
2168
Avi Kivitya8170e52012-10-23 12:30:10 +02002169uint32_t ldl_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002170{
2171 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2172}
2173
Avi Kivitya8170e52012-10-23 12:30:10 +02002174uint32_t ldl_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002175{
2176 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
2177}
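
/* The fixed-endian variants above are meant for device-defined layouts,
 * the native-endian one for guest-CPU-defined data.  A hedged sketch
 * (hypothetical register address, #if 0, documentation only): */
#if 0
static uint32_t example_read_le_register(hwaddr regbase)
{
    /* Byteswaps on big-endian targets, direct load on little-endian */
    return ldl_le_phys(regbase);
}
#endif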
2178
bellard84b7b8e2005-11-28 21:19:04 +00002179/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002180static inline uint64_t ldq_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002181 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00002182{
bellard84b7b8e2005-11-28 21:19:04 +00002183 uint8_t *ptr;
2184 uint64_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02002185 MemoryRegionSection *section;
bellard84b7b8e2005-11-28 21:19:04 +00002186
Avi Kivityac1970f2012-10-03 16:22:53 +02002187 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00002188
Blue Swirlcc5bea62012-04-14 14:56:48 +00002189 if (!(memory_region_is_ram(section->mr) ||
2190 memory_region_is_romd(section->mr))) {
bellard84b7b8e2005-11-28 21:19:04 +00002191 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00002192 addr = memory_region_section_addr(section, addr);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002193
2194 /* XXX This is broken when device endian != cpu endian.
2195 Fix and add "endian" variable check */
bellard84b7b8e2005-11-28 21:19:04 +00002196#ifdef TARGET_WORDS_BIGENDIAN
Avi Kivity37ec01d2012-03-08 18:08:35 +02002197 val = io_mem_read(section->mr, addr, 4) << 32;
2198 val |= io_mem_read(section->mr, addr + 4, 4);
bellard84b7b8e2005-11-28 21:19:04 +00002199#else
Avi Kivity37ec01d2012-03-08 18:08:35 +02002200 val = io_mem_read(section->mr, addr, 4);
2201 val |= io_mem_read(section->mr, addr + 4, 4) << 32;
bellard84b7b8e2005-11-28 21:19:04 +00002202#endif
2203 } else {
2204 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02002205 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002206 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002207 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002208 switch (endian) {
2209 case DEVICE_LITTLE_ENDIAN:
2210 val = ldq_le_p(ptr);
2211 break;
2212 case DEVICE_BIG_ENDIAN:
2213 val = ldq_be_p(ptr);
2214 break;
2215 default:
2216 val = ldq_p(ptr);
2217 break;
2218 }
bellard84b7b8e2005-11-28 21:19:04 +00002219 }
2220 return val;
2221}
2222
Avi Kivitya8170e52012-10-23 12:30:10 +02002223uint64_t ldq_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002224{
2225 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2226}
2227
Avi Kivitya8170e52012-10-23 12:30:10 +02002228uint64_t ldq_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002229{
2230 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2231}
2232
Avi Kivitya8170e52012-10-23 12:30:10 +02002233uint64_t ldq_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002234{
2235 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2236}
2237
bellardaab33092005-10-30 20:48:42 +00002238/* XXX: optimize */
Avi Kivitya8170e52012-10-23 12:30:10 +02002239uint32_t ldub_phys(hwaddr addr)
bellardaab33092005-10-30 20:48:42 +00002240{
2241 uint8_t val;
2242 cpu_physical_memory_read(addr, &val, 1);
2243 return val;
2244}
2245
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002246/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002247static inline uint32_t lduw_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002248 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00002249{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002250 uint8_t *ptr;
2251 uint64_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02002252 MemoryRegionSection *section;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002253
Avi Kivityac1970f2012-10-03 16:22:53 +02002254 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002255
Blue Swirlcc5bea62012-04-14 14:56:48 +00002256 if (!(memory_region_is_ram(section->mr) ||
2257 memory_region_is_romd(section->mr))) {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002258 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00002259 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02002260 val = io_mem_read(section->mr, addr, 2);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002261#if defined(TARGET_WORDS_BIGENDIAN)
2262 if (endian == DEVICE_LITTLE_ENDIAN) {
2263 val = bswap16(val);
2264 }
2265#else
2266 if (endian == DEVICE_BIG_ENDIAN) {
2267 val = bswap16(val);
2268 }
2269#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002270 } else {
2271 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02002272 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002273 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002274 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002275 switch (endian) {
2276 case DEVICE_LITTLE_ENDIAN:
2277 val = lduw_le_p(ptr);
2278 break;
2279 case DEVICE_BIG_ENDIAN:
2280 val = lduw_be_p(ptr);
2281 break;
2282 default:
2283 val = lduw_p(ptr);
2284 break;
2285 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002286 }
2287 return val;
bellardaab33092005-10-30 20:48:42 +00002288}
2289
Avi Kivitya8170e52012-10-23 12:30:10 +02002290uint32_t lduw_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002291{
2292 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2293}
2294
Avi Kivitya8170e52012-10-23 12:30:10 +02002295uint32_t lduw_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002296{
2297 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2298}
2299
Avi Kivitya8170e52012-10-23 12:30:10 +02002300uint32_t lduw_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002301{
2302 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
2303}
2304
bellard8df1cd02005-01-28 22:37:22 +00002305/* warning: addr must be aligned. The ram page is not masked as dirty
2306 and the code inside is not invalidated. It is useful if the dirty
2307 bits are used to track modified PTEs */
Avi Kivitya8170e52012-10-23 12:30:10 +02002308void stl_phys_notdirty(hwaddr addr, uint32_t val)
bellard8df1cd02005-01-28 22:37:22 +00002309{
bellard8df1cd02005-01-28 22:37:22 +00002310 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02002311 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00002312
Avi Kivityac1970f2012-10-03 16:22:53 +02002313 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00002314
Avi Kivityf3705d52012-03-08 16:16:34 +02002315 if (!memory_region_is_ram(section->mr) || section->readonly) {
Blue Swirlcc5bea62012-04-14 14:56:48 +00002316 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02002317 if (memory_region_is_ram(section->mr)) {
2318 section = &phys_sections[phys_section_rom];
2319 }
2320 io_mem_write(section->mr, addr, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00002321 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02002322 unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002323 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002324 + memory_region_section_addr(section, addr);
pbrook5579c7f2009-04-11 14:47:08 +00002325 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00002326 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00002327
2328 if (unlikely(in_migration)) {
2329 if (!cpu_physical_memory_is_dirty(addr1)) {
2330 /* invalidate code */
2331 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2332 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002333 cpu_physical_memory_set_dirty_flags(
2334 addr1, (0xff & ~CODE_DIRTY_FLAG));
aliguori74576192008-10-06 14:02:03 +00002335 }
2336 }
bellard8df1cd02005-01-28 22:37:22 +00002337 }
2338}
2339
Avi Kivitya8170e52012-10-23 12:30:10 +02002340void stq_phys_notdirty(hwaddr addr, uint64_t val)
j_mayerbc98a7e2007-04-04 07:55:12 +00002341{
j_mayerbc98a7e2007-04-04 07:55:12 +00002342 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02002343 MemoryRegionSection *section;
j_mayerbc98a7e2007-04-04 07:55:12 +00002344
Avi Kivityac1970f2012-10-03 16:22:53 +02002345 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00002346
Avi Kivityf3705d52012-03-08 16:16:34 +02002347 if (!memory_region_is_ram(section->mr) || section->readonly) {
Blue Swirlcc5bea62012-04-14 14:56:48 +00002348 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02002349 if (memory_region_is_ram(section->mr)) {
2350 section = &phys_sections[phys_section_rom];
2351 }
j_mayerbc98a7e2007-04-04 07:55:12 +00002352#ifdef TARGET_WORDS_BIGENDIAN
Avi Kivity37ec01d2012-03-08 18:08:35 +02002353 io_mem_write(section->mr, addr, val >> 32, 4);
2354 io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
j_mayerbc98a7e2007-04-04 07:55:12 +00002355#else
Avi Kivity37ec01d2012-03-08 18:08:35 +02002356 io_mem_write(section->mr, addr, (uint32_t)val, 4);
2357 io_mem_write(section->mr, addr + 4, val >> 32, 4);
j_mayerbc98a7e2007-04-04 07:55:12 +00002358#endif
2359 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02002360 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002361 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002362 + memory_region_section_addr(section, addr));
j_mayerbc98a7e2007-04-04 07:55:12 +00002363 stq_p(ptr, val);
2364 }
2365}
2366
/* warning: addr must be aligned */
static inline void stl_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}
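
/*
 * Usage sketch (illustrative only; ring_base, idx and DESC_DONE are
 * hypothetical names, not part of this file): a device model that keeps
 * a little-endian descriptor ring in guest RAM can complete an entry
 * with a fixed-endian store, independent of host and target byte order:
 *
 *     stl_le_phys(ring_base + idx * 16 + 12, DESC_DONE);
 */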

void stl_be_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

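/* A single byte has no byte order, so stb_phys needs no _le/_be variants. */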
/* XXX: optimize */
void stb_phys(hwaddr addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 2);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

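/*
 * The 64-bit stores below take the generic cpu_physical_memory_write()
 * path with the value byte-swapped up front (tswap64 swaps only when
 * target and host byte order differ), rather than an aligned fast path
 * like the 16/32-bit helpers above.
 */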
/* XXX: optimize */
void stq_phys(hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

/* virtual memory access for debug (includes writing to ROM) */
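/*
 * The range is walked page by page: each guest-virtual page is
 * translated with cpu_get_phys_page_debug() and then accessed through
 * the physical helpers, so this works even for pages the TLB has never
 * seen.  A debugger-style caller might do (illustrative only):
 *
 *     uint32_t insn;
 *     if (cpu_memory_rw_debug(env, pc, (uint8_t *)&insn, 4, 0) == 0) {
 *         ... decode insn ...
 *     }
 */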
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        } else {
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif

#if !defined(CONFIG_USER_ONLY)

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#endif

#ifndef CONFIG_USER_ONLY
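/*
 * Returns true if the physical address is backed by a device (MMIO)
 * region rather than by RAM or by a ROM device in its directly-readable
 * (ROMD) mode.  Illustrative use, e.g. when deciding whether a page can
 * be read safely while dumping guest memory:
 *
 *     if (cpu_physical_memory_is_io(paddr)) {
 *         continue;    (skip device-backed pages)
 *     }
 */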
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             phys_addr >> TARGET_PAGE_BITS);

    return !(memory_region_is_ram(section->mr) ||
             memory_region_is_romd(section->mr));
}
#endif