/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "hw/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"

//#define DEBUG_UNASSIGNED
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;
DMAContext dma_context_memory;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

#if !defined(CONFIG_USER_ONLY)

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}

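/* Recursively populate the physical page map below *lp: allocate
   intermediate nodes on demand and, where an aligned run of at least
   `step` pages remains, mark the entry as a leaf pointing at `leaf`;
   otherwise descend one level and repeat. */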
static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

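/* Look up `index` (a physical page number) in the dispatch tree and return
   the MemoryRegionSection covering it; unmapped pages resolve to the
   unassigned section. */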
MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
{
    PhysPageEntry lp = d->phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUArchState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUArchState),
        VMSTATE_UINT32(interrupt_request, CPUArchState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUArchState *qemu_get_cpu(int cpu)
{
    CPUArchState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

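/* Register a newly created CPU: link it onto the global first_cpu list,
   give it the next free cpu_index, and, for softmmu builds that define
   CPU_SAVE_VERSION, register its savevm/VMState handlers. */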
void cpu_exec_init(CPUArchState *env)
{
#ifndef CONFIG_USER_ONLY
    CPUState *cpu = ENV_GET_CPU(env);
#endif
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)

{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

void cpu_reset_interrupt(CPUArchState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUArchState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);

}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

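/* Compute the iotlb value stored in a softmmu TLB entry: for RAM it is the
   page's ram_addr_t combined with the notdirty or rom section index; for
   MMIO it is the index of the section itself.  Pages covered by a
   watchpoint are redirected to the watchpoint section and marked TLB_MMIO. */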
hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, paddr);
        if (!section->readonly) {
            iotlb |= phys_section_notdirty;
        } else {
            iotlb |= phys_section_rom;
        }
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = section - phys_sections;
        iotlb += memory_region_section_addr(section, paddr);
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(hwaddr base);
static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
        if (!p[i].is_leaf) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].ptr);
        }
    }
    lp->is_leaf = 0;
    lp->ptr = PHYS_MAP_NODE_NIL;
}

static void destroy_all_mappings(AddressSpaceDispatch *d)
{
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}

static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}

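/* Map a section that covers only part of a target page: reuse (or create)
   the subpage_t for that page and register the section within it. */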
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + section->size - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}


static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    hwaddr addr;
    uint16_t section_index = phys_section_add(section);

    assert(size);

    addr = start_addr;
    phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
                  section_index);
}

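/* Add a memory region section to the dispatch map: split it into a leading
   partial page, a run of whole pages and a trailing partial page, and
   register each piece as a subpage or multipage mapping. */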
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
    MemoryRegionSection now = *section, remain = *section;

    if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
        || (now.size < TARGET_PAGE_SIZE)) {
        now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space,
                       now.size);
        register_subpage(d, &now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    while (remain.size >= TARGET_PAGE_SIZE) {
        now = remain;
        if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = TARGET_PAGE_SIZE;
            register_subpage(d, &now);
        } else {
            now.size &= TARGET_PAGE_MASK;
            register_multipage(d, &now);
        }
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    now = remain;
    if (now.size) {
        register_subpage(d, &now);
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC 0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

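/* Allocate guest RAM backed by a file on a hugetlbfs mount: create and
   unlink a temporary file under `path`, size it with ftruncate(), mmap()
   it and remember the fd in the RAM block. */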
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    unlink(filename);
    free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }
    block->fd = fd;
    return area;
}
#endif

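/* Choose a ram_addr_t offset for a new block of `size` bytes: scan the
   existing blocks and return the start of the smallest gap that still
   fits the request, aborting if none does. */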
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;
    QemuOpts *machine_opts;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (machine_opts &&
        !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                    "but dump_guest_core=off specified\n");
        }
    }
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    QemuOpts *opts;

    opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

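/* Allocate a new RAM block of `size` bytes.  The backing memory is the
   caller-supplied `host` pointer when given, otherwise it comes from
   -mem-path (hugetlbfs), Xen, KVM or plain qemu_vmalloc().  The block is
   inserted into ram_list (kept sorted by size) and its dirty bitmap
   entries are initialised. */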
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *block, *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                memory_try_enable_merging(new_block->host, size);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_vmalloc(size);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    /* Keep the list sorted from biggest to smallest block.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            g_free(block);
            return;
        }
    }
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            g_free(block);
            return;
        }
    }

}

#ifndef _WIN32
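/* Discard the host mapping of a range of guest RAM and recreate it with the
   same kind of backing (file, anonymous or shared) that the original
   allocation used; the previous contents of the range are lost. */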
1133void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1134{
1135 RAMBlock *block;
1136 ram_addr_t offset;
1137 int flags;
1138 void *area, *vaddr;
1139
Paolo Bonzinia3161032012-11-14 15:54:48 +01001140 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001141 offset = addr - block->offset;
1142 if (offset < block->length) {
1143 vaddr = block->host + offset;
1144 if (block->flags & RAM_PREALLOC_MASK) {
1145 ;
1146 } else {
1147 flags = MAP_FIXED;
1148 munmap(vaddr, length);
1149 if (mem_path) {
1150#if defined(__linux__) && !defined(TARGET_S390X)
1151 if (block->fd) {
1152#ifdef MAP_POPULATE
1153 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1154 MAP_PRIVATE;
1155#else
1156 flags |= MAP_PRIVATE;
1157#endif
1158 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1159 flags, block->fd, offset);
1160 } else {
1161 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1162 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1163 flags, -1, 0);
1164 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01001165#else
1166 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001167#endif
1168 } else {
1169#if defined(TARGET_S390X) && defined(CONFIG_KVM)
1170 flags |= MAP_SHARED | MAP_ANONYMOUS;
1171 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
1172 flags, -1, 0);
1173#else
1174 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1175 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1176 flags, -1, 0);
1177#endif
1178 }
1179 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001180 fprintf(stderr, "Could not remap addr: "
1181 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001182 length, addr);
1183 exit(1);
1184 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001185 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001186 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001187 }
1188 return;
1189 }
1190 }
1191}
1192#endif /* !_WIN32 */
1193
pbrookdc828ca2009-04-09 22:21:07 +00001194/* Return a host pointer to ram allocated with qemu_ram_alloc.
pbrook5579c7f2009-04-11 14:47:08 +00001195 With the exception of the softmmu code in this file, this should
1196 only be used for local memory (e.g. video ram) that the device owns,
1197 and knows it isn't going to access beyond the end of the block.
1198
1199 It should not be used for general purpose DMA.
1200 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1201 */
Anthony Liguoric227f092009-10-01 16:12:16 -05001202void *qemu_get_ram_ptr(ram_addr_t addr)
pbrookdc828ca2009-04-09 22:21:07 +00001203{
pbrook94a6b542009-04-11 17:15:54 +00001204 RAMBlock *block;
1205
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001206 block = ram_list.mru_block;
1207 if (block && addr - block->offset < block->length) {
1208 goto found;
1209 }
Paolo Bonzinia3161032012-11-14 15:54:48 +01001210 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Alex Williamsonf471a172010-06-11 11:11:42 -06001211 if (addr - block->offset < block->length) {
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001212 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001213 }
pbrook94a6b542009-04-11 17:15:54 +00001214 }
Alex Williamsonf471a172010-06-11 11:11:42 -06001215
1216 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1217 abort();
1218
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001219found:
1220 ram_list.mru_block = block;
1221 if (xen_enabled()) {
1222 /* We need to check if the requested address is in the RAM
1223 * because we don't want to map the entire memory in QEMU.
1224 * In that case just map until the end of the page.
1225 */
1226 if (block->offset == 0) {
1227 return xen_map_cache(addr, 0, 0);
1228 } else if (block->host == NULL) {
1229 block->host =
1230 xen_map_cache(block->offset, block->length, 1);
1231 }
1232 }
1233 return block->host + (addr - block->offset);
pbrookdc828ca2009-04-09 22:21:07 +00001234}
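/*
 * Illustrative usage sketch (fb_offset and fb_size are hypothetical): a
 * device model that owns a RAM block -- e.g. a video adapter clearing its
 * framebuffer -- is the intended caller of this helper:
 *
 *     void *fb = qemu_get_ram_ptr(fb_offset);
 *     memset(fb, 0, fb_size);
 *     qemu_put_ram_ptr(fb);
 *
 * The pointer is only meaningful within the block backing fb_offset; for
 * general-purpose DMA, use cpu_physical_memory_map()/unmap() further down.
 */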
1235
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001236/* Return a host pointer to ram allocated with qemu_ram_alloc. Same as
1237 * qemu_get_ram_ptr, but does not touch ram_list.mru_block.
1238 *
1239 * ??? Is this still necessary?
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001240 */
Blue Swirl8b9c99d2012-10-28 11:04:51 +00001241static void *qemu_safe_ram_ptr(ram_addr_t addr)
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001242{
1243 RAMBlock *block;
1244
Paolo Bonzinia3161032012-11-14 15:54:48 +01001245 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001246 if (addr - block->offset < block->length) {
Jan Kiszka868bb332011-06-21 22:59:09 +02001247 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001248 /* We need to check whether the requested address is in RAM
1249 * because we don't want to map all of guest memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001250 * In that case, just map up to the end of the requested page.
Jun Nakajima432d2682010-08-31 16:41:25 +01001251 */
1252 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001253 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01001254 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001255 block->host =
1256 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01001257 }
1258 }
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001259 return block->host + (addr - block->offset);
1260 }
1261 }
1262
1263 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1264 abort();
1265
1266 return NULL;
1267}
1268
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001269/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1270 * but takes a size argument */
Blue Swirl8b9c99d2012-10-28 11:04:51 +00001271static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001272{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001273 if (*size == 0) {
1274 return NULL;
1275 }
Jan Kiszka868bb332011-06-21 22:59:09 +02001276 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001277 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02001278 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001279 RAMBlock *block;
1280
Paolo Bonzinia3161032012-11-14 15:54:48 +01001281 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001282 if (addr - block->offset < block->length) {
1283 if (addr - block->offset + *size > block->length)
1284 *size = block->length - addr + block->offset;
1285 return block->host + (addr - block->offset);
1286 }
1287 }
1288
1289 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1290 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001291 }
1292}
1293
Anthony PERARD050a0dd2010-09-16 13:57:49 +01001294void qemu_put_ram_ptr(void *addr)
1295{
1296 trace_qemu_put_ram_ptr(addr);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01001297}
1298
Marcelo Tosattie8902612010-10-11 15:31:19 -03001299int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00001300{
pbrook94a6b542009-04-11 17:15:54 +00001301 RAMBlock *block;
1302 uint8_t *host = ptr;
1303
Jan Kiszka868bb332011-06-21 22:59:09 +02001304 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001305 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001306 return 0;
1307 }
1308
Paolo Bonzinia3161032012-11-14 15:54:48 +01001309 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001310 /* This case happens when the block is not mapped. */
1311 if (block->host == NULL) {
1312 continue;
1313 }
Alex Williamsonf471a172010-06-11 11:11:42 -06001314 if (host - block->host < block->length) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03001315 *ram_addr = block->offset + (host - block->host);
1316 return 0;
Alex Williamsonf471a172010-06-11 11:11:42 -06001317 }
pbrook94a6b542009-04-11 17:15:54 +00001318 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001319
Marcelo Tosattie8902612010-10-11 15:31:19 -03001320 return -1;
1321}
Alex Williamsonf471a172010-06-11 11:11:42 -06001322
Marcelo Tosattie8902612010-10-11 15:31:19 -03001323/* Some of the softmmu routines need to translate from a host pointer
1324 (typically a TLB entry) back to a ram offset. */
1325ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
1326{
1327 ram_addr_t ram_addr;
Alex Williamsonf471a172010-06-11 11:11:42 -06001328
Marcelo Tosattie8902612010-10-11 15:31:19 -03001329 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
1330 fprintf(stderr, "Bad ram pointer %p\n", ptr);
1331 abort();
1332 }
1333 return ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00001334}
1335
Avi Kivitya8170e52012-10-23 12:30:10 +02001336static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001337 unsigned size)
bellard33417e72003-08-10 21:47:01 +00001338{
pbrook67d3b952006-12-18 05:03:52 +00001339#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00001340 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
pbrook67d3b952006-12-18 05:03:52 +00001341#endif
Richard Henderson5b450402011-04-18 16:13:12 -07001342#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001343 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00001344#endif
1345 return 0;
1346}
1347
Avi Kivitya8170e52012-10-23 12:30:10 +02001348static void unassigned_mem_write(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001349 uint64_t val, unsigned size)
blueswir1e18231a2008-10-06 18:46:28 +00001350{
1351#ifdef DEBUG_UNASSIGNED
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001352 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
blueswir1e18231a2008-10-06 18:46:28 +00001353#endif
Richard Henderson5b450402011-04-18 16:13:12 -07001354#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001355 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00001356#endif
1357}
1358
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001359static const MemoryRegionOps unassigned_mem_ops = {
1360 .read = unassigned_mem_read,
1361 .write = unassigned_mem_write,
1362 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00001363};
1364
Avi Kivitya8170e52012-10-23 12:30:10 +02001365static uint64_t error_mem_read(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001366 unsigned size)
1367{
1368 abort();
1369}
1370
Avi Kivitya8170e52012-10-23 12:30:10 +02001371static void error_mem_write(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001372 uint64_t value, unsigned size)
1373{
1374 abort();
1375}
1376
1377static const MemoryRegionOps error_mem_ops = {
1378 .read = error_mem_read,
1379 .write = error_mem_write,
1380 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00001381};
1382
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001383static const MemoryRegionOps rom_mem_ops = {
1384 .read = error_mem_read,
1385 .write = unassigned_mem_write,
1386 .endianness = DEVICE_NATIVE_ENDIAN,
1387};
1388
Avi Kivitya8170e52012-10-23 12:30:10 +02001389static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001390 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00001391{
bellard3a7d9292005-08-21 09:26:42 +00001392 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001393 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00001394 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1395#if !defined(CONFIG_USER_ONLY)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001396 tb_invalidate_phys_page_fast(ram_addr, size);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001397 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00001398#endif
1399 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001400 switch (size) {
1401 case 1:
1402 stb_p(qemu_get_ram_ptr(ram_addr), val);
1403 break;
1404 case 2:
1405 stw_p(qemu_get_ram_ptr(ram_addr), val);
1406 break;
1407 case 4:
1408 stl_p(qemu_get_ram_ptr(ram_addr), val);
1409 break;
1410 default:
1411 abort();
1412 }
bellardf23db162005-08-21 19:12:28 +00001413 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001414 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00001415 /* we remove the notdirty callback only if the code has been
1416 flushed */
1417 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00001418 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00001419}
1420
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001421static const MemoryRegionOps notdirty_mem_ops = {
1422 .read = error_mem_read,
1423 .write = notdirty_mem_write,
1424 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00001425};
1426
pbrook0f459d12008-06-09 00:20:13 +00001427/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00001428static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00001429{
Andreas Färber9349b4f2012-03-14 01:38:32 +01001430 CPUArchState *env = cpu_single_env;
aliguori06d55cc2008-11-18 20:24:06 +00001431 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00001432 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00001433 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00001434 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00001435
aliguori06d55cc2008-11-18 20:24:06 +00001436 if (env->watchpoint_hit) {
1437 /* We re-entered the check after replacing the TB. Now raise
1438 * the debug interrupt so that it will trigger after the
1439 * current instruction. */
1440 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
1441 return;
1442 }
pbrook2e70f6e2008-06-29 01:03:05 +00001443 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00001444 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001445 if ((vaddr == (wp->vaddr & len_mask) ||
1446 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00001447 wp->flags |= BP_WATCHPOINT_HIT;
1448 if (!env->watchpoint_hit) {
1449 env->watchpoint_hit = wp;
Blue Swirl5a316522012-12-02 21:28:09 +00001450 tb_check_watchpoint(env);
aliguori6e140f22008-11-18 20:37:55 +00001451 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1452 env->exception_index = EXCP_DEBUG;
Max Filippov488d6572012-01-29 02:24:39 +04001453 cpu_loop_exit(env);
aliguori6e140f22008-11-18 20:37:55 +00001454 } else {
1455 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1456 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
Max Filippov488d6572012-01-29 02:24:39 +04001457 cpu_resume_from_signal(env, NULL);
aliguori6e140f22008-11-18 20:37:55 +00001458 }
aliguori06d55cc2008-11-18 20:24:06 +00001459 }
aliguori6e140f22008-11-18 20:37:55 +00001460 } else {
1461 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00001462 }
1463 }
1464}
1465
pbrook6658ffb2007-03-16 23:58:11 +00001466/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1467 so these check for a hit then pass through to the normal out-of-line
1468 phys routines. */
Avi Kivitya8170e52012-10-23 12:30:10 +02001469static uint64_t watch_mem_read(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001470 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001471{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001472 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1473 switch (size) {
1474 case 1: return ldub_phys(addr);
1475 case 2: return lduw_phys(addr);
1476 case 4: return ldl_phys(addr);
1477 default: abort();
1478 }
pbrook6658ffb2007-03-16 23:58:11 +00001479}
1480
Avi Kivitya8170e52012-10-23 12:30:10 +02001481static void watch_mem_write(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001482 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001483{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001484 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1485 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04001486 case 1:
1487 stb_phys(addr, val);
1488 break;
1489 case 2:
1490 stw_phys(addr, val);
1491 break;
1492 case 4:
1493 stl_phys(addr, val);
1494 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02001495 default: abort();
1496 }
pbrook6658ffb2007-03-16 23:58:11 +00001497}
1498
Avi Kivity1ec9b902012-01-02 12:47:48 +02001499static const MemoryRegionOps watch_mem_ops = {
1500 .read = watch_mem_read,
1501 .write = watch_mem_write,
1502 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00001503};
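/*
 * Illustrative sketch of how these handlers get exercised (vaddr is a
 * hypothetical guest address; this assumes the cpu_watchpoint_insert() API
 * of this period): a debug front end arms a watchpoint, the TLB entry for
 * that page is redirected at io_mem_watch, and subsequent guest accesses
 * reach watch_mem_read()/watch_mem_write() above:
 *
 *     CPUWatchpoint *wp;
 *     cpu_watchpoint_insert(env, vaddr, 4, BP_MEM_WRITE, &wp);
 */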
pbrook6658ffb2007-03-16 23:58:11 +00001504
Avi Kivitya8170e52012-10-23 12:30:10 +02001505static uint64_t subpage_read(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001506 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001507{
Avi Kivity70c68e42012-01-02 12:32:48 +02001508 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07001509 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02001510 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00001511#if defined(DEBUG_SUBPAGE)
1512 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
1513 mmio, len, addr, idx);
1514#endif
blueswir1db7b5422007-05-26 17:36:03 +00001515
Avi Kivity5312bd82012-02-12 18:32:55 +02001516 section = &phys_sections[mmio->sub_section[idx]];
1517 addr += mmio->base;
1518 addr -= section->offset_within_address_space;
1519 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02001520 return io_mem_read(section->mr, addr, len);
blueswir1db7b5422007-05-26 17:36:03 +00001521}
1522
Avi Kivitya8170e52012-10-23 12:30:10 +02001523static void subpage_write(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001524 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001525{
Avi Kivity70c68e42012-01-02 12:32:48 +02001526 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07001527 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02001528 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00001529#if defined(DEBUG_SUBPAGE)
Avi Kivity70c68e42012-01-02 12:32:48 +02001530 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
1531 " idx %d value %"PRIx64"\n",
Richard Hendersonf6405242010-04-22 16:47:31 -07001532 __func__, mmio, len, addr, idx, value);
blueswir1db7b5422007-05-26 17:36:03 +00001533#endif
Richard Hendersonf6405242010-04-22 16:47:31 -07001534
Avi Kivity5312bd82012-02-12 18:32:55 +02001535 section = &phys_sections[mmio->sub_section[idx]];
1536 addr += mmio->base;
1537 addr -= section->offset_within_address_space;
1538 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02001539 io_mem_write(section->mr, addr, value, len);
blueswir1db7b5422007-05-26 17:36:03 +00001540}
1541
Avi Kivity70c68e42012-01-02 12:32:48 +02001542static const MemoryRegionOps subpage_ops = {
1543 .read = subpage_read,
1544 .write = subpage_write,
1545 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00001546};
1547
Avi Kivitya8170e52012-10-23 12:30:10 +02001548static uint64_t subpage_ram_read(void *opaque, hwaddr addr,
Avi Kivityde712f92012-01-02 12:41:07 +02001549 unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01001550{
1551 ram_addr_t raddr = addr;
1552 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02001553 switch (size) {
1554 case 1: return ldub_p(ptr);
1555 case 2: return lduw_p(ptr);
1556 case 4: return ldl_p(ptr);
1557 default: abort();
1558 }
Andreas Färber56384e82011-11-30 16:26:21 +01001559}
1560
Avi Kivitya8170e52012-10-23 12:30:10 +02001561static void subpage_ram_write(void *opaque, hwaddr addr,
Avi Kivityde712f92012-01-02 12:41:07 +02001562 uint64_t value, unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01001563{
1564 ram_addr_t raddr = addr;
1565 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02001566 switch (size) {
1567 case 1: return stb_p(ptr, value);
1568 case 2: return stw_p(ptr, value);
1569 case 4: return stl_p(ptr, value);
1570 default: abort();
1571 }
Andreas Färber56384e82011-11-30 16:26:21 +01001572}
1573
Avi Kivityde712f92012-01-02 12:41:07 +02001574static const MemoryRegionOps subpage_ram_ops = {
1575 .read = subpage_ram_read,
1576 .write = subpage_ram_write,
1577 .endianness = DEVICE_NATIVE_ENDIAN,
Andreas Färber56384e82011-11-30 16:26:21 +01001578};
1579
Anthony Liguoric227f092009-10-01 16:12:16 -05001580static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02001581 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00001582{
1583 int idx, eidx;
1584
1585 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1586 return -1;
1587 idx = SUBPAGE_IDX(start);
1588 eidx = SUBPAGE_IDX(end);
1589#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00001590 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", __func__,
blueswir1db7b5422007-05-26 17:36:03 +00001591 mmio, start, end, idx, eidx, section);
1592#endif
Avi Kivity5312bd82012-02-12 18:32:55 +02001593 if (memory_region_is_ram(phys_sections[section].mr)) {
1594 MemoryRegionSection new_section = phys_sections[section];
1595 new_section.mr = &io_mem_subpage_ram;
1596 section = phys_section_add(&new_section);
Andreas Färber56384e82011-11-30 16:26:21 +01001597 }
blueswir1db7b5422007-05-26 17:36:03 +00001598 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02001599 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00001600 }
1601
1602 return 0;
1603}
1604
Avi Kivitya8170e52012-10-23 12:30:10 +02001605static subpage_t *subpage_init(hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00001606{
Anthony Liguoric227f092009-10-01 16:12:16 -05001607 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00001608
Anthony Liguori7267c092011-08-20 22:09:37 -05001609 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00001610
1611 mmio->base = base;
Avi Kivity70c68e42012-01-02 12:32:48 +02001612 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
1613 "subpage", TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02001614 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00001615#if defined(DEBUG_SUBPAGE)
aliguori1eec6142009-02-05 22:06:18 +00001616 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
1617 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00001618#endif
Avi Kivity0f0cb162012-02-13 17:14:32 +02001619 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
blueswir1db7b5422007-05-26 17:36:03 +00001620
1621 return mmio;
1622}
1623
Avi Kivity5312bd82012-02-12 18:32:55 +02001624static uint16_t dummy_section(MemoryRegion *mr)
1625{
1626 MemoryRegionSection section = {
1627 .mr = mr,
1628 .offset_within_address_space = 0,
1629 .offset_within_region = 0,
1630 .size = UINT64_MAX,
1631 };
1632
1633 return phys_section_add(&section);
1634}
1635
Avi Kivitya8170e52012-10-23 12:30:10 +02001636MemoryRegion *iotlb_to_region(hwaddr index)
Avi Kivityaa102232012-03-08 17:06:55 +02001637{
Avi Kivity37ec01d2012-03-08 18:08:35 +02001638 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02001639}
1640
Avi Kivitye9179ce2009-06-14 11:38:52 +03001641static void io_mem_init(void)
1642{
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001643 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001644 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
1645 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
1646 "unassigned", UINT64_MAX);
1647 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
1648 "notdirty", UINT64_MAX);
Avi Kivityde712f92012-01-02 12:41:07 +02001649 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
1650 "subpage-ram", UINT64_MAX);
Avi Kivity1ec9b902012-01-02 12:47:48 +02001651 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
1652 "watch", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03001653}
1654
Avi Kivityac1970f2012-10-03 16:22:53 +02001655static void mem_begin(MemoryListener *listener)
1656{
1657 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
1658
1659 destroy_all_mappings(d);
1660 d->phys_map.ptr = PHYS_MAP_NODE_NIL;
1661}
1662
Avi Kivity50c1e142012-02-08 21:36:02 +02001663static void core_begin(MemoryListener *listener)
1664{
Avi Kivity5312bd82012-02-12 18:32:55 +02001665 phys_sections_clear();
1666 phys_section_unassigned = dummy_section(&io_mem_unassigned);
Avi Kivityaa102232012-03-08 17:06:55 +02001667 phys_section_notdirty = dummy_section(&io_mem_notdirty);
1668 phys_section_rom = dummy_section(&io_mem_rom);
1669 phys_section_watch = dummy_section(&io_mem_watch);
Avi Kivity50c1e142012-02-08 21:36:02 +02001670}
1671
Avi Kivity1d711482012-10-02 18:54:45 +02001672static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02001673{
Andreas Färber9349b4f2012-03-14 01:38:32 +01001674 CPUArchState *env;
Avi Kivity117712c2012-02-12 21:23:17 +02001675
1676 /* since each CPU stores ram addresses in its TLB cache, we must
1677 reset the modified entries */
1678 /* XXX: slow ! */
1679 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1680 tlb_flush(env, 1);
1681 }
Avi Kivity50c1e142012-02-08 21:36:02 +02001682}
1683
Avi Kivity93632742012-02-08 16:54:16 +02001684static void core_log_global_start(MemoryListener *listener)
1685{
1686 cpu_physical_memory_set_dirty_tracking(1);
1687}
1688
1689static void core_log_global_stop(MemoryListener *listener)
1690{
1691 cpu_physical_memory_set_dirty_tracking(0);
1692}
1693
Avi Kivity4855d412012-02-08 21:16:05 +02001694static void io_region_add(MemoryListener *listener,
1695 MemoryRegionSection *section)
1696{
Avi Kivitya2d33522012-03-05 17:40:12 +02001697 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
1698
1699 mrio->mr = section->mr;
1700 mrio->offset = section->offset_within_region;
1701 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
Avi Kivity4855d412012-02-08 21:16:05 +02001702 section->offset_within_address_space, section->size);
Avi Kivitya2d33522012-03-05 17:40:12 +02001703 ioport_register(&mrio->iorange);
Avi Kivity4855d412012-02-08 21:16:05 +02001704}
1705
1706static void io_region_del(MemoryListener *listener,
1707 MemoryRegionSection *section)
1708{
1709 isa_unassign_ioport(section->offset_within_address_space, section->size);
1710}
1711
Avi Kivity93632742012-02-08 16:54:16 +02001712static MemoryListener core_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02001713 .begin = core_begin,
Avi Kivity93632742012-02-08 16:54:16 +02001714 .log_global_start = core_log_global_start,
1715 .log_global_stop = core_log_global_stop,
Avi Kivityac1970f2012-10-03 16:22:53 +02001716 .priority = 1,
Avi Kivity93632742012-02-08 16:54:16 +02001717};
1718
Avi Kivity4855d412012-02-08 21:16:05 +02001719static MemoryListener io_memory_listener = {
1720 .region_add = io_region_add,
1721 .region_del = io_region_del,
Avi Kivity4855d412012-02-08 21:16:05 +02001722 .priority = 0,
1723};
1724
Avi Kivity1d711482012-10-02 18:54:45 +02001725static MemoryListener tcg_memory_listener = {
1726 .commit = tcg_commit,
1727};
1728
Avi Kivityac1970f2012-10-03 16:22:53 +02001729void address_space_init_dispatch(AddressSpace *as)
1730{
1731 AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
1732
1733 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
1734 d->listener = (MemoryListener) {
1735 .begin = mem_begin,
1736 .region_add = mem_add,
1737 .region_nop = mem_add,
1738 .priority = 0,
1739 };
1740 as->dispatch = d;
1741 memory_listener_register(&d->listener, as);
1742}
1743
Avi Kivity83f3c252012-10-07 12:59:55 +02001744void address_space_destroy_dispatch(AddressSpace *as)
1745{
1746 AddressSpaceDispatch *d = as->dispatch;
1747
1748 memory_listener_unregister(&d->listener);
1749 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
1750 g_free(d);
1751 as->dispatch = NULL;
1752}
1753
Avi Kivity62152b82011-07-26 14:26:14 +03001754static void memory_map_init(void)
1755{
Anthony Liguori7267c092011-08-20 22:09:37 -05001756 system_memory = g_malloc(sizeof(*system_memory));
Avi Kivity8417ceb2011-08-03 11:56:14 +03001757 memory_region_init(system_memory, "system", INT64_MAX);
Avi Kivity2673a5d2012-10-02 18:49:28 +02001758 address_space_init(&address_space_memory, system_memory);
1759 address_space_memory.name = "memory";
Avi Kivity309cb472011-08-08 16:09:03 +03001760
Anthony Liguori7267c092011-08-20 22:09:37 -05001761 system_io = g_malloc(sizeof(*system_io));
Avi Kivity309cb472011-08-08 16:09:03 +03001762 memory_region_init(system_io, "io", 65536);
Avi Kivity2673a5d2012-10-02 18:49:28 +02001763 address_space_init(&address_space_io, system_io);
1764 address_space_io.name = "I/O";
Avi Kivity93632742012-02-08 16:54:16 +02001765
Avi Kivityf6790af2012-10-02 20:13:51 +02001766 memory_listener_register(&core_memory_listener, &address_space_memory);
1767 memory_listener_register(&io_memory_listener, &address_space_io);
1768 memory_listener_register(&tcg_memory_listener, &address_space_memory);
Peter Maydell9e119082012-10-29 11:34:32 +10001769
1770 dma_context_init(&dma_context_memory, &address_space_memory,
1771 NULL, NULL, NULL);
Avi Kivity62152b82011-07-26 14:26:14 +03001772}
1773
1774MemoryRegion *get_system_memory(void)
1775{
1776 return system_memory;
1777}
1778
Avi Kivity309cb472011-08-08 16:09:03 +03001779MemoryRegion *get_system_io(void)
1780{
1781 return system_io;
1782}
1783
pbrooke2eef172008-06-08 01:09:01 +00001784#endif /* !defined(CONFIG_USER_ONLY) */
1785
bellard13eb76e2004-01-24 15:23:36 +00001786/* physical memory access (slow version, mainly for debug) */
1787#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +01001788int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00001789 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00001790{
1791 int l, flags;
1792 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00001793 void * p;
bellard13eb76e2004-01-24 15:23:36 +00001794
1795 while (len > 0) {
1796 page = addr & TARGET_PAGE_MASK;
1797 l = (page + TARGET_PAGE_SIZE) - addr;
1798 if (l > len)
1799 l = len;
1800 flags = page_get_flags(page);
1801 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00001802 return -1;
bellard13eb76e2004-01-24 15:23:36 +00001803 if (is_write) {
1804 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00001805 return -1;
bellard579a97f2007-11-11 14:26:47 +00001806 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00001807 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00001808 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00001809 memcpy(p, buf, l);
1810 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00001811 } else {
1812 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00001813 return -1;
bellard579a97f2007-11-11 14:26:47 +00001814 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00001815 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00001816 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00001817 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00001818 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00001819 }
1820 len -= l;
1821 buf += l;
1822 addr += l;
1823 }
Paul Brooka68fe892010-03-01 00:08:59 +00001824 return 0;
bellard13eb76e2004-01-24 15:23:36 +00001825}
bellard8df1cd02005-01-28 22:37:22 +00001826
bellard13eb76e2004-01-24 15:23:36 +00001827#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001828
Avi Kivitya8170e52012-10-23 12:30:10 +02001829static void invalidate_and_set_dirty(hwaddr addr,
1830 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001831{
1832 if (!cpu_physical_memory_is_dirty(addr)) {
1833 /* invalidate code */
1834 tb_invalidate_phys_page_range(addr, addr + length, 0);
1835 /* set dirty bit */
1836 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1837 }
Anthony PERARDe2269392012-10-03 13:49:22 +00001838 xen_modified_memory(addr, length);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001839}
1840
Avi Kivitya8170e52012-10-23 12:30:10 +02001841void address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02001842 int len, bool is_write)
bellard13eb76e2004-01-24 15:23:36 +00001843{
Avi Kivityac1970f2012-10-03 16:22:53 +02001844 AddressSpaceDispatch *d = as->dispatch;
Avi Kivity37ec01d2012-03-08 18:08:35 +02001845 int l;
bellard13eb76e2004-01-24 15:23:36 +00001846 uint8_t *ptr;
1847 uint32_t val;
Avi Kivitya8170e52012-10-23 12:30:10 +02001848 hwaddr page;
Avi Kivityf3705d52012-03-08 16:16:34 +02001849 MemoryRegionSection *section;
ths3b46e622007-09-17 08:09:54 +00001850
bellard13eb76e2004-01-24 15:23:36 +00001851 while (len > 0) {
1852 page = addr & TARGET_PAGE_MASK;
1853 l = (page + TARGET_PAGE_SIZE) - addr;
1854 if (l > len)
1855 l = len;
Avi Kivityac1970f2012-10-03 16:22:53 +02001856 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00001857
bellard13eb76e2004-01-24 15:23:36 +00001858 if (is_write) {
Avi Kivityf3705d52012-03-08 16:16:34 +02001859 if (!memory_region_is_ram(section->mr)) {
Avi Kivitya8170e52012-10-23 12:30:10 +02001860 hwaddr addr1;
Blue Swirlcc5bea62012-04-14 14:56:48 +00001861 addr1 = memory_region_section_addr(section, addr);
bellard6a00d602005-11-21 23:25:50 +00001862 /* XXX: could force cpu_single_env to NULL to avoid
1863 potential bugs */
aurel326c2934d2009-02-18 21:37:17 +00001864 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00001865 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001866 val = ldl_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02001867 io_mem_write(section->mr, addr1, val, 4);
bellard13eb76e2004-01-24 15:23:36 +00001868 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00001869 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00001870 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001871 val = lduw_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02001872 io_mem_write(section->mr, addr1, val, 2);
bellard13eb76e2004-01-24 15:23:36 +00001873 l = 2;
1874 } else {
bellard1c213d12005-09-03 10:49:04 +00001875 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001876 val = ldub_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02001877 io_mem_write(section->mr, addr1, val, 1);
bellard13eb76e2004-01-24 15:23:36 +00001878 l = 1;
1879 }
Avi Kivityf3705d52012-03-08 16:16:34 +02001880 } else if (!section->readonly) {
Anthony PERARD8ca56922011-07-15 04:32:53 +00001881 ram_addr_t addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02001882 addr1 = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00001883 + memory_region_section_addr(section, addr);
bellard13eb76e2004-01-24 15:23:36 +00001884 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00001885 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00001886 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001887 invalidate_and_set_dirty(addr1, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01001888 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00001889 }
1890 } else {
Blue Swirlcc5bea62012-04-14 14:56:48 +00001891 if (!(memory_region_is_ram(section->mr) ||
1892 memory_region_is_romd(section->mr))) {
Avi Kivitya8170e52012-10-23 12:30:10 +02001893 hwaddr addr1;
bellard13eb76e2004-01-24 15:23:36 +00001894 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00001895 addr1 = memory_region_section_addr(section, addr);
aurel326c2934d2009-02-18 21:37:17 +00001896 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00001897 /* 32 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02001898 val = io_mem_read(section->mr, addr1, 4);
bellardc27004e2005-01-03 23:35:10 +00001899 stl_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00001900 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00001901 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00001902 /* 16 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02001903 val = io_mem_read(section->mr, addr1, 2);
bellardc27004e2005-01-03 23:35:10 +00001904 stw_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00001905 l = 2;
1906 } else {
bellard1c213d12005-09-03 10:49:04 +00001907 /* 8 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02001908 val = io_mem_read(section->mr, addr1, 1);
bellardc27004e2005-01-03 23:35:10 +00001909 stb_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00001910 l = 1;
1911 }
1912 } else {
1913 /* RAM case */
Anthony PERARD0a1b3572012-03-19 15:54:34 +00001914 ptr = qemu_get_ram_ptr(section->mr->ram_addr
Blue Swirlcc5bea62012-04-14 14:56:48 +00001915 + memory_region_section_addr(section,
1916 addr));
Avi Kivityf3705d52012-03-08 16:16:34 +02001917 memcpy(buf, ptr, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01001918 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00001919 }
1920 }
1921 len -= l;
1922 buf += l;
1923 addr += l;
1924 }
1925}
bellard8df1cd02005-01-28 22:37:22 +00001926
Avi Kivitya8170e52012-10-23 12:30:10 +02001927void address_space_write(AddressSpace *as, hwaddr addr,
Avi Kivityac1970f2012-10-03 16:22:53 +02001928 const uint8_t *buf, int len)
1929{
1930 address_space_rw(as, addr, (uint8_t *)buf, len, true);
1931}
1932
1933/**
1934 * address_space_read: read from an address space.
1935 *
1936 * @as: #AddressSpace to be accessed
1937 * @addr: address within that address space
1938 * @buf: buffer with the data transferred
 * @len: length of the data to read
1939 */
Avi Kivitya8170e52012-10-23 12:30:10 +02001940void address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02001941{
1942 address_space_rw(as, addr, buf, len, false);
1943}
1944
1945
Avi Kivitya8170e52012-10-23 12:30:10 +02001946void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02001947 int len, int is_write)
1948{
1949 return address_space_rw(&address_space_memory, addr, buf, len, is_write);
1950}
1951
bellardd0ecd2a2006-04-23 17:14:48 +00001952/* used for ROM loading : can write in RAM and ROM */
Avi Kivitya8170e52012-10-23 12:30:10 +02001953void cpu_physical_memory_write_rom(hwaddr addr,
bellardd0ecd2a2006-04-23 17:14:48 +00001954 const uint8_t *buf, int len)
1955{
Avi Kivityac1970f2012-10-03 16:22:53 +02001956 AddressSpaceDispatch *d = address_space_memory.dispatch;
bellardd0ecd2a2006-04-23 17:14:48 +00001957 int l;
1958 uint8_t *ptr;
Avi Kivitya8170e52012-10-23 12:30:10 +02001959 hwaddr page;
Avi Kivityf3705d52012-03-08 16:16:34 +02001960 MemoryRegionSection *section;
ths3b46e622007-09-17 08:09:54 +00001961
bellardd0ecd2a2006-04-23 17:14:48 +00001962 while (len > 0) {
1963 page = addr & TARGET_PAGE_MASK;
1964 l = (page + TARGET_PAGE_SIZE) - addr;
1965 if (l > len)
1966 l = len;
Avi Kivityac1970f2012-10-03 16:22:53 +02001967 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00001968
Blue Swirlcc5bea62012-04-14 14:56:48 +00001969 if (!(memory_region_is_ram(section->mr) ||
1970 memory_region_is_romd(section->mr))) {
bellardd0ecd2a2006-04-23 17:14:48 +00001971 /* do nothing */
1972 } else {
1973 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02001974 addr1 = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00001975 + memory_region_section_addr(section, addr);
bellardd0ecd2a2006-04-23 17:14:48 +00001976 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00001977 ptr = qemu_get_ram_ptr(addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00001978 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001979 invalidate_and_set_dirty(addr1, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01001980 qemu_put_ram_ptr(ptr);
bellardd0ecd2a2006-04-23 17:14:48 +00001981 }
1982 len -= l;
1983 buf += l;
1984 addr += l;
1985 }
1986}
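/*
 * Illustrative sketch (bios_base, bios_data and bios_size are hypothetical):
 * firmware loaders use this helper to place an image into memory that the
 * guest sees as read-only:
 *
 *     cpu_physical_memory_write_rom(bios_base, bios_data, bios_size);
 *
 * A plain cpu_physical_memory_write() to the same range would be silently
 * dropped for a read-only RAM section, whereas this helper patches the
 * backing RAM directly and invalidates any translated code covering it.
 */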
1987
aliguori6d16c2f2009-01-22 16:59:11 +00001988typedef struct {
1989 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02001990 hwaddr addr;
1991 hwaddr len;
aliguori6d16c2f2009-01-22 16:59:11 +00001992} BounceBuffer;
1993
1994static BounceBuffer bounce;
1995
aliguoriba223c22009-01-22 16:59:16 +00001996typedef struct MapClient {
1997 void *opaque;
1998 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00001999 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002000} MapClient;
2001
Blue Swirl72cf2d42009-09-12 07:36:22 +00002002static QLIST_HEAD(map_client_list, MapClient) map_client_list
2003 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002004
2005void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2006{
Anthony Liguori7267c092011-08-20 22:09:37 -05002007 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00002008
2009 client->opaque = opaque;
2010 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002011 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00002012 return client;
2013}
2014
Blue Swirl8b9c99d2012-10-28 11:04:51 +00002015static void cpu_unregister_map_client(void *_client)
aliguoriba223c22009-01-22 16:59:16 +00002016{
2017 MapClient *client = (MapClient *)_client;
2018
Blue Swirl72cf2d42009-09-12 07:36:22 +00002019 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002020 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002021}
2022
2023static void cpu_notify_map_clients(void)
2024{
2025 MapClient *client;
2026
Blue Swirl72cf2d42009-09-12 07:36:22 +00002027 while (!QLIST_EMPTY(&map_client_list)) {
2028 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002029 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09002030 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00002031 }
2032}
2033
aliguori6d16c2f2009-01-22 16:59:11 +00002034/* Map a physical memory region into a host virtual address.
2035 * May map a subset of the requested range, given by and returned in *plen.
2036 * May return NULL if resources needed to perform the mapping are exhausted.
2037 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002038 * Use cpu_register_map_client() to know when retrying the map operation is
2039 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002040 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002041void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002042 hwaddr addr,
2043 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002044 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002045{
Avi Kivityac1970f2012-10-03 16:22:53 +02002046 AddressSpaceDispatch *d = as->dispatch;
Avi Kivitya8170e52012-10-23 12:30:10 +02002047 hwaddr len = *plen;
2048 hwaddr todo = 0;
aliguori6d16c2f2009-01-22 16:59:11 +00002049 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02002050 hwaddr page;
Avi Kivityf3705d52012-03-08 16:16:34 +02002051 MemoryRegionSection *section;
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002052 ram_addr_t raddr = RAM_ADDR_MAX;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002053 ram_addr_t rlen;
2054 void *ret;
aliguori6d16c2f2009-01-22 16:59:11 +00002055
2056 while (len > 0) {
2057 page = addr & TARGET_PAGE_MASK;
2058 l = (page + TARGET_PAGE_SIZE) - addr;
2059 if (l > len)
2060 l = len;
Avi Kivityac1970f2012-10-03 16:22:53 +02002061 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
aliguori6d16c2f2009-01-22 16:59:11 +00002062
Avi Kivityf3705d52012-03-08 16:16:34 +02002063 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002064 if (todo || bounce.buffer) {
aliguori6d16c2f2009-01-22 16:59:11 +00002065 break;
2066 }
2067 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
2068 bounce.addr = addr;
2069 bounce.len = l;
2070 if (!is_write) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002071 address_space_read(as, addr, bounce.buffer, l);
aliguori6d16c2f2009-01-22 16:59:11 +00002072 }
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002073
2074 *plen = l;
2075 return bounce.buffer;
aliguori6d16c2f2009-01-22 16:59:11 +00002076 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002077 if (!todo) {
Avi Kivityf3705d52012-03-08 16:16:34 +02002078 raddr = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002079 + memory_region_section_addr(section, addr);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002080 }
aliguori6d16c2f2009-01-22 16:59:11 +00002081
2082 len -= l;
2083 addr += l;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002084 todo += l;
aliguori6d16c2f2009-01-22 16:59:11 +00002085 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002086 rlen = todo;
2087 ret = qemu_ram_ptr_length(raddr, &rlen);
2088 *plen = rlen;
2089 return ret;
aliguori6d16c2f2009-01-22 16:59:11 +00002090}
2091
Avi Kivityac1970f2012-10-03 16:22:53 +02002092/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002093 * Will also mark the memory as dirty if is_write == 1. access_len gives
2094 * the amount of memory that was actually read or written by the caller.
2095 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002096void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2097 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002098{
2099 if (buffer != bounce.buffer) {
2100 if (is_write) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03002101 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002102 while (access_len) {
2103 unsigned l;
2104 l = TARGET_PAGE_SIZE;
2105 if (l > access_len)
2106 l = access_len;
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002107 invalidate_and_set_dirty(addr1, l);
aliguori6d16c2f2009-01-22 16:59:11 +00002108 addr1 += l;
2109 access_len -= l;
2110 }
2111 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002112 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002113 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002114 }
aliguori6d16c2f2009-01-22 16:59:11 +00002115 return;
2116 }
2117 if (is_write) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002118 address_space_write(as, bounce.addr, bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002119 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00002120 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002121 bounce.buffer = NULL;
aliguoriba223c22009-01-22 16:59:16 +00002122 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00002123}
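/*
 * Illustrative sketch of the intended map/unmap pattern (addr, size,
 * access_len, opaque and retry_dma_cb are hypothetical):
 *
 *     hwaddr len = size;
 *     void *p = address_space_map(&address_space_memory, addr, &len, is_write);
 *     if (!p) {
 *         // mapping resources (the single bounce buffer) are exhausted;
 *         // ask to be notified and retry the transfer from the callback
 *         cpu_register_map_client(opaque, retry_dma_cb);
 *         return;
 *     }
 *     // ... transfer at most 'len' bytes through p ...
 *     address_space_unmap(&address_space_memory, p, len, is_write, access_len);
 *
 * Since *plen can come back smaller than requested (the bounce buffer covers
 * a single page), callers normally loop until the whole transfer is done.
 */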
bellardd0ecd2a2006-04-23 17:14:48 +00002124
Avi Kivitya8170e52012-10-23 12:30:10 +02002125void *cpu_physical_memory_map(hwaddr addr,
2126 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002127 int is_write)
2128{
2129 return address_space_map(&address_space_memory, addr, plen, is_write);
2130}
2131
Avi Kivitya8170e52012-10-23 12:30:10 +02002132void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2133 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002134{
2135 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2136}
2137
bellard8df1cd02005-01-28 22:37:22 +00002138/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002139static inline uint32_t ldl_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002140 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002141{
bellard8df1cd02005-01-28 22:37:22 +00002142 uint8_t *ptr;
2143 uint32_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02002144 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00002145
Avi Kivityac1970f2012-10-03 16:22:53 +02002146 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00002147
Blue Swirlcc5bea62012-04-14 14:56:48 +00002148 if (!(memory_region_is_ram(section->mr) ||
2149 memory_region_is_romd(section->mr))) {
bellard8df1cd02005-01-28 22:37:22 +00002150 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00002151 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02002152 val = io_mem_read(section->mr, addr, 4);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002153#if defined(TARGET_WORDS_BIGENDIAN)
2154 if (endian == DEVICE_LITTLE_ENDIAN) {
2155 val = bswap32(val);
2156 }
2157#else
2158 if (endian == DEVICE_BIG_ENDIAN) {
2159 val = bswap32(val);
2160 }
2161#endif
bellard8df1cd02005-01-28 22:37:22 +00002162 } else {
2163 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02002164 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002165 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002166 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002167 switch (endian) {
2168 case DEVICE_LITTLE_ENDIAN:
2169 val = ldl_le_p(ptr);
2170 break;
2171 case DEVICE_BIG_ENDIAN:
2172 val = ldl_be_p(ptr);
2173 break;
2174 default:
2175 val = ldl_p(ptr);
2176 break;
2177 }
bellard8df1cd02005-01-28 22:37:22 +00002178 }
2179 return val;
2180}
2181
Avi Kivitya8170e52012-10-23 12:30:10 +02002182uint32_t ldl_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002183{
2184 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2185}
2186
Avi Kivitya8170e52012-10-23 12:30:10 +02002187uint32_t ldl_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002188{
2189 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2190}
2191
Avi Kivitya8170e52012-10-23 12:30:10 +02002192uint32_t ldl_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002193{
2194 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
2195}
2196
bellard84b7b8e2005-11-28 21:19:04 +00002197/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002198static inline uint64_t ldq_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002199 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00002200{
bellard84b7b8e2005-11-28 21:19:04 +00002201 uint8_t *ptr;
2202 uint64_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02002203 MemoryRegionSection *section;
bellard84b7b8e2005-11-28 21:19:04 +00002204
Avi Kivityac1970f2012-10-03 16:22:53 +02002205 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00002206
Blue Swirlcc5bea62012-04-14 14:56:48 +00002207 if (!(memory_region_is_ram(section->mr) ||
2208 memory_region_is_romd(section->mr))) {
bellard84b7b8e2005-11-28 21:19:04 +00002209 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00002210 addr = memory_region_section_addr(section, addr);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002211
2212 /* XXX This is broken when device endian != cpu endian.
2213 Fix and add "endian" variable check */
bellard84b7b8e2005-11-28 21:19:04 +00002214#ifdef TARGET_WORDS_BIGENDIAN
Avi Kivity37ec01d2012-03-08 18:08:35 +02002215 val = io_mem_read(section->mr, addr, 4) << 32;
2216 val |= io_mem_read(section->mr, addr + 4, 4);
bellard84b7b8e2005-11-28 21:19:04 +00002217#else
Avi Kivity37ec01d2012-03-08 18:08:35 +02002218 val = io_mem_read(section->mr, addr, 4);
2219 val |= io_mem_read(section->mr, addr + 4, 4) << 32;
bellard84b7b8e2005-11-28 21:19:04 +00002220#endif
2221 } else {
2222 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02002223 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002224 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002225 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002226 switch (endian) {
2227 case DEVICE_LITTLE_ENDIAN:
2228 val = ldq_le_p(ptr);
2229 break;
2230 case DEVICE_BIG_ENDIAN:
2231 val = ldq_be_p(ptr);
2232 break;
2233 default:
2234 val = ldq_p(ptr);
2235 break;
2236 }
bellard84b7b8e2005-11-28 21:19:04 +00002237 }
2238 return val;
2239}
2240
Avi Kivitya8170e52012-10-23 12:30:10 +02002241uint64_t ldq_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002242{
2243 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2244}
2245
Avi Kivitya8170e52012-10-23 12:30:10 +02002246uint64_t ldq_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002247{
2248 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2249}
2250
Avi Kivitya8170e52012-10-23 12:30:10 +02002251uint64_t ldq_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002252{
2253 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2254}
2255
bellardaab33092005-10-30 20:48:42 +00002256/* XXX: optimize */
Avi Kivitya8170e52012-10-23 12:30:10 +02002257uint32_t ldub_phys(hwaddr addr)
bellardaab33092005-10-30 20:48:42 +00002258{
2259 uint8_t val;
2260 cpu_physical_memory_read(addr, &val, 1);
2261 return val;
2262}
2263
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002264/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002265static inline uint32_t lduw_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002266 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00002267{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002268 uint8_t *ptr;
2269 uint64_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02002270 MemoryRegionSection *section;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002271
Avi Kivityac1970f2012-10-03 16:22:53 +02002272 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002273
Blue Swirlcc5bea62012-04-14 14:56:48 +00002274 if (!(memory_region_is_ram(section->mr) ||
2275 memory_region_is_romd(section->mr))) {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002276 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00002277 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02002278 val = io_mem_read(section->mr, addr, 2);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002279#if defined(TARGET_WORDS_BIGENDIAN)
2280 if (endian == DEVICE_LITTLE_ENDIAN) {
2281 val = bswap16(val);
2282 }
2283#else
2284 if (endian == DEVICE_BIG_ENDIAN) {
2285 val = bswap16(val);
2286 }
2287#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002288 } else {
2289 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02002290 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002291 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002292 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002293 switch (endian) {
2294 case DEVICE_LITTLE_ENDIAN:
2295 val = lduw_le_p(ptr);
2296 break;
2297 case DEVICE_BIG_ENDIAN:
2298 val = lduw_be_p(ptr);
2299 break;
2300 default:
2301 val = lduw_p(ptr);
2302 break;
2303 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002304 }
2305 return val;
bellardaab33092005-10-30 20:48:42 +00002306}
2307
Avi Kivitya8170e52012-10-23 12:30:10 +02002308uint32_t lduw_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002309{
2310 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2311}
2312
Avi Kivitya8170e52012-10-23 12:30:10 +02002313uint32_t lduw_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002314{
2315 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2316}
2317
Avi Kivitya8170e52012-10-23 12:30:10 +02002318uint32_t lduw_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002319{
2320 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
2321}
2322
bellard8df1cd02005-01-28 22:37:22 +00002323/* warning: addr must be aligned. The ram page is not marked as dirty
2324 and the code inside is not invalidated. It is useful if the dirty
2325 bits are used to track modified PTEs */
Avi Kivitya8170e52012-10-23 12:30:10 +02002326void stl_phys_notdirty(hwaddr addr, uint32_t val)
bellard8df1cd02005-01-28 22:37:22 +00002327{
bellard8df1cd02005-01-28 22:37:22 +00002328 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02002329 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00002330
Avi Kivityac1970f2012-10-03 16:22:53 +02002331 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00002332
Avi Kivityf3705d52012-03-08 16:16:34 +02002333 if (!memory_region_is_ram(section->mr) || section->readonly) {
Blue Swirlcc5bea62012-04-14 14:56:48 +00002334 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02002335 if (memory_region_is_ram(section->mr)) {
2336 section = &phys_sections[phys_section_rom];
2337 }
2338 io_mem_write(section->mr, addr, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00002339 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02002340 unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002341 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002342 + memory_region_section_addr(section, addr);
pbrook5579c7f2009-04-11 14:47:08 +00002343 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00002344 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00002345
2346 if (unlikely(in_migration)) {
2347 if (!cpu_physical_memory_is_dirty(addr1)) {
2348 /* invalidate code */
2349 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2350 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002351 cpu_physical_memory_set_dirty_flags(
2352 addr1, (0xff & ~CODE_DIRTY_FLAG));
aliguori74576192008-10-06 14:02:03 +00002353 }
2354 }
bellard8df1cd02005-01-28 22:37:22 +00002355 }
2356}
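/*
 * Illustrative sketch (pte_addr, pte and accessed_bit are hypothetical):
 * target MMU emulation uses this variant to update accessed/dirty bits of a
 * guest page-table entry in place, so that the write does not perturb the
 * dirty bitmap that is itself being used to track PTE modifications:
 *
 *     stl_phys_notdirty(pte_addr, pte | accessed_bit);
 */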
2357
Avi Kivitya8170e52012-10-23 12:30:10 +02002358void stq_phys_notdirty(hwaddr addr, uint64_t val)
j_mayerbc98a7e2007-04-04 07:55:12 +00002359{
j_mayerbc98a7e2007-04-04 07:55:12 +00002360 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02002361 MemoryRegionSection *section;
j_mayerbc98a7e2007-04-04 07:55:12 +00002362
Avi Kivityac1970f2012-10-03 16:22:53 +02002363 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00002364
Avi Kivityf3705d52012-03-08 16:16:34 +02002365 if (!memory_region_is_ram(section->mr) || section->readonly) {
Blue Swirlcc5bea62012-04-14 14:56:48 +00002366 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02002367 if (memory_region_is_ram(section->mr)) {
2368 section = &phys_sections[phys_section_rom];
2369 }
j_mayerbc98a7e2007-04-04 07:55:12 +00002370#ifdef TARGET_WORDS_BIGENDIAN
Avi Kivity37ec01d2012-03-08 18:08:35 +02002371 io_mem_write(section->mr, addr, val >> 32, 4);
2372 io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
j_mayerbc98a7e2007-04-04 07:55:12 +00002373#else
Avi Kivity37ec01d2012-03-08 18:08:35 +02002374 io_mem_write(section->mr, addr, (uint32_t)val, 4);
2375 io_mem_write(section->mr, addr + 4, val >> 32, 4);
j_mayerbc98a7e2007-04-04 07:55:12 +00002376#endif
2377 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02002378 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002379 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002380 + memory_region_section_addr(section, addr));
j_mayerbc98a7e2007-04-04 07:55:12 +00002381 stq_p(ptr, val);
2382 }
2383}
2384
bellard8df1cd02005-01-28 22:37:22 +00002385/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002386static inline void stl_phys_internal(hwaddr addr, uint32_t val,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002387 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002388{
bellard8df1cd02005-01-28 22:37:22 +00002389 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02002390 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00002391
Avi Kivityac1970f2012-10-03 16:22:53 +02002392 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00002393
Avi Kivityf3705d52012-03-08 16:16:34 +02002394 if (!memory_region_is_ram(section->mr) || section->readonly) {
Blue Swirlcc5bea62012-04-14 14:56:48 +00002395 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02002396 if (memory_region_is_ram(section->mr)) {
2397 section = &phys_sections[phys_section_rom];
2398 }
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002399#if defined(TARGET_WORDS_BIGENDIAN)
2400 if (endian == DEVICE_LITTLE_ENDIAN) {
2401 val = bswap32(val);
2402 }
2403#else
2404 if (endian == DEVICE_BIG_ENDIAN) {
2405 val = bswap32(val);
2406 }
2407#endif
Avi Kivity37ec01d2012-03-08 18:08:35 +02002408 io_mem_write(section->mr, addr, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00002409 } else {
2410 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02002411 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002412 + memory_region_section_addr(section, addr);
bellard8df1cd02005-01-28 22:37:22 +00002413 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002414 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002415 switch (endian) {
2416 case DEVICE_LITTLE_ENDIAN:
2417 stl_le_p(ptr, val);
2418 break;
2419 case DEVICE_BIG_ENDIAN:
2420 stl_be_p(ptr, val);
2421 break;
2422 default:
2423 stl_p(ptr, val);
2424 break;
2425 }
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002426 invalidate_and_set_dirty(addr1, 4);
bellard8df1cd02005-01-28 22:37:22 +00002427 }
2428}
2429
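/* Store a 32-bit value to guest physical memory in target-native,
   little-endian or big-endian byte order, respectively. */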
Avi Kivitya8170e52012-10-23 12:30:10 +02002430void stl_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002431{
2432 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2433}
2434
Avi Kivitya8170e52012-10-23 12:30:10 +02002435void stl_le_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002436{
2437 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2438}
2439
Avi Kivitya8170e52012-10-23 12:30:10 +02002440void stl_be_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002441{
2442 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2443}
2444
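/* Store a single byte to guest physical memory. */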
bellardaab33092005-10-30 20:48:42 +00002445/* XXX: optimize */
Avi Kivitya8170e52012-10-23 12:30:10 +02002446void stb_phys(hwaddr addr, uint32_t val)
bellardaab33092005-10-30 20:48:42 +00002447{
2448 uint8_t v = val;
2449 cpu_physical_memory_write(addr, &v, 1);
2450}
2451
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002452/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002453static inline void stw_phys_internal(hwaddr addr, uint32_t val,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002454 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00002455{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002456 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02002457 MemoryRegionSection *section;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002458
Avi Kivityac1970f2012-10-03 16:22:53 +02002459 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002460
Avi Kivityf3705d52012-03-08 16:16:34 +02002461 if (!memory_region_is_ram(section->mr) || section->readonly) {
Blue Swirlcc5bea62012-04-14 14:56:48 +00002462 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02002463 if (memory_region_is_ram(section->mr)) {
2464 section = &phys_sections[phys_section_rom];
2465 }
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002466#if defined(TARGET_WORDS_BIGENDIAN)
2467 if (endian == DEVICE_LITTLE_ENDIAN) {
2468 val = bswap16(val);
2469 }
2470#else
2471 if (endian == DEVICE_BIG_ENDIAN) {
2472 val = bswap16(val);
2473 }
2474#endif
Avi Kivity37ec01d2012-03-08 18:08:35 +02002475 io_mem_write(section->mr, addr, val, 2);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002476 } else {
2477 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02002478 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002479 + memory_region_section_addr(section, addr);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002480 /* RAM case */
2481 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002482 switch (endian) {
2483 case DEVICE_LITTLE_ENDIAN:
2484 stw_le_p(ptr, val);
2485 break;
2486 case DEVICE_BIG_ENDIAN:
2487 stw_be_p(ptr, val);
2488 break;
2489 default:
2490 stw_p(ptr, val);
2491 break;
2492 }
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002493 invalidate_and_set_dirty(addr1, 2);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002494 }
bellardaab33092005-10-30 20:48:42 +00002495}
2496
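/* Store a 16-bit value to guest physical memory in target-native,
   little-endian or big-endian byte order, respectively. */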
Avi Kivitya8170e52012-10-23 12:30:10 +02002497void stw_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002498{
2499 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2500}
2501
Avi Kivitya8170e52012-10-23 12:30:10 +02002502void stw_le_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002503{
2504 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2505}
2506
Avi Kivitya8170e52012-10-23 12:30:10 +02002507void stw_be_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002508{
2509 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2510}
2511
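/* Store a 64-bit value to guest physical memory in target-native,
   little-endian or big-endian byte order, respectively; implemented by
   byte-swapping the value in host memory and writing the 8-byte buffer
   with cpu_physical_memory_write(). */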
bellardaab33092005-10-30 20:48:42 +00002512/* XXX: optimize */
Avi Kivitya8170e52012-10-23 12:30:10 +02002513void stq_phys(hwaddr addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00002514{
2515 val = tswap64(val);
Stefan Weil71d2b722011-03-26 21:06:56 +01002516 cpu_physical_memory_write(addr, &val, 8);
bellardaab33092005-10-30 20:48:42 +00002517}
2518
Avi Kivitya8170e52012-10-23 12:30:10 +02002519void stq_le_phys(hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002520{
2521 val = cpu_to_le64(val);
2522 cpu_physical_memory_write(addr, &val, 8);
2523}
2524
Avi Kivitya8170e52012-10-23 12:30:10 +02002525void stq_be_phys(hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002526{
2527 val = cpu_to_be64(val);
2528 cpu_physical_memory_write(addr, &val, 8);
2529}
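
/*
 * Illustrative sketch only (DESC_ADDR, desc and flags are hypothetical): a
 * device model whose guest-visible structures are defined as little endian
 * would use the _le_ accessors regardless of the target's native byte order:
 *
 *     stq_le_phys(DESC_ADDR, desc);
 *     stw_le_phys(DESC_ADDR + 8, flags);
 */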
2530
aliguori5e2972f2009-03-28 17:51:36 +00002531/* virtual memory access for debug (includes writing to ROM) */
Andreas Färber9349b4f2012-03-14 01:38:32 +01002532int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00002533 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00002534{
2535 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02002536 hwaddr phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00002537 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00002538
2539 while (len > 0) {
2540 page = addr & TARGET_PAGE_MASK;
2541 phys_addr = cpu_get_phys_page_debug(env, page);
2542 /* if no physical page is mapped, return an error */
2543 if (phys_addr == -1)
2544 return -1;
2545 l = (page + TARGET_PAGE_SIZE) - addr;
2546 if (l > len)
2547 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00002548 phys_addr += (addr & ~TARGET_PAGE_MASK);
aliguori5e2972f2009-03-28 17:51:36 +00002549 if (is_write)
2550 cpu_physical_memory_write_rom(phys_addr, buf, l);
2551 else
aliguori5e2972f2009-03-28 17:51:36 +00002552 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
bellard13eb76e2004-01-24 15:23:36 +00002553 len -= l;
2554 buf += l;
2555 addr += l;
2556 }
2557 return 0;
2558}
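
/*
 * Illustrative sketch only (vaddr and buf are hypothetical): a debugger front
 * end such as the gdb stub reads guest virtual memory through this helper,
 * one page fragment at a time:
 *
 *     uint8_t buf[64];
 *     if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) != 0) {
 *         // no physical page is mapped at vaddr
 *     }
 */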
Paul Brooka68fe892010-03-01 00:08:59 +00002559#endif
bellard13eb76e2004-01-24 15:23:36 +00002560
Paul Brookb3755a92010-03-12 16:54:58 +00002561#if !defined(CONFIG_USER_ONLY)
2562
Benjamin Herrenschmidt82afa582012-01-10 01:35:11 +00002563/*
2564 * A helper function for the _utterly broken_ virtio device model to find out
2565 * whether it's running on a big-endian machine. Don't do this at home, kids!
2566 */
2567bool virtio_is_big_endian(void);
2568bool virtio_is_big_endian(void)
2569{
2570#if defined(TARGET_WORDS_BIGENDIAN)
2571 return true;
2572#else
2573 return false;
2574#endif
2575}
2576
bellard61382a52003-10-27 21:22:23 +00002577#endif
Wen Congyang76f35532012-05-07 12:04:18 +08002578
2579#ifndef CONFIG_USER_ONLY
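/* Return true if the guest physical address is backed by an I/O (device)
   region rather than RAM or ROM-device (ROMD) memory. */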
Avi Kivitya8170e52012-10-23 12:30:10 +02002580bool cpu_physical_memory_is_io(hwaddr phys_addr)
Wen Congyang76f35532012-05-07 12:04:18 +08002581{
2582 MemoryRegionSection *section;
2583
Avi Kivityac1970f2012-10-03 16:22:53 +02002584 section = phys_page_find(address_space_memory.dispatch,
2585 phys_addr >> TARGET_PAGE_BITS);
Wen Congyang76f35532012-05-07 12:04:18 +08002586
2587 return !(memory_region_is_ram(section->mr) ||
2588 memory_region_is_romd(section->mr));
2589}
2590#endif