/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "hw/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"

//#define DEBUG_UNASSIGNED
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;
DMAContext dma_context_memory;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

#if !defined(CONFIG_USER_ONLY)

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
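/*
 * Clarifying sketch (illustrative, not part of the original commentary):
 * phys_map_nodes backs a radix tree keyed by physical page number.  Each
 * node holds L2_SIZE PhysPageEntry slots; a non-leaf entry stores the
 * 16-bit index of a child node in phys_map_nodes, while a leaf entry
 * (is_leaf set) stores an index into phys_sections[].  Each level of the
 * walk consumes L2_BITS bits of the page index.  PHYS_MAP_NODE_NIL marks a
 * slot with no child or section assigned yet; lookups that hit it fall back
 * to phys_section_unassigned.
 */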

static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}


static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
{
    PhysPageEntry lp = d->phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}
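/*
 * Illustrative sketch (not from the original sources): resolving a page
 * index with the functions above.  phys_page_set() populates the tree and
 * phys_page_find() walks it back down, falling back to the unassigned
 * section when a level is missing:
 *
 *     MemoryRegionSection *sec;
 *     phys_page_set(d, addr >> TARGET_PAGE_BITS, 1, section_index);
 *     sec = phys_page_find(d, addr >> TARGET_PAGE_BITS);
 *
 * "addr" and "section_index" are placeholders for the example; in this file
 * the real callers are register_subpage()/register_multipage() further below.
 */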

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUArchState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUArchState),
        VMSTATE_UINT32(interrupt_request, CPUArchState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUArchState *qemu_get_cpu(int cpu)
{
    CPUArchState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUArchState *env)
{
#ifndef CONFIG_USER_ONLY
    CPUState *cpu = ENV_GET_CPU(env);
#endif
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)

{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
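/*
 * Usage sketch (illustrative only): a debugger front end such as the gdbstub
 * would request a doubleword write watchpoint roughly like this; "env" and
 * the error handling are placeholders:
 *
 *     CPUWatchpoint *wp;
 *     if (cpu_watchpoint_insert(env, addr, 8, BP_GDB | BP_MEM_WRITE, &wp) < 0) {
 *         ... report the failure back to the client ...
 *     }
 *
 * len must be a power of two and addr must be aligned to it, otherwise the
 * sanity check above rejects the request with -EINVAL.
 */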

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}
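/*
 * Usage sketch (illustrative only): pairing insert and remove.  The gdbstub
 * inserts with BP_GDB so its breakpoints stay ahead of BP_CPU ones:
 *
 *     cpu_breakpoint_insert(env, pc, BP_GDB, NULL);
 *     ...
 *     cpu_breakpoint_remove(env, pc, BP_GDB);
 *
 * breakpoint_invalidate() above forces the affected TB to be retranslated,
 * so the breakpoint takes effect even for code that was already translated.
 */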

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

void cpu_reset_interrupt(CPUArchState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUArchState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);

}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, paddr);
        if (!section->readonly) {
            iotlb |= phys_section_notdirty;
        } else {
            iotlb |= phys_section_rom;
        }
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = section - phys_sections;
        iotlb += memory_region_section_addr(section, paddr);
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
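/*
 * Clarifying note (a rough description, not from the original commentary):
 * the value built above is what the softmmu TLB code stores in its iotlb[]
 * slot for the page.  For RAM it is roughly the ram_addr of the page with a
 * small section index (notdirty or rom) OR'd into the low bits, so the slow
 * path can apply dirty tracking or ROM protection; for MMIO it is the index
 * of the MemoryRegionSection plus the offset within the page, which the I/O
 * access path later decodes back into a region and an offset.
 */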
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;
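/*
 * Clarifying note (illustrative, not from the original sources): a subpage_t
 * covers one TARGET_PAGE_SIZE page whose contents are shared by several
 * memory regions.  sub_section[] maps each byte offset within the page to a
 * phys_sections[] index, so a dispatch first resolves the page to the
 * subpage's iomem and then uses sub_section[SUBPAGE_IDX(addr)] to find the
 * real target section.  register_subpage() below fills this table in.
 */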

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(hwaddr base);
static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
        if (!p[i].is_leaf) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].ptr);
        }
    }
    lp->is_leaf = 0;
    lp->ptr = PHYS_MAP_NODE_NIL;
}

static void destroy_all_mappings(AddressSpaceDispatch *d)
{
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}

static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + section->size - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}


static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    hwaddr addr;
    uint16_t section_index = phys_section_add(section);

    assert(size);

    addr = start_addr;
    phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
                  section_index);
}

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
    MemoryRegionSection now = *section, remain = *section;

    if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
        || (now.size < TARGET_PAGE_SIZE)) {
        now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space,
                       now.size);
        register_subpage(d, &now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    while (remain.size >= TARGET_PAGE_SIZE) {
        now = remain;
        if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = TARGET_PAGE_SIZE;
            register_subpage(d, &now);
        } else {
            now.size &= TARGET_PAGE_MASK;
            register_multipage(d, &now);
        }
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    now = remain;
    if (now.size) {
        register_subpage(d, &now);
    }
}
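/*
 * Worked example (illustrative only, addresses made up): a section starting
 * at guest address 0x1000800 with size 0x2400 and 4K target pages is split
 * by mem_add() into an unaligned head 0x1000800..0x1000fff, a full page
 * 0x1001000..0x1001fff, and a partial tail 0x1002000..0x1002bff.  Head and
 * tail always go through register_subpage(); the full page in the middle
 * takes the register_multipage() fast path only if offset_within_region is
 * page-aligned at that point, otherwise it too becomes a page-sized subpage.
 */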

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC 0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    unlink(filename);
    free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }
    block->fd = fd;
    return area;
}
#endif

static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    if (QLIST_EMPTY(&ram_list.blocks))
        return 0;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QLIST_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}
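/*
 * Illustrative example (not from the original sources): with blocks already
 * on ram_list at [0x0, 0x8000000) and [0x10000000, 0x18000000), a request
 * for 0x4000000 bytes returns offset 0x8000000, because the 0x8000000-byte
 * hole after the first block is the smallest gap that still fits the
 * request (best-fit, which limits fragmentation of the ram_addr space).
 */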

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QLIST_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;
    QemuOpts *machine_opts;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (machine_opts &&
        !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                    "but dump_guest_core=off specified\n");
        }
    }
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    QemuOpts *opts;

    opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                memory_try_enable_merging(new_block->host, size);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_vmalloc(size);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
    ram_list.mru_block = NULL;

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}
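/*
 * Usage sketch (illustrative only): callers normally reach this through the
 * memory API rather than directly.  memory_region_init_ram() ends up doing
 * roughly:
 *
 *     mr->ram_addr = qemu_ram_alloc(size, mr);
 *
 * while memory_region_init_ram_ptr() passes a caller-supplied host buffer
 * through qemu_ram_alloc_from_ptr(); such blocks get RAM_PREALLOC_MASK so
 * qemu_ram_free()/qemu_ram_remap() know not to unmap memory they do not own.
 */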

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            ram_list.mru_block = NULL;
            g_free(block);
            return;
        }
    }
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            ram_list.mru_block = NULL;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            g_free(block);
            return;
        }
    }

}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    if (xen_enabled()) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        } else if (block->host == NULL) {
            block->host =
                xen_map_cache(block->offset, block->length, 1);
        }
    }
    return block->host + (addr - block->offset);
}
1225
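/*
 * Illustrative sketch: the kind of caller the comment above has in mind --
 * a device model that owns a RAM block (e.g. video RAM) and touches it
 * through qemu_get_ram_ptr().  The function name and the vram_offset /
 * vram_size parameters are hypothetical, not taken from this file.
 */
static void example_clear_vram(ram_addr_t vram_offset, ram_addr_t vram_size)
{
    /* Host pointer to the start of the device's own RAM block. */
    uint8_t *vram = qemu_get_ram_ptr(vram_offset);

    /* Safe only because the device knows it never goes past its own block. */
    memset(vram, 0, vram_size);
}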
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001226/* Return a host pointer to ram allocated with qemu_ram_alloc. Same as
1227 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
1228 *
1229 * ??? Is this still necessary?
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001230 */
Blue Swirl8b9c99d2012-10-28 11:04:51 +00001231static void *qemu_safe_ram_ptr(ram_addr_t addr)
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001232{
1233 RAMBlock *block;
1234
1235 QLIST_FOREACH(block, &ram_list.blocks, next) {
1236 if (addr - block->offset < block->length) {
Jan Kiszka868bb332011-06-21 22:59:09 +02001237 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001238 /* We need to check if the requested address is in the RAM
1239 * because we don't want to map the entire memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001240 * In that case just map until the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01001241 */
1242 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001243 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01001244 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001245 block->host =
1246 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01001247 }
1248 }
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001249 return block->host + (addr - block->offset);
1250 }
1251 }
1252
1253 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1254 abort();
1255
1256 return NULL;
1257}
1258
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001259/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1260 * but takes a size argument */
Blue Swirl8b9c99d2012-10-28 11:04:51 +00001261static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001262{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001263 if (*size == 0) {
1264 return NULL;
1265 }
Jan Kiszka868bb332011-06-21 22:59:09 +02001266 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001267 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02001268 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001269 RAMBlock *block;
1270
1271 QLIST_FOREACH(block, &ram_list.blocks, next) {
1272 if (addr - block->offset < block->length) {
1273 if (addr - block->offset + *size > block->length)
1274 *size = block->length - addr + block->offset;
1275 return block->host + (addr - block->offset);
1276 }
1277 }
1278
1279 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1280 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001281 }
1282}
1283
Anthony PERARD050a0dd2010-09-16 13:57:49 +01001284void qemu_put_ram_ptr(void *addr)
1285{
1286 trace_qemu_put_ram_ptr(addr);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01001287}
1288
Marcelo Tosattie8902612010-10-11 15:31:19 -03001289int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00001290{
pbrook94a6b542009-04-11 17:15:54 +00001291 RAMBlock *block;
1292 uint8_t *host = ptr;
1293
Jan Kiszka868bb332011-06-21 22:59:09 +02001294 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001295 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001296 return 0;
1297 }
1298
Alex Williamsonf471a172010-06-11 11:11:42 -06001299 QLIST_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001300        /* This case occurs when the block is not mapped. */
1301 if (block->host == NULL) {
1302 continue;
1303 }
Alex Williamsonf471a172010-06-11 11:11:42 -06001304 if (host - block->host < block->length) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03001305 *ram_addr = block->offset + (host - block->host);
1306 return 0;
Alex Williamsonf471a172010-06-11 11:11:42 -06001307 }
pbrook94a6b542009-04-11 17:15:54 +00001308 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001309
Marcelo Tosattie8902612010-10-11 15:31:19 -03001310 return -1;
1311}
Alex Williamsonf471a172010-06-11 11:11:42 -06001312
Marcelo Tosattie8902612010-10-11 15:31:19 -03001313/* Some of the softmmu routines need to translate from a host pointer
1314 (typically a TLB entry) back to a ram offset. */
1315ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
1316{
1317 ram_addr_t ram_addr;
Alex Williamsonf471a172010-06-11 11:11:42 -06001318
Marcelo Tosattie8902612010-10-11 15:31:19 -03001319 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
1320 fprintf(stderr, "Bad ram pointer %p\n", ptr);
1321 abort();
1322 }
1323 return ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00001324}
1325
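/*
 * Illustrative sketch: the host-pointer -> ram_addr_t round trip provided by
 * the two helpers above.  The offset argument is hypothetical; a real caller
 * would already hold a pointer obtained from qemu_get_ram_ptr() or
 * cpu_physical_memory_map().
 */
static void example_host_ptr_round_trip(ram_addr_t offset)
{
    void *host = qemu_get_ram_ptr(offset);
    ram_addr_t back;

    if (qemu_ram_addr_from_host(host, &back) == 0) {
        assert(back == offset);
    }
}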
Avi Kivitya8170e52012-10-23 12:30:10 +02001326static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001327 unsigned size)
bellard33417e72003-08-10 21:47:01 +00001328{
pbrook67d3b952006-12-18 05:03:52 +00001329#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00001330 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
pbrook67d3b952006-12-18 05:03:52 +00001331#endif
Richard Henderson5b450402011-04-18 16:13:12 -07001332#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001333 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00001334#endif
1335 return 0;
1336}
1337
Avi Kivitya8170e52012-10-23 12:30:10 +02001338static void unassigned_mem_write(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001339 uint64_t val, unsigned size)
blueswir1e18231a2008-10-06 18:46:28 +00001340{
1341#ifdef DEBUG_UNASSIGNED
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001342 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
blueswir1e18231a2008-10-06 18:46:28 +00001343#endif
Richard Henderson5b450402011-04-18 16:13:12 -07001344#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001345 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00001346#endif
1347}
1348
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001349static const MemoryRegionOps unassigned_mem_ops = {
1350 .read = unassigned_mem_read,
1351 .write = unassigned_mem_write,
1352 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00001353};
1354
Avi Kivitya8170e52012-10-23 12:30:10 +02001355static uint64_t error_mem_read(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001356 unsigned size)
1357{
1358 abort();
1359}
1360
Avi Kivitya8170e52012-10-23 12:30:10 +02001361static void error_mem_write(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001362 uint64_t value, unsigned size)
1363{
1364 abort();
1365}
1366
1367static const MemoryRegionOps error_mem_ops = {
1368 .read = error_mem_read,
1369 .write = error_mem_write,
1370 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00001371};
1372
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001373static const MemoryRegionOps rom_mem_ops = {
1374 .read = error_mem_read,
1375 .write = unassigned_mem_write,
1376 .endianness = DEVICE_NATIVE_ENDIAN,
1377};
1378
Avi Kivitya8170e52012-10-23 12:30:10 +02001379static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001380 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00001381{
bellard3a7d9292005-08-21 09:26:42 +00001382 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001383 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00001384 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1385#if !defined(CONFIG_USER_ONLY)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001386 tb_invalidate_phys_page_fast(ram_addr, size);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001387 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00001388#endif
1389 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001390 switch (size) {
1391 case 1:
1392 stb_p(qemu_get_ram_ptr(ram_addr), val);
1393 break;
1394 case 2:
1395 stw_p(qemu_get_ram_ptr(ram_addr), val);
1396 break;
1397 case 4:
1398 stl_p(qemu_get_ram_ptr(ram_addr), val);
1399 break;
1400 default:
1401 abort();
1402 }
bellardf23db162005-08-21 19:12:28 +00001403 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001404 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00001405 /* we remove the notdirty callback only if the code has been
1406 flushed */
1407 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00001408 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00001409}
1410
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001411static const MemoryRegionOps notdirty_mem_ops = {
1412 .read = error_mem_read,
1413 .write = notdirty_mem_write,
1414 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00001415};
1416
pbrook0f459d12008-06-09 00:20:13 +00001417/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00001418static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00001419{
Andreas Färber9349b4f2012-03-14 01:38:32 +01001420 CPUArchState *env = cpu_single_env;
aliguori06d55cc2008-11-18 20:24:06 +00001421 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00001422 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00001423 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00001424 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00001425
aliguori06d55cc2008-11-18 20:24:06 +00001426 if (env->watchpoint_hit) {
1427 /* We re-entered the check after replacing the TB. Now raise
1428         * the debug interrupt so that it will trigger after the
1429 * current instruction. */
1430 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
1431 return;
1432 }
pbrook2e70f6e2008-06-29 01:03:05 +00001433 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00001434 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001435 if ((vaddr == (wp->vaddr & len_mask) ||
1436 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00001437 wp->flags |= BP_WATCHPOINT_HIT;
1438 if (!env->watchpoint_hit) {
1439 env->watchpoint_hit = wp;
Blue Swirl5a316522012-12-02 21:28:09 +00001440 tb_check_watchpoint(env);
aliguori6e140f22008-11-18 20:37:55 +00001441 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1442 env->exception_index = EXCP_DEBUG;
Max Filippov488d6572012-01-29 02:24:39 +04001443 cpu_loop_exit(env);
aliguori6e140f22008-11-18 20:37:55 +00001444 } else {
1445 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1446 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
Max Filippov488d6572012-01-29 02:24:39 +04001447 cpu_resume_from_signal(env, NULL);
aliguori6e140f22008-11-18 20:37:55 +00001448 }
aliguori06d55cc2008-11-18 20:24:06 +00001449 }
aliguori6e140f22008-11-18 20:37:55 +00001450 } else {
1451 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00001452 }
1453 }
1454}
1455
pbrook6658ffb2007-03-16 23:58:11 +00001456/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1457 so these check for a hit then pass through to the normal out-of-line
1458 phys routines. */
Avi Kivitya8170e52012-10-23 12:30:10 +02001459static uint64_t watch_mem_read(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001460 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001461{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001462 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1463 switch (size) {
1464 case 1: return ldub_phys(addr);
1465 case 2: return lduw_phys(addr);
1466 case 4: return ldl_phys(addr);
1467 default: abort();
1468 }
pbrook6658ffb2007-03-16 23:58:11 +00001469}
1470
Avi Kivitya8170e52012-10-23 12:30:10 +02001471static void watch_mem_write(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001472 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001473{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001474 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1475 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04001476 case 1:
1477 stb_phys(addr, val);
1478 break;
1479 case 2:
1480 stw_phys(addr, val);
1481 break;
1482 case 4:
1483 stl_phys(addr, val);
1484 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02001485 default: abort();
1486 }
pbrook6658ffb2007-03-16 23:58:11 +00001487}
1488
Avi Kivity1ec9b902012-01-02 12:47:48 +02001489static const MemoryRegionOps watch_mem_ops = {
1490 .read = watch_mem_read,
1491 .write = watch_mem_write,
1492 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00001493};
pbrook6658ffb2007-03-16 23:58:11 +00001494
Avi Kivitya8170e52012-10-23 12:30:10 +02001495static uint64_t subpage_read(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001496 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001497{
Avi Kivity70c68e42012-01-02 12:32:48 +02001498 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07001499 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02001500 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00001501#if defined(DEBUG_SUBPAGE)
1502 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
1503 mmio, len, addr, idx);
1504#endif
blueswir1db7b5422007-05-26 17:36:03 +00001505
Avi Kivity5312bd82012-02-12 18:32:55 +02001506 section = &phys_sections[mmio->sub_section[idx]];
1507 addr += mmio->base;
1508 addr -= section->offset_within_address_space;
1509 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02001510 return io_mem_read(section->mr, addr, len);
blueswir1db7b5422007-05-26 17:36:03 +00001511}
1512
Avi Kivitya8170e52012-10-23 12:30:10 +02001513static void subpage_write(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001514 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001515{
Avi Kivity70c68e42012-01-02 12:32:48 +02001516 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07001517 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02001518 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00001519#if defined(DEBUG_SUBPAGE)
Avi Kivity70c68e42012-01-02 12:32:48 +02001520 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
1521 " idx %d value %"PRIx64"\n",
Richard Hendersonf6405242010-04-22 16:47:31 -07001522 __func__, mmio, len, addr, idx, value);
blueswir1db7b5422007-05-26 17:36:03 +00001523#endif
Richard Hendersonf6405242010-04-22 16:47:31 -07001524
Avi Kivity5312bd82012-02-12 18:32:55 +02001525 section = &phys_sections[mmio->sub_section[idx]];
1526 addr += mmio->base;
1527 addr -= section->offset_within_address_space;
1528 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02001529 io_mem_write(section->mr, addr, value, len);
blueswir1db7b5422007-05-26 17:36:03 +00001530}
1531
Avi Kivity70c68e42012-01-02 12:32:48 +02001532static const MemoryRegionOps subpage_ops = {
1533 .read = subpage_read,
1534 .write = subpage_write,
1535 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00001536};
1537
Avi Kivitya8170e52012-10-23 12:30:10 +02001538static uint64_t subpage_ram_read(void *opaque, hwaddr addr,
Avi Kivityde712f92012-01-02 12:41:07 +02001539 unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01001540{
1541 ram_addr_t raddr = addr;
1542 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02001543 switch (size) {
1544 case 1: return ldub_p(ptr);
1545 case 2: return lduw_p(ptr);
1546 case 4: return ldl_p(ptr);
1547 default: abort();
1548 }
Andreas Färber56384e82011-11-30 16:26:21 +01001549}
1550
Avi Kivitya8170e52012-10-23 12:30:10 +02001551static void subpage_ram_write(void *opaque, hwaddr addr,
Avi Kivityde712f92012-01-02 12:41:07 +02001552 uint64_t value, unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01001553{
1554 ram_addr_t raddr = addr;
1555 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02001556 switch (size) {
1557 case 1: return stb_p(ptr, value);
1558 case 2: return stw_p(ptr, value);
1559 case 4: return stl_p(ptr, value);
1560 default: abort();
1561 }
Andreas Färber56384e82011-11-30 16:26:21 +01001562}
1563
Avi Kivityde712f92012-01-02 12:41:07 +02001564static const MemoryRegionOps subpage_ram_ops = {
1565 .read = subpage_ram_read,
1566 .write = subpage_ram_write,
1567 .endianness = DEVICE_NATIVE_ENDIAN,
Andreas Färber56384e82011-11-30 16:26:21 +01001568};
1569
Anthony Liguoric227f092009-10-01 16:12:16 -05001570static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02001571 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00001572{
1573 int idx, eidx;
1574
1575 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1576 return -1;
1577 idx = SUBPAGE_IDX(start);
1578 eidx = SUBPAGE_IDX(end);
1579#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00001580    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
blueswir1db7b5422007-05-26 17:36:03 +00001581           __func__, mmio, start, end, idx, eidx, section);
1582#endif
Avi Kivity5312bd82012-02-12 18:32:55 +02001583 if (memory_region_is_ram(phys_sections[section].mr)) {
1584 MemoryRegionSection new_section = phys_sections[section];
1585 new_section.mr = &io_mem_subpage_ram;
1586 section = phys_section_add(&new_section);
Andreas Färber56384e82011-11-30 16:26:21 +01001587 }
blueswir1db7b5422007-05-26 17:36:03 +00001588 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02001589 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00001590 }
1591
1592 return 0;
1593}
1594
Avi Kivitya8170e52012-10-23 12:30:10 +02001595static subpage_t *subpage_init(hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00001596{
Anthony Liguoric227f092009-10-01 16:12:16 -05001597 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00001598
Anthony Liguori7267c092011-08-20 22:09:37 -05001599 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00001600
1601 mmio->base = base;
Avi Kivity70c68e42012-01-02 12:32:48 +02001602 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
1603 "subpage", TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02001604 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00001605#if defined(DEBUG_SUBPAGE)
aliguori1eec6142009-02-05 22:06:18 +00001606    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
1607           mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00001608#endif
Avi Kivity0f0cb162012-02-13 17:14:32 +02001609 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
blueswir1db7b5422007-05-26 17:36:03 +00001610
1611 return mmio;
1612}
1613
Avi Kivity5312bd82012-02-12 18:32:55 +02001614static uint16_t dummy_section(MemoryRegion *mr)
1615{
1616 MemoryRegionSection section = {
1617 .mr = mr,
1618 .offset_within_address_space = 0,
1619 .offset_within_region = 0,
1620 .size = UINT64_MAX,
1621 };
1622
1623 return phys_section_add(&section);
1624}
1625
Avi Kivitya8170e52012-10-23 12:30:10 +02001626MemoryRegion *iotlb_to_region(hwaddr index)
Avi Kivityaa102232012-03-08 17:06:55 +02001627{
Avi Kivity37ec01d2012-03-08 18:08:35 +02001628 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02001629}
1630
Avi Kivitye9179ce2009-06-14 11:38:52 +03001631static void io_mem_init(void)
1632{
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001633 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001634 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
1635 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
1636 "unassigned", UINT64_MAX);
1637 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
1638 "notdirty", UINT64_MAX);
Avi Kivityde712f92012-01-02 12:41:07 +02001639 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
1640 "subpage-ram", UINT64_MAX);
Avi Kivity1ec9b902012-01-02 12:47:48 +02001641 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
1642 "watch", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03001643}
1644
Avi Kivityac1970f2012-10-03 16:22:53 +02001645static void mem_begin(MemoryListener *listener)
1646{
1647 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
1648
1649 destroy_all_mappings(d);
1650 d->phys_map.ptr = PHYS_MAP_NODE_NIL;
1651}
1652
Avi Kivity50c1e142012-02-08 21:36:02 +02001653static void core_begin(MemoryListener *listener)
1654{
Avi Kivity5312bd82012-02-12 18:32:55 +02001655 phys_sections_clear();
1656 phys_section_unassigned = dummy_section(&io_mem_unassigned);
Avi Kivityaa102232012-03-08 17:06:55 +02001657 phys_section_notdirty = dummy_section(&io_mem_notdirty);
1658 phys_section_rom = dummy_section(&io_mem_rom);
1659 phys_section_watch = dummy_section(&io_mem_watch);
Avi Kivity50c1e142012-02-08 21:36:02 +02001660}
1661
Avi Kivity1d711482012-10-02 18:54:45 +02001662static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02001663{
Andreas Färber9349b4f2012-03-14 01:38:32 +01001664 CPUArchState *env;
Avi Kivity117712c2012-02-12 21:23:17 +02001665
1666 /* since each CPU stores ram addresses in its TLB cache, we must
1667 reset the modified entries */
1668 /* XXX: slow ! */
1669 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1670 tlb_flush(env, 1);
1671 }
Avi Kivity50c1e142012-02-08 21:36:02 +02001672}
1673
Avi Kivity93632742012-02-08 16:54:16 +02001674static void core_log_global_start(MemoryListener *listener)
1675{
1676 cpu_physical_memory_set_dirty_tracking(1);
1677}
1678
1679static void core_log_global_stop(MemoryListener *listener)
1680{
1681 cpu_physical_memory_set_dirty_tracking(0);
1682}
1683
Avi Kivity4855d412012-02-08 21:16:05 +02001684static void io_region_add(MemoryListener *listener,
1685 MemoryRegionSection *section)
1686{
Avi Kivitya2d33522012-03-05 17:40:12 +02001687 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
1688
1689 mrio->mr = section->mr;
1690 mrio->offset = section->offset_within_region;
1691 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
Avi Kivity4855d412012-02-08 21:16:05 +02001692 section->offset_within_address_space, section->size);
Avi Kivitya2d33522012-03-05 17:40:12 +02001693 ioport_register(&mrio->iorange);
Avi Kivity4855d412012-02-08 21:16:05 +02001694}
1695
1696static void io_region_del(MemoryListener *listener,
1697 MemoryRegionSection *section)
1698{
1699 isa_unassign_ioport(section->offset_within_address_space, section->size);
1700}
1701
Avi Kivity93632742012-02-08 16:54:16 +02001702static MemoryListener core_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02001703 .begin = core_begin,
Avi Kivity93632742012-02-08 16:54:16 +02001704 .log_global_start = core_log_global_start,
1705 .log_global_stop = core_log_global_stop,
Avi Kivityac1970f2012-10-03 16:22:53 +02001706 .priority = 1,
Avi Kivity93632742012-02-08 16:54:16 +02001707};
1708
Avi Kivity4855d412012-02-08 21:16:05 +02001709static MemoryListener io_memory_listener = {
1710 .region_add = io_region_add,
1711 .region_del = io_region_del,
Avi Kivity4855d412012-02-08 21:16:05 +02001712 .priority = 0,
1713};
1714
Avi Kivity1d711482012-10-02 18:54:45 +02001715static MemoryListener tcg_memory_listener = {
1716 .commit = tcg_commit,
1717};
1718
Avi Kivityac1970f2012-10-03 16:22:53 +02001719void address_space_init_dispatch(AddressSpace *as)
1720{
1721 AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
1722
1723 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
1724 d->listener = (MemoryListener) {
1725 .begin = mem_begin,
1726 .region_add = mem_add,
1727 .region_nop = mem_add,
1728 .priority = 0,
1729 };
1730 as->dispatch = d;
1731 memory_listener_register(&d->listener, as);
1732}
1733
Avi Kivity83f3c252012-10-07 12:59:55 +02001734void address_space_destroy_dispatch(AddressSpace *as)
1735{
1736 AddressSpaceDispatch *d = as->dispatch;
1737
1738 memory_listener_unregister(&d->listener);
1739 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
1740 g_free(d);
1741 as->dispatch = NULL;
1742}
1743
Avi Kivity62152b82011-07-26 14:26:14 +03001744static void memory_map_init(void)
1745{
Anthony Liguori7267c092011-08-20 22:09:37 -05001746 system_memory = g_malloc(sizeof(*system_memory));
Avi Kivity8417ceb2011-08-03 11:56:14 +03001747 memory_region_init(system_memory, "system", INT64_MAX);
Avi Kivity2673a5d2012-10-02 18:49:28 +02001748 address_space_init(&address_space_memory, system_memory);
1749 address_space_memory.name = "memory";
Avi Kivity309cb472011-08-08 16:09:03 +03001750
Anthony Liguori7267c092011-08-20 22:09:37 -05001751 system_io = g_malloc(sizeof(*system_io));
Avi Kivity309cb472011-08-08 16:09:03 +03001752 memory_region_init(system_io, "io", 65536);
Avi Kivity2673a5d2012-10-02 18:49:28 +02001753 address_space_init(&address_space_io, system_io);
1754 address_space_io.name = "I/O";
Avi Kivity93632742012-02-08 16:54:16 +02001755
Avi Kivityf6790af2012-10-02 20:13:51 +02001756 memory_listener_register(&core_memory_listener, &address_space_memory);
1757 memory_listener_register(&io_memory_listener, &address_space_io);
1758 memory_listener_register(&tcg_memory_listener, &address_space_memory);
Peter Maydell9e119082012-10-29 11:34:32 +10001759
1760 dma_context_init(&dma_context_memory, &address_space_memory,
1761 NULL, NULL, NULL);
Avi Kivity62152b82011-07-26 14:26:14 +03001762}
1763
1764MemoryRegion *get_system_memory(void)
1765{
1766 return system_memory;
1767}
1768
Avi Kivity309cb472011-08-08 16:09:03 +03001769MemoryRegion *get_system_io(void)
1770{
1771 return system_io;
1772}
1773
pbrooke2eef172008-06-08 01:09:01 +00001774#endif /* !defined(CONFIG_USER_ONLY) */
1775
bellard13eb76e2004-01-24 15:23:36 +00001776/* physical memory access (slow version, mainly for debug) */
1777#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +01001778int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00001779 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00001780{
1781 int l, flags;
1782 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00001783 void * p;
bellard13eb76e2004-01-24 15:23:36 +00001784
1785 while (len > 0) {
1786 page = addr & TARGET_PAGE_MASK;
1787 l = (page + TARGET_PAGE_SIZE) - addr;
1788 if (l > len)
1789 l = len;
1790 flags = page_get_flags(page);
1791 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00001792 return -1;
bellard13eb76e2004-01-24 15:23:36 +00001793 if (is_write) {
1794 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00001795 return -1;
bellard579a97f2007-11-11 14:26:47 +00001796 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00001797 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00001798 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00001799 memcpy(p, buf, l);
1800 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00001801 } else {
1802 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00001803 return -1;
bellard579a97f2007-11-11 14:26:47 +00001804 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00001805 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00001806 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00001807 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00001808 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00001809 }
1810 len -= l;
1811 buf += l;
1812 addr += l;
1813 }
Paul Brooka68fe892010-03-01 00:08:59 +00001814 return 0;
bellard13eb76e2004-01-24 15:23:36 +00001815}
bellard8df1cd02005-01-28 22:37:22 +00001816
bellard13eb76e2004-01-24 15:23:36 +00001817#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001818
Avi Kivitya8170e52012-10-23 12:30:10 +02001819static void invalidate_and_set_dirty(hwaddr addr,
1820 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001821{
1822 if (!cpu_physical_memory_is_dirty(addr)) {
1823 /* invalidate code */
1824 tb_invalidate_phys_page_range(addr, addr + length, 0);
1825 /* set dirty bit */
1826 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1827 }
Anthony PERARDe2269392012-10-03 13:49:22 +00001828 xen_modified_memory(addr, length);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001829}
1830
Avi Kivitya8170e52012-10-23 12:30:10 +02001831void address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02001832 int len, bool is_write)
bellard13eb76e2004-01-24 15:23:36 +00001833{
Avi Kivityac1970f2012-10-03 16:22:53 +02001834 AddressSpaceDispatch *d = as->dispatch;
Avi Kivity37ec01d2012-03-08 18:08:35 +02001835 int l;
bellard13eb76e2004-01-24 15:23:36 +00001836 uint8_t *ptr;
1837 uint32_t val;
Avi Kivitya8170e52012-10-23 12:30:10 +02001838 hwaddr page;
Avi Kivityf3705d52012-03-08 16:16:34 +02001839 MemoryRegionSection *section;
ths3b46e622007-09-17 08:09:54 +00001840
bellard13eb76e2004-01-24 15:23:36 +00001841 while (len > 0) {
1842 page = addr & TARGET_PAGE_MASK;
1843 l = (page + TARGET_PAGE_SIZE) - addr;
1844 if (l > len)
1845 l = len;
Avi Kivityac1970f2012-10-03 16:22:53 +02001846 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00001847
bellard13eb76e2004-01-24 15:23:36 +00001848 if (is_write) {
Avi Kivityf3705d52012-03-08 16:16:34 +02001849 if (!memory_region_is_ram(section->mr)) {
Avi Kivitya8170e52012-10-23 12:30:10 +02001850 hwaddr addr1;
Blue Swirlcc5bea62012-04-14 14:56:48 +00001851 addr1 = memory_region_section_addr(section, addr);
bellard6a00d602005-11-21 23:25:50 +00001852 /* XXX: could force cpu_single_env to NULL to avoid
1853 potential bugs */
aurel326c2934d2009-02-18 21:37:17 +00001854 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00001855 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001856 val = ldl_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02001857 io_mem_write(section->mr, addr1, val, 4);
bellard13eb76e2004-01-24 15:23:36 +00001858 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00001859 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00001860 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001861 val = lduw_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02001862 io_mem_write(section->mr, addr1, val, 2);
bellard13eb76e2004-01-24 15:23:36 +00001863 l = 2;
1864 } else {
bellard1c213d12005-09-03 10:49:04 +00001865 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001866 val = ldub_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02001867 io_mem_write(section->mr, addr1, val, 1);
bellard13eb76e2004-01-24 15:23:36 +00001868 l = 1;
1869 }
Avi Kivityf3705d52012-03-08 16:16:34 +02001870 } else if (!section->readonly) {
Anthony PERARD8ca56922011-07-15 04:32:53 +00001871 ram_addr_t addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02001872 addr1 = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00001873 + memory_region_section_addr(section, addr);
bellard13eb76e2004-01-24 15:23:36 +00001874 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00001875 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00001876 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001877 invalidate_and_set_dirty(addr1, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01001878 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00001879 }
1880 } else {
Blue Swirlcc5bea62012-04-14 14:56:48 +00001881 if (!(memory_region_is_ram(section->mr) ||
1882 memory_region_is_romd(section->mr))) {
Avi Kivitya8170e52012-10-23 12:30:10 +02001883 hwaddr addr1;
bellard13eb76e2004-01-24 15:23:36 +00001884 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00001885 addr1 = memory_region_section_addr(section, addr);
aurel326c2934d2009-02-18 21:37:17 +00001886 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00001887 /* 32 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02001888 val = io_mem_read(section->mr, addr1, 4);
bellardc27004e2005-01-03 23:35:10 +00001889 stl_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00001890 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00001891 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00001892 /* 16 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02001893 val = io_mem_read(section->mr, addr1, 2);
bellardc27004e2005-01-03 23:35:10 +00001894 stw_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00001895 l = 2;
1896 } else {
bellard1c213d12005-09-03 10:49:04 +00001897 /* 8 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02001898 val = io_mem_read(section->mr, addr1, 1);
bellardc27004e2005-01-03 23:35:10 +00001899 stb_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00001900 l = 1;
1901 }
1902 } else {
1903 /* RAM case */
Anthony PERARD0a1b3572012-03-19 15:54:34 +00001904 ptr = qemu_get_ram_ptr(section->mr->ram_addr
Blue Swirlcc5bea62012-04-14 14:56:48 +00001905 + memory_region_section_addr(section,
1906 addr));
Avi Kivityf3705d52012-03-08 16:16:34 +02001907 memcpy(buf, ptr, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01001908 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00001909 }
1910 }
1911 len -= l;
1912 buf += l;
1913 addr += l;
1914 }
1915}
bellard8df1cd02005-01-28 22:37:22 +00001916
Avi Kivitya8170e52012-10-23 12:30:10 +02001917void address_space_write(AddressSpace *as, hwaddr addr,
Avi Kivityac1970f2012-10-03 16:22:53 +02001918 const uint8_t *buf, int len)
1919{
1920 address_space_rw(as, addr, (uint8_t *)buf, len, true);
1921}
1922
1923/**
1924 * address_space_read: read from an address space.
1925 *
1926 * @as: #AddressSpace to be accessed
1927 * @addr: address within that address space
1928 * @buf: buffer with the data transferred
 * @len: length of the data to read, in bytes
1929 */
Avi Kivitya8170e52012-10-23 12:30:10 +02001930void address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02001931{
1932 address_space_rw(as, addr, buf, len, false);
1933}
1934
1935
Avi Kivitya8170e52012-10-23 12:30:10 +02001936void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02001937 int len, int is_write)
1938{
1939 return address_space_rw(&address_space_memory, addr, buf, len, is_write);
1940}
1941
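/*
 * Illustrative sketch: a device model doing a guest-physical DMA write
 * through the slow path above.  The guest address, data and length are
 * hypothetical values that a real device would take from its descriptor
 * registers.
 */
static void example_dma_write(hwaddr guest_addr, const uint8_t *data, int len)
{
    /* is_write != 0 copies from 'data' into guest memory; MMIO, ROM and
       dirty-bit handling are done inside address_space_rw(). */
    cpu_physical_memory_rw(guest_addr, (uint8_t *)data, len, 1);
}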
bellardd0ecd2a2006-04-23 17:14:48 +00001942/* used for ROM loading : can write in RAM and ROM */
Avi Kivitya8170e52012-10-23 12:30:10 +02001943void cpu_physical_memory_write_rom(hwaddr addr,
bellardd0ecd2a2006-04-23 17:14:48 +00001944 const uint8_t *buf, int len)
1945{
Avi Kivityac1970f2012-10-03 16:22:53 +02001946 AddressSpaceDispatch *d = address_space_memory.dispatch;
bellardd0ecd2a2006-04-23 17:14:48 +00001947 int l;
1948 uint8_t *ptr;
Avi Kivitya8170e52012-10-23 12:30:10 +02001949 hwaddr page;
Avi Kivityf3705d52012-03-08 16:16:34 +02001950 MemoryRegionSection *section;
ths3b46e622007-09-17 08:09:54 +00001951
bellardd0ecd2a2006-04-23 17:14:48 +00001952 while (len > 0) {
1953 page = addr & TARGET_PAGE_MASK;
1954 l = (page + TARGET_PAGE_SIZE) - addr;
1955 if (l > len)
1956 l = len;
Avi Kivityac1970f2012-10-03 16:22:53 +02001957 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00001958
Blue Swirlcc5bea62012-04-14 14:56:48 +00001959 if (!(memory_region_is_ram(section->mr) ||
1960 memory_region_is_romd(section->mr))) {
bellardd0ecd2a2006-04-23 17:14:48 +00001961 /* do nothing */
1962 } else {
1963 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02001964 addr1 = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00001965 + memory_region_section_addr(section, addr);
bellardd0ecd2a2006-04-23 17:14:48 +00001966 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00001967 ptr = qemu_get_ram_ptr(addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00001968 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001969 invalidate_and_set_dirty(addr1, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01001970 qemu_put_ram_ptr(ptr);
bellardd0ecd2a2006-04-23 17:14:48 +00001971 }
1972 len -= l;
1973 buf += l;
1974 addr += l;
1975 }
1976}
1977
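/*
 * Illustrative sketch: how a firmware loader might install a blob into a
 * region that may be ROM.  The base address and image are hypothetical.
 */
static void example_install_firmware(hwaddr rom_base,
                                     const uint8_t *image, int image_size)
{
    /* Unlike a plain cpu_physical_memory_rw() write, this also stores into
       ROM and romd regions, which is what image loading needs. */
    cpu_physical_memory_write_rom(rom_base, image, image_size);
}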
aliguori6d16c2f2009-01-22 16:59:11 +00001978typedef struct {
1979 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02001980 hwaddr addr;
1981 hwaddr len;
aliguori6d16c2f2009-01-22 16:59:11 +00001982} BounceBuffer;
1983
1984static BounceBuffer bounce;
1985
aliguoriba223c22009-01-22 16:59:16 +00001986typedef struct MapClient {
1987 void *opaque;
1988 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00001989 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00001990} MapClient;
1991
Blue Swirl72cf2d42009-09-12 07:36:22 +00001992static QLIST_HEAD(map_client_list, MapClient) map_client_list
1993 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00001994
1995void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
1996{
Anthony Liguori7267c092011-08-20 22:09:37 -05001997 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00001998
1999 client->opaque = opaque;
2000 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002001 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00002002 return client;
2003}
2004
Blue Swirl8b9c99d2012-10-28 11:04:51 +00002005static void cpu_unregister_map_client(void *_client)
aliguoriba223c22009-01-22 16:59:16 +00002006{
2007 MapClient *client = (MapClient *)_client;
2008
Blue Swirl72cf2d42009-09-12 07:36:22 +00002009 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002010 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002011}
2012
2013static void cpu_notify_map_clients(void)
2014{
2015 MapClient *client;
2016
Blue Swirl72cf2d42009-09-12 07:36:22 +00002017 while (!QLIST_EMPTY(&map_client_list)) {
2018 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002019 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09002020 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00002021 }
2022}
2023
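/*
 * Illustrative sketch: how a caller might use cpu_register_map_client() to
 * be told when a failed mapping is worth retrying.  The callback and its
 * opaque state are hypothetical.
 */
static void example_map_retry_cb(void *opaque)
{
    /* Invoked from cpu_notify_map_clients() once the bounce buffer is free;
       a real device would re-issue its cpu_physical_memory_map() here. */
}

static void example_queue_map_retry(void *device_state)
{
    cpu_register_map_client(device_state, example_map_retry_cb);
}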
aliguori6d16c2f2009-01-22 16:59:11 +00002024/* Map a physical memory region into a host virtual address.
2025 * May map a subset of the requested range, given by and returned in *plen.
2026 * May return NULL if resources needed to perform the mapping are exhausted.
2027 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002028 * Use cpu_register_map_client() to know when retrying the map operation is
2029 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002030 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002031void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002032 hwaddr addr,
2033 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002034 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002035{
Avi Kivityac1970f2012-10-03 16:22:53 +02002036 AddressSpaceDispatch *d = as->dispatch;
Avi Kivitya8170e52012-10-23 12:30:10 +02002037 hwaddr len = *plen;
2038 hwaddr todo = 0;
aliguori6d16c2f2009-01-22 16:59:11 +00002039 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02002040 hwaddr page;
Avi Kivityf3705d52012-03-08 16:16:34 +02002041 MemoryRegionSection *section;
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002042 ram_addr_t raddr = RAM_ADDR_MAX;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002043 ram_addr_t rlen;
2044 void *ret;
aliguori6d16c2f2009-01-22 16:59:11 +00002045
2046 while (len > 0) {
2047 page = addr & TARGET_PAGE_MASK;
2048 l = (page + TARGET_PAGE_SIZE) - addr;
2049 if (l > len)
2050 l = len;
Avi Kivityac1970f2012-10-03 16:22:53 +02002051 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
aliguori6d16c2f2009-01-22 16:59:11 +00002052
Avi Kivityf3705d52012-03-08 16:16:34 +02002053 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002054 if (todo || bounce.buffer) {
aliguori6d16c2f2009-01-22 16:59:11 +00002055 break;
2056 }
2057 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
2058 bounce.addr = addr;
2059 bounce.len = l;
2060 if (!is_write) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002061 address_space_read(as, addr, bounce.buffer, l);
aliguori6d16c2f2009-01-22 16:59:11 +00002062 }
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002063
2064 *plen = l;
2065 return bounce.buffer;
aliguori6d16c2f2009-01-22 16:59:11 +00002066 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002067 if (!todo) {
Avi Kivityf3705d52012-03-08 16:16:34 +02002068 raddr = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002069 + memory_region_section_addr(section, addr);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002070 }
aliguori6d16c2f2009-01-22 16:59:11 +00002071
2072 len -= l;
2073 addr += l;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002074 todo += l;
aliguori6d16c2f2009-01-22 16:59:11 +00002075 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002076 rlen = todo;
2077 ret = qemu_ram_ptr_length(raddr, &rlen);
2078 *plen = rlen;
2079 return ret;
aliguori6d16c2f2009-01-22 16:59:11 +00002080}
2081
Avi Kivityac1970f2012-10-03 16:22:53 +02002082/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002083 * Will also mark the memory as dirty if is_write == 1. access_len gives
2084 * the amount of memory that was actually read or written by the caller.
2085 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002086void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2087 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002088{
2089 if (buffer != bounce.buffer) {
2090 if (is_write) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03002091 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002092 while (access_len) {
2093 unsigned l;
2094 l = TARGET_PAGE_SIZE;
2095 if (l > access_len)
2096 l = access_len;
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002097 invalidate_and_set_dirty(addr1, l);
aliguori6d16c2f2009-01-22 16:59:11 +00002098 addr1 += l;
2099 access_len -= l;
2100 }
2101 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002102 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002103 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002104 }
aliguori6d16c2f2009-01-22 16:59:11 +00002105 return;
2106 }
2107 if (is_write) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002108 address_space_write(as, bounce.addr, bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002109 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00002110 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002111 bounce.buffer = NULL;
aliguoriba223c22009-01-22 16:59:16 +00002112 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00002113}
bellardd0ecd2a2006-04-23 17:14:48 +00002114
Avi Kivitya8170e52012-10-23 12:30:10 +02002115void *cpu_physical_memory_map(hwaddr addr,
2116 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002117 int is_write)
2118{
2119 return address_space_map(&address_space_memory, addr, plen, is_write);
2120}
2121
Avi Kivitya8170e52012-10-23 12:30:10 +02002122void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2123 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002124{
2125 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2126}
2127
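/*
 * Illustrative sketch: the usual pattern around cpu_physical_memory_map()
 * and cpu_physical_memory_unmap().  The guest address and length are
 * hypothetical; both a NULL return (resources exhausted) and a
 * shorter-than-requested mapping must be handled by the caller.
 */
static int example_zero_guest_buffer(hwaddr guest_addr, hwaddr len)
{
    hwaddr mapped = len;
    void *host = cpu_physical_memory_map(guest_addr, &mapped, 1 /* is_write */);

    if (!host) {
        return -1;               /* retry later via cpu_register_map_client() */
    }
    memset(host, 0, mapped);     /* 'mapped' may be smaller than 'len' */
    cpu_physical_memory_unmap(host, mapped, 1, mapped);
    return mapped == len ? 0 : -1;
}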
bellard8df1cd02005-01-28 22:37:22 +00002128/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002129static inline uint32_t ldl_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002130 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002131{
bellard8df1cd02005-01-28 22:37:22 +00002132 uint8_t *ptr;
2133 uint32_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02002134 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00002135
Avi Kivityac1970f2012-10-03 16:22:53 +02002136 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00002137
Blue Swirlcc5bea62012-04-14 14:56:48 +00002138 if (!(memory_region_is_ram(section->mr) ||
2139 memory_region_is_romd(section->mr))) {
bellard8df1cd02005-01-28 22:37:22 +00002140 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00002141 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02002142 val = io_mem_read(section->mr, addr, 4);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002143#if defined(TARGET_WORDS_BIGENDIAN)
2144 if (endian == DEVICE_LITTLE_ENDIAN) {
2145 val = bswap32(val);
2146 }
2147#else
2148 if (endian == DEVICE_BIG_ENDIAN) {
2149 val = bswap32(val);
2150 }
2151#endif
bellard8df1cd02005-01-28 22:37:22 +00002152 } else {
2153 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02002154 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002155 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002156 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002157 switch (endian) {
2158 case DEVICE_LITTLE_ENDIAN:
2159 val = ldl_le_p(ptr);
2160 break;
2161 case DEVICE_BIG_ENDIAN:
2162 val = ldl_be_p(ptr);
2163 break;
2164 default:
2165 val = ldl_p(ptr);
2166 break;
2167 }
bellard8df1cd02005-01-28 22:37:22 +00002168 }
2169 return val;
2170}
2171
Avi Kivitya8170e52012-10-23 12:30:10 +02002172uint32_t ldl_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002173{
2174 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2175}
2176
Avi Kivitya8170e52012-10-23 12:30:10 +02002177uint32_t ldl_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002178{
2179 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2180}
2181
Avi Kivitya8170e52012-10-23 12:30:10 +02002182uint32_t ldl_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002183{
2184 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
2185}
2186
bellard84b7b8e2005-11-28 21:19:04 +00002187/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002188static inline uint64_t ldq_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002189 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00002190{
bellard84b7b8e2005-11-28 21:19:04 +00002191 uint8_t *ptr;
2192 uint64_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02002193 MemoryRegionSection *section;
bellard84b7b8e2005-11-28 21:19:04 +00002194
Avi Kivityac1970f2012-10-03 16:22:53 +02002195 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00002196
Blue Swirlcc5bea62012-04-14 14:56:48 +00002197 if (!(memory_region_is_ram(section->mr) ||
2198 memory_region_is_romd(section->mr))) {
bellard84b7b8e2005-11-28 21:19:04 +00002199 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00002200 addr = memory_region_section_addr(section, addr);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002201
2202 /* XXX This is broken when device endian != cpu endian.
2203 Fix and add "endian" variable check */
bellard84b7b8e2005-11-28 21:19:04 +00002204#ifdef TARGET_WORDS_BIGENDIAN
Avi Kivity37ec01d2012-03-08 18:08:35 +02002205 val = io_mem_read(section->mr, addr, 4) << 32;
2206 val |= io_mem_read(section->mr, addr + 4, 4);
bellard84b7b8e2005-11-28 21:19:04 +00002207#else
Avi Kivity37ec01d2012-03-08 18:08:35 +02002208 val = io_mem_read(section->mr, addr, 4);
2209 val |= io_mem_read(section->mr, addr + 4, 4) << 32;
bellard84b7b8e2005-11-28 21:19:04 +00002210#endif
2211 } else {
2212 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02002213 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002214 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002215 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002216 switch (endian) {
2217 case DEVICE_LITTLE_ENDIAN:
2218 val = ldq_le_p(ptr);
2219 break;
2220 case DEVICE_BIG_ENDIAN:
2221 val = ldq_be_p(ptr);
2222 break;
2223 default:
2224 val = ldq_p(ptr);
2225 break;
2226 }
bellard84b7b8e2005-11-28 21:19:04 +00002227 }
2228 return val;
2229}
2230
Avi Kivitya8170e52012-10-23 12:30:10 +02002231uint64_t ldq_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002232{
2233 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2234}
2235
Avi Kivitya8170e52012-10-23 12:30:10 +02002236uint64_t ldq_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002237{
2238 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2239}
2240
Avi Kivitya8170e52012-10-23 12:30:10 +02002241uint64_t ldq_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002242{
2243 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2244}
2245
bellardaab33092005-10-30 20:48:42 +00002246/* XXX: optimize */
Avi Kivitya8170e52012-10-23 12:30:10 +02002247uint32_t ldub_phys(hwaddr addr)
bellardaab33092005-10-30 20:48:42 +00002248{
2249 uint8_t val;
2250 cpu_physical_memory_read(addr, &val, 1);
2251 return val;
2252}
2253
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002254/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002255static inline uint32_t lduw_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002256 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00002257{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002258 uint8_t *ptr;
2259 uint64_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02002260 MemoryRegionSection *section;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002261
Avi Kivityac1970f2012-10-03 16:22:53 +02002262 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002263
Blue Swirlcc5bea62012-04-14 14:56:48 +00002264 if (!(memory_region_is_ram(section->mr) ||
2265 memory_region_is_romd(section->mr))) {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002266 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00002267 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02002268 val = io_mem_read(section->mr, addr, 2);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002269#if defined(TARGET_WORDS_BIGENDIAN)
2270 if (endian == DEVICE_LITTLE_ENDIAN) {
2271 val = bswap16(val);
2272 }
2273#else
2274 if (endian == DEVICE_BIG_ENDIAN) {
2275 val = bswap16(val);
2276 }
2277#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002278 } else {
2279 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02002280 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002281 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002282 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002283 switch (endian) {
2284 case DEVICE_LITTLE_ENDIAN:
2285 val = lduw_le_p(ptr);
2286 break;
2287 case DEVICE_BIG_ENDIAN:
2288 val = lduw_be_p(ptr);
2289 break;
2290 default:
2291 val = lduw_p(ptr);
2292 break;
2293 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002294 }
2295 return val;
bellardaab33092005-10-30 20:48:42 +00002296}
2297
Avi Kivitya8170e52012-10-23 12:30:10 +02002298uint32_t lduw_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002299{
2300 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2301}
2302
Avi Kivitya8170e52012-10-23 12:30:10 +02002303uint32_t lduw_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002304{
2305 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2306}
2307
Avi Kivitya8170e52012-10-23 12:30:10 +02002308uint32_t lduw_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002309{
2310 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
2311}
2312
bellard8df1cd02005-01-28 22:37:22 +00002313/* warning: addr must be aligned. The ram page is not marked as dirty
2314 and the code inside is not invalidated. It is useful if the dirty
2315 bits are used to track modified PTEs */
Avi Kivitya8170e52012-10-23 12:30:10 +02002316void stl_phys_notdirty(hwaddr addr, uint32_t val)
bellard8df1cd02005-01-28 22:37:22 +00002317{
bellard8df1cd02005-01-28 22:37:22 +00002318 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02002319 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00002320
Avi Kivityac1970f2012-10-03 16:22:53 +02002321 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00002322
Avi Kivityf3705d52012-03-08 16:16:34 +02002323 if (!memory_region_is_ram(section->mr) || section->readonly) {
Blue Swirlcc5bea62012-04-14 14:56:48 +00002324 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02002325 if (memory_region_is_ram(section->mr)) {
2326 section = &phys_sections[phys_section_rom];
2327 }
2328 io_mem_write(section->mr, addr, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00002329 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02002330 unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002331 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002332 + memory_region_section_addr(section, addr);
pbrook5579c7f2009-04-11 14:47:08 +00002333 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00002334 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00002335
2336 if (unlikely(in_migration)) {
2337 if (!cpu_physical_memory_is_dirty(addr1)) {
2338 /* invalidate code */
2339 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2340 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002341 cpu_physical_memory_set_dirty_flags(
2342 addr1, (0xff & ~CODE_DIRTY_FLAG));
aliguori74576192008-10-06 14:02:03 +00002343 }
2344 }
bellard8df1cd02005-01-28 22:37:22 +00002345 }
2346}
2347
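/*
 * Illustrative sketch: the kind of caller stl_phys_notdirty() exists for --
 * a target MMU helper updating a guest PTE in place without flagging the
 * page as modified for TB invalidation.  The PTE address and the accessed
 * bit value are hypothetical.
 */
static void example_set_pte_accessed(hwaddr pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    stl_phys_notdirty(pte_addr, pte | 0x20 /* hypothetical "accessed" bit */);
}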
Avi Kivitya8170e52012-10-23 12:30:10 +02002348void stq_phys_notdirty(hwaddr addr, uint64_t val)
j_mayerbc98a7e2007-04-04 07:55:12 +00002349{
j_mayerbc98a7e2007-04-04 07:55:12 +00002350 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02002351 MemoryRegionSection *section;
j_mayerbc98a7e2007-04-04 07:55:12 +00002352
Avi Kivityac1970f2012-10-03 16:22:53 +02002353 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00002354
Avi Kivityf3705d52012-03-08 16:16:34 +02002355 if (!memory_region_is_ram(section->mr) || section->readonly) {
Blue Swirlcc5bea62012-04-14 14:56:48 +00002356 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02002357 if (memory_region_is_ram(section->mr)) {
2358 section = &phys_sections[phys_section_rom];
2359 }
j_mayerbc98a7e2007-04-04 07:55:12 +00002360#ifdef TARGET_WORDS_BIGENDIAN
Avi Kivity37ec01d2012-03-08 18:08:35 +02002361 io_mem_write(section->mr, addr, val >> 32, 4);
2362 io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
j_mayerbc98a7e2007-04-04 07:55:12 +00002363#else
Avi Kivity37ec01d2012-03-08 18:08:35 +02002364 io_mem_write(section->mr, addr, (uint32_t)val, 4);
2365 io_mem_write(section->mr, addr + 4, val >> 32, 4);
j_mayerbc98a7e2007-04-04 07:55:12 +00002366#endif
2367 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02002368 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002369 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002370 + memory_region_section_addr(section, addr));
j_mayerbc98a7e2007-04-04 07:55:12 +00002371 stq_p(ptr, val);
2372 }
2373}
2374
bellard8df1cd02005-01-28 22:37:22 +00002375/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002376static inline void stl_phys_internal(hwaddr addr, uint32_t val,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002377 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002378{
bellard8df1cd02005-01-28 22:37:22 +00002379 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02002380 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00002381
Avi Kivityac1970f2012-10-03 16:22:53 +02002382 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00002383
Avi Kivityf3705d52012-03-08 16:16:34 +02002384 if (!memory_region_is_ram(section->mr) || section->readonly) {
Blue Swirlcc5bea62012-04-14 14:56:48 +00002385 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02002386 if (memory_region_is_ram(section->mr)) {
2387 section = &phys_sections[phys_section_rom];
2388 }
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002389#if defined(TARGET_WORDS_BIGENDIAN)
2390 if (endian == DEVICE_LITTLE_ENDIAN) {
2391 val = bswap32(val);
2392 }
2393#else
2394 if (endian == DEVICE_BIG_ENDIAN) {
2395 val = bswap32(val);
2396 }
2397#endif
Avi Kivity37ec01d2012-03-08 18:08:35 +02002398 io_mem_write(section->mr, addr, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00002399 } else {
2400 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02002401 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002402 + memory_region_section_addr(section, addr);
bellard8df1cd02005-01-28 22:37:22 +00002403 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002404 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002405 switch (endian) {
2406 case DEVICE_LITTLE_ENDIAN:
2407 stl_le_p(ptr, val);
2408 break;
2409 case DEVICE_BIG_ENDIAN:
2410 stl_be_p(ptr, val);
2411 break;
2412 default:
2413 stl_p(ptr, val);
2414 break;
2415 }
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002416 invalidate_and_set_dirty(addr1, 4);
bellard8df1cd02005-01-28 22:37:22 +00002417 }
2418}
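/*
 * Worked example (illustrative): on a TARGET_WORDS_BIGENDIAN build,
 * stl_phys_internal(addr, 0x11223344, DEVICE_LITTLE_ENDIAN) byte-swaps the
 * value to 0x44332211 before io_mem_write() when addr hits an I/O region,
 * while in the RAM case stl_le_p() stores the bytes 44 33 22 11 directly.
 * The swap is applied only when the requested device endianness differs
 * from the target's compile-time byte order.
 */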
2419
Avi Kivitya8170e52012-10-23 12:30:10 +02002420void stl_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002421{
2422 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2423}
2424
Avi Kivitya8170e52012-10-23 12:30:10 +02002425void stl_le_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002426{
2427 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2428}
2429
Avi Kivitya8170e52012-10-23 12:30:10 +02002430void stl_be_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002431{
2432 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2433}
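/*
 * Usage sketch (illustrative; the descriptor layout and names below are
 * hypothetical): a device model that keeps a little-endian descriptor in
 * guest RAM could publish a 32-bit status word with
 *
 *     stl_le_phys(desc_addr + 4, status);
 *
 * regardless of the target CPU's endianness, whereas stl_phys() would
 * store it in the guest's native byte order.
 */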
2434
bellardaab33092005-10-30 20:48:42 +00002435/* XXX: optimize */
Avi Kivitya8170e52012-10-23 12:30:10 +02002436void stb_phys(hwaddr addr, uint32_t val)
bellardaab33092005-10-30 20:48:42 +00002437{
2438 uint8_t v = val;
2439 cpu_physical_memory_write(addr, &v, 1);
2440}
2441
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002442/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002443static inline void stw_phys_internal(hwaddr addr, uint32_t val,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002444 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00002445{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002446 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02002447 MemoryRegionSection *section;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002448
Avi Kivityac1970f2012-10-03 16:22:53 +02002449 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002450
Avi Kivityf3705d52012-03-08 16:16:34 +02002451 if (!memory_region_is_ram(section->mr) || section->readonly) {
Blue Swirlcc5bea62012-04-14 14:56:48 +00002452 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02002453 if (memory_region_is_ram(section->mr)) {
2454 section = &phys_sections[phys_section_rom];
2455 }
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002456#if defined(TARGET_WORDS_BIGENDIAN)
2457 if (endian == DEVICE_LITTLE_ENDIAN) {
2458 val = bswap16(val);
2459 }
2460#else
2461 if (endian == DEVICE_BIG_ENDIAN) {
2462 val = bswap16(val);
2463 }
2464#endif
Avi Kivity37ec01d2012-03-08 18:08:35 +02002465 io_mem_write(section->mr, addr, val, 2);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002466 } else {
2467 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02002468 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002469 + memory_region_section_addr(section, addr);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002470 /* RAM case */
2471 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002472 switch (endian) {
2473 case DEVICE_LITTLE_ENDIAN:
2474 stw_le_p(ptr, val);
2475 break;
2476 case DEVICE_BIG_ENDIAN:
2477 stw_be_p(ptr, val);
2478 break;
2479 default:
2480 stw_p(ptr, val);
2481 break;
2482 }
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002483 invalidate_and_set_dirty(addr1, 2);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002484 }
bellardaab33092005-10-30 20:48:42 +00002485}
2486
Avi Kivitya8170e52012-10-23 12:30:10 +02002487void stw_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002488{
2489 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2490}
2491
Avi Kivitya8170e52012-10-23 12:30:10 +02002492void stw_le_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002493{
2494 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2495}
2496
Avi Kivitya8170e52012-10-23 12:30:10 +02002497void stw_be_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002498{
2499 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2500}
2501
bellardaab33092005-10-30 20:48:42 +00002502/* XXX: optimize */
Avi Kivitya8170e52012-10-23 12:30:10 +02002503void stq_phys(hwaddr addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00002504{
2505 val = tswap64(val);
Stefan Weil71d2b722011-03-26 21:06:56 +01002506 cpu_physical_memory_write(addr, &val, 8);
bellardaab33092005-10-30 20:48:42 +00002507}
2508
Avi Kivitya8170e52012-10-23 12:30:10 +02002509void stq_le_phys(hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002510{
2511 val = cpu_to_le64(val);
2512 cpu_physical_memory_write(addr, &val, 8);
2513}
2514
Avi Kivitya8170e52012-10-23 12:30:10 +02002515void stq_be_phys(hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002516{
2517 val = cpu_to_be64(val);
2518 cpu_physical_memory_write(addr, &val, 8);
2519}
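/*
 * Note (descriptive, added for illustration): the 64-bit helpers above
 * byte-swap in place (tswap64() for the native variant, cpu_to_le64() and
 * cpu_to_be64() for the fixed-endian ones) and then go through
 * cpu_physical_memory_write(), so unlike the 16/32-bit stores they have no
 * dedicated MMIO fast path -- presumably the reason for the XXX above.
 * Hypothetical caller sketch:
 *
 *     stq_le_phys(ring_addr + 8, (uint64_t)buf_gpa);
 *
 * "ring_addr" and "buf_gpa" are illustrative names, not defined here.
 */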
2520
aliguori5e2972f2009-03-28 17:51:36 +00002521/* virtual memory access for debug (includes writing to ROM) */
Andreas Färber9349b4f2012-03-14 01:38:32 +01002522int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00002523 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00002524{
2525 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02002526 hwaddr phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00002527 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00002528
2529 while (len > 0) {
2530 page = addr & TARGET_PAGE_MASK;
2531 phys_addr = cpu_get_phys_page_debug(env, page);
2532 /* if no physical page mapped, return an error */
2533 if (phys_addr == -1)
2534 return -1;
2535 l = (page + TARGET_PAGE_SIZE) - addr;
2536 if (l > len)
2537 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00002538 phys_addr += (addr & ~TARGET_PAGE_MASK);
aliguori5e2972f2009-03-28 17:51:36 +00002539 if (is_write)
2540 cpu_physical_memory_write_rom(phys_addr, buf, l);
2541 else
aliguori5e2972f2009-03-28 17:51:36 +00002542 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
bellard13eb76e2004-01-24 15:23:36 +00002543 len -= l;
2544 buf += l;
2545 addr += l;
2546 }
2547 return 0;
2548}
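/*
 * Usage sketch (illustrative): gdbstub-style callers of the function above
 * read or patch guest virtual memory one page at a time; writes go through
 * cpu_physical_memory_write_rom() so that software breakpoints can be
 * planted even in ROM-backed pages.  For example (hypothetical names):
 *
 *     uint32_t insn;
 *     if (cpu_memory_rw_debug(env, pc, (uint8_t *)&insn, sizeof(insn), 0) == 0) {
 *         handle_breakpoint_insn(insn);
 *     }
 *
 * "pc" and handle_breakpoint_insn() are placeholders for caller-side code.
 */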
Paul Brooka68fe892010-03-01 00:08:59 +00002549#endif
bellard13eb76e2004-01-24 15:23:36 +00002550
Paul Brookb3755a92010-03-12 16:54:58 +00002551#if !defined(CONFIG_USER_ONLY)
2552
Benjamin Herrenschmidt82afa582012-01-10 01:35:11 +00002553/*
2554 * A helper function for the _utterly broken_ virtio device model to find out if
2555 * it's running on a big endian machine. Don't do this at home kids!
2556 */
2557bool virtio_is_big_endian(void);
2558bool virtio_is_big_endian(void)
2559{
2560#if defined(TARGET_WORDS_BIGENDIAN)
2561 return true;
2562#else
2563 return false;
2564#endif
2565}
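/*
 * Usage sketch (illustrative): the virtio ring layout follows the guest's
 * native byte order, so a device model running on a host of the opposite
 * endianness might do something along the lines of
 *
 *     if (virtio_is_big_endian() != HOST_IS_BIG_ENDIAN) {
 *         val = bswap16(val);
 *     }
 *
 * HOST_IS_BIG_ENDIAN is a hypothetical stand-in for a host byte-order
 * check, not a symbol defined in this file.
 */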
2566
bellard61382a52003-10-27 21:22:23 +00002567#endif
Wen Congyang76f35532012-05-07 12:04:18 +08002568
2569#ifndef CONFIG_USER_ONLY
Avi Kivitya8170e52012-10-23 12:30:10 +02002570bool cpu_physical_memory_is_io(hwaddr phys_addr)
Wen Congyang76f35532012-05-07 12:04:18 +08002571{
2572 MemoryRegionSection *section;
2573
Avi Kivityac1970f2012-10-03 16:22:53 +02002574 section = phys_page_find(address_space_memory.dispatch,
2575 phys_addr >> TARGET_PAGE_BITS);
Wen Congyang76f35532012-05-07 12:04:18 +08002576
2577 return !(memory_region_is_ram(section->mr) ||
2578 memory_region_is_romd(section->mr));
2579}
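/*
 * Usage sketch (illustrative): memory-dump style code can use the predicate
 * above to skip device-backed pages and copy only real RAM/ROM contents:
 *
 *     if (!cpu_physical_memory_is_io(paddr)) {
 *         cpu_physical_memory_read(paddr, page_buf, TARGET_PAGE_SIZE);
 *     }
 *
 * "paddr" and "page_buf" are hypothetical caller-side variables.
 */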
2580#endif